1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256
31 
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37 			       u16 *allocated_size, bool is_alloc);
38 
39 static struct hnae3_ae_algo ae_algo;
40 
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 	/* required last entry */
50 	{0, }
51 };
52 
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54 
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56 					 HCLGE_CMDQ_TX_ADDR_H_REG,
57 					 HCLGE_CMDQ_TX_DEPTH_REG,
58 					 HCLGE_CMDQ_TX_TAIL_REG,
59 					 HCLGE_CMDQ_TX_HEAD_REG,
60 					 HCLGE_CMDQ_RX_ADDR_L_REG,
61 					 HCLGE_CMDQ_RX_ADDR_H_REG,
62 					 HCLGE_CMDQ_RX_DEPTH_REG,
63 					 HCLGE_CMDQ_RX_TAIL_REG,
64 					 HCLGE_CMDQ_RX_HEAD_REG,
65 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
66 					 HCLGE_CMDQ_INTR_STS_REG,
67 					 HCLGE_CMDQ_INTR_EN_REG,
68 					 HCLGE_CMDQ_INTR_GEN_REG};
69 
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71 					   HCLGE_VECTOR0_OTER_EN_REG,
72 					   HCLGE_MISC_RESET_STS_REG,
73 					   HCLGE_MISC_VECTOR_INT_STS,
74 					   HCLGE_GLOBAL_RESET_REG,
75 					   HCLGE_FUN_RST_ING,
76 					   HCLGE_GRO_EN_REG};
77 
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79 					 HCLGE_RING_RX_ADDR_H_REG,
80 					 HCLGE_RING_RX_BD_NUM_REG,
81 					 HCLGE_RING_RX_BD_LENGTH_REG,
82 					 HCLGE_RING_RX_MERGE_EN_REG,
83 					 HCLGE_RING_RX_TAIL_REG,
84 					 HCLGE_RING_RX_HEAD_REG,
85 					 HCLGE_RING_RX_FBD_NUM_REG,
86 					 HCLGE_RING_RX_OFFSET_REG,
87 					 HCLGE_RING_RX_FBD_OFFSET_REG,
88 					 HCLGE_RING_RX_STASH_REG,
89 					 HCLGE_RING_RX_BD_ERR_REG,
90 					 HCLGE_RING_TX_ADDR_L_REG,
91 					 HCLGE_RING_TX_ADDR_H_REG,
92 					 HCLGE_RING_TX_BD_NUM_REG,
93 					 HCLGE_RING_TX_PRIORITY_REG,
94 					 HCLGE_RING_TX_TC_REG,
95 					 HCLGE_RING_TX_MERGE_EN_REG,
96 					 HCLGE_RING_TX_TAIL_REG,
97 					 HCLGE_RING_TX_HEAD_REG,
98 					 HCLGE_RING_TX_FBD_NUM_REG,
99 					 HCLGE_RING_TX_OFFSET_REG,
100 					 HCLGE_RING_TX_EBD_NUM_REG,
101 					 HCLGE_RING_TX_EBD_OFFSET_REG,
102 					 HCLGE_RING_TX_BD_ERR_REG,
103 					 HCLGE_RING_EN_REG};
104 
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106 					     HCLGE_TQP_INTR_GL0_REG,
107 					     HCLGE_TQP_INTR_GL1_REG,
108 					     HCLGE_TQP_INTR_GL2_REG,
109 					     HCLGE_TQP_INTR_RL_REG};
110 
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112 	"App    Loopback test",
113 	"Serdes serial Loopback test",
114 	"Serdes parallel Loopback test",
115 	"Phy    Loopback test"
116 };
117 
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119 	{"mac_tx_mac_pause_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121 	{"mac_rx_mac_pause_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123 	{"mac_tx_control_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125 	{"mac_rx_control_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127 	{"mac_tx_pfc_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129 	{"mac_tx_pfc_pri0_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131 	{"mac_tx_pfc_pri1_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133 	{"mac_tx_pfc_pri2_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135 	{"mac_tx_pfc_pri3_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137 	{"mac_tx_pfc_pri4_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139 	{"mac_tx_pfc_pri5_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141 	{"mac_tx_pfc_pri6_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143 	{"mac_tx_pfc_pri7_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145 	{"mac_rx_pfc_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147 	{"mac_rx_pfc_pri0_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149 	{"mac_rx_pfc_pri1_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151 	{"mac_rx_pfc_pri2_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153 	{"mac_rx_pfc_pri3_pkt_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155 	{"mac_rx_pfc_pri4_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157 	{"mac_rx_pfc_pri5_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159 	{"mac_rx_pfc_pri6_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161 	{"mac_rx_pfc_pri7_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163 	{"mac_tx_total_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165 	{"mac_tx_total_oct_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167 	{"mac_tx_good_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169 	{"mac_tx_bad_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171 	{"mac_tx_good_oct_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173 	{"mac_tx_bad_oct_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175 	{"mac_tx_uni_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177 	{"mac_tx_multi_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179 	{"mac_tx_broad_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181 	{"mac_tx_undersize_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183 	{"mac_tx_oversize_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185 	{"mac_tx_64_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187 	{"mac_tx_65_127_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189 	{"mac_tx_128_255_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191 	{"mac_tx_256_511_oct_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193 	{"mac_tx_512_1023_oct_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195 	{"mac_tx_1024_1518_oct_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197 	{"mac_tx_1519_2047_oct_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199 	{"mac_tx_2048_4095_oct_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201 	{"mac_tx_4096_8191_oct_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203 	{"mac_tx_8192_9216_oct_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205 	{"mac_tx_9217_12287_oct_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207 	{"mac_tx_12288_16383_oct_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209 	{"mac_tx_1519_max_good_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211 	{"mac_tx_1519_max_bad_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213 	{"mac_rx_total_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215 	{"mac_rx_total_oct_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217 	{"mac_rx_good_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219 	{"mac_rx_bad_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221 	{"mac_rx_good_oct_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223 	{"mac_rx_bad_oct_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225 	{"mac_rx_uni_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227 	{"mac_rx_multi_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229 	{"mac_rx_broad_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231 	{"mac_rx_undersize_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233 	{"mac_rx_oversize_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235 	{"mac_rx_64_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237 	{"mac_rx_65_127_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239 	{"mac_rx_128_255_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241 	{"mac_rx_256_511_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243 	{"mac_rx_512_1023_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245 	{"mac_rx_1024_1518_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247 	{"mac_rx_1519_2047_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249 	{"mac_rx_2048_4095_oct_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251 	{"mac_rx_4096_8191_oct_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253 	{"mac_rx_8192_9216_oct_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255 	{"mac_rx_9217_12287_oct_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257 	{"mac_rx_12288_16383_oct_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259 	{"mac_rx_1519_max_good_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261 	{"mac_rx_1519_max_bad_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263 
264 	{"mac_tx_fragment_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266 	{"mac_tx_undermin_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268 	{"mac_tx_jabber_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270 	{"mac_tx_err_all_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272 	{"mac_tx_from_app_good_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274 	{"mac_tx_from_app_bad_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276 	{"mac_rx_fragment_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278 	{"mac_rx_undermin_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280 	{"mac_rx_jabber_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282 	{"mac_rx_fcs_err_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284 	{"mac_rx_send_app_good_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286 	{"mac_rx_send_app_bad_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 };
289 
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291 	{
292 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296 		.i_port_bitmap = 0x1,
297 	},
298 };
299 
300 static const u8 hclge_hash_key[] = {
301 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306 };
307 
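/* hclge_mac_update_stats_defective: read the MAC statistics with the
 * fixed-length command (HCLGE_MAC_CMD_NUM descriptors) and accumulate
 * each 64-bit counter into hdev->hw_stats.mac_stats. Used when the
 * firmware does not support the variable-length query below.
 */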
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309 {
310 #define HCLGE_MAC_CMD_NUM 21
311 
312 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314 	__le64 *desc_data;
315 	int i, k, n;
316 	int ret;
317 
318 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320 	if (ret) {
321 		dev_err(&hdev->pdev->dev,
322 			"Get MAC pkt stats fail, status = %d.\n", ret);
323 
324 		return ret;
325 	}
326 
327 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328 		/* for special opcode 0032, only the first desc has the head */
329 		if (unlikely(i == 0)) {
330 			desc_data = (__le64 *)(&desc[i].data[0]);
331 			n = HCLGE_RD_FIRST_STATS_NUM;
332 		} else {
333 			desc_data = (__le64 *)(&desc[i]);
334 			n = HCLGE_RD_OTHER_STATS_NUM;
335 		}
336 
337 		for (k = 0; k < n; k++) {
338 			*data += le64_to_cpu(*desc_data);
339 			data++;
340 			desc_data++;
341 		}
342 	}
343 
344 	return 0;
345 }
346 
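/* hclge_mac_update_stats_complete: read the MAC statistics with the
 * HCLGE_OPC_STATS_MAC_ALL command; the number of descriptors is queried
 * from firmware beforehand, so the descriptor array is allocated
 * dynamically. Counters are accumulated into hdev->hw_stats.mac_stats.
 */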
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348 {
349 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350 	struct hclge_desc *desc;
351 	__le64 *desc_data;
352 	u16 i, k, n;
353 	int ret;
354 
355 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356 	if (!desc)
357 		return -ENOMEM;
358 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360 	if (ret) {
361 		kfree(desc);
362 		return ret;
363 	}
364 
365 	for (i = 0; i < desc_num; i++) {
366 		/* for special opcode 0034, only the first desc has the head */
367 		if (i == 0) {
368 			desc_data = (__le64 *)(&desc[i].data[0]);
369 			n = HCLGE_RD_FIRST_STATS_NUM;
370 		} else {
371 			desc_data = (__le64 *)(&desc[i]);
372 			n = HCLGE_RD_OTHER_STATS_NUM;
373 		}
374 
375 		for (k = 0; k < n; k++) {
376 			*data += le64_to_cpu(*desc_data);
377 			data++;
378 			desc_data++;
379 		}
380 	}
381 
382 	kfree(desc);
383 
384 	return 0;
385 }
386 
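/* hclge_mac_query_reg_num: query the number of MAC statistics registers
 * from firmware and convert it into the number of descriptors needed to
 * read them all: 1 + roundup((reg_num - 3) / 4), i.e. the first
 * descriptor carries three values and each following one carries four.
 */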
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388 {
389 	struct hclge_desc desc;
390 	__le32 *desc_data;
391 	u32 reg_num;
392 	int ret;
393 
394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396 	if (ret)
397 		return ret;
398 
399 	desc_data = (__le32 *)(&desc.data[0]);
400 	reg_num = le32_to_cpu(*desc_data);
401 
402 	*desc_num = 1 + ((reg_num - 3) >> 2) +
403 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404 
405 	return 0;
406 }
407 
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
409 {
410 	u32 desc_num;
411 	int ret;
412 
413 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
414 
415 	/* The firmware supports the new statistics acquisition method */
416 	if (!ret)
417 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
418 	else if (ret == -EOPNOTSUPP)
419 		ret = hclge_mac_update_stats_defective(hdev);
420 	else
421 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422 
423 	return ret;
424 }
425 
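/* hclge_tqps_update_stats: refresh the per-queue packet counters. One RX
 * status query and one TX status query are issued per TQP, and the
 * returned counts are added to the software tqp_stats.
 */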
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427 {
428 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429 	struct hclge_vport *vport = hclge_get_vport(handle);
430 	struct hclge_dev *hdev = vport->back;
431 	struct hnae3_queue *queue;
432 	struct hclge_desc desc[1];
433 	struct hclge_tqp *tqp;
434 	int ret, i;
435 
436 	for (i = 0; i < kinfo->num_tqps; i++) {
437 		queue = handle->kinfo.tqp[i];
438 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATUS */
440 		hclge_cmd_setup_basic_desc(&desc[0],
441 					   HCLGE_OPC_QUERY_RX_STATUS,
442 					   true);
443 
444 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
446 		if (ret) {
447 			dev_err(&hdev->pdev->dev,
448 				"Query tqp stat fail, status = %d,queue = %d\n",
449 				ret,	i);
450 			return ret;
451 		}
452 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453 			le32_to_cpu(desc[0].data[1]);
454 	}
455 
456 	for (i = 0; i < kinfo->num_tqps; i++) {
457 		queue = handle->kinfo.tqp[i];
458 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATUS */
460 		hclge_cmd_setup_basic_desc(&desc[0],
461 					   HCLGE_OPC_QUERY_TX_STATUS,
462 					   true);
463 
464 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
466 		if (ret) {
467 			dev_err(&hdev->pdev->dev,
468 				"Query tqp stat fail, status = %d,queue = %d\n",
469 				ret, i);
470 			return ret;
471 		}
472 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473 			le32_to_cpu(desc[0].data[1]);
474 	}
475 
476 	return 0;
477 }
478 
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480 {
481 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482 	struct hclge_tqp *tqp;
483 	u64 *buff = data;
484 	int i;
485 
486 	for (i = 0; i < kinfo->num_tqps; i++) {
487 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489 	}
490 
491 	for (i = 0; i < kinfo->num_tqps; i++) {
492 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494 	}
495 
496 	return buff;
497 }
498 
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500 {
501 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502 
	/* one TX and one RX packet counter per TQP */
	return kinfo->num_tqps * 2;
504 }
505 
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507 {
508 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509 	u8 *buff = data;
510 	int i = 0;
511 
512 	for (i = 0; i < kinfo->num_tqps; i++) {
513 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514 			struct hclge_tqp, q);
515 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516 			 tqp->index);
517 		buff = buff + ETH_GSTRING_LEN;
518 	}
519 
520 	for (i = 0; i < kinfo->num_tqps; i++) {
521 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522 			struct hclge_tqp, q);
523 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524 			 tqp->index);
525 		buff = buff + ETH_GSTRING_LEN;
526 	}
527 
528 	return buff;
529 }
530 
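/* hclge_comm_get_stats: copy the statistics described by the strs[]
 * offset table from the structure pointed to by comm_stats into the
 * ethtool data buffer; returns the position just past the last entry.
 */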
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532 				 const struct hclge_comm_stats_str strs[],
533 				 int size, u64 *data)
534 {
535 	u64 *buf = data;
536 	u32 i;
537 
538 	for (i = 0; i < size; i++)
539 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540 
541 	return buf + size;
542 }
543 
544 static u8 *hclge_comm_get_strings(u32 stringset,
545 				  const struct hclge_comm_stats_str strs[],
546 				  int size, u8 *data)
547 {
548 	char *buff = (char *)data;
549 	u32 i;
550 
551 	if (stringset != ETH_SS_STATS)
552 		return buff;
553 
554 	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
557 		buff = buff + ETH_GSTRING_LEN;
558 	}
559 
560 	return (u8 *)buff;
561 }
562 
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564 {
565 	struct hnae3_handle *handle;
566 	int status;
567 
568 	handle = &hdev->vport[0].nic;
569 	if (handle->client) {
570 		status = hclge_tqps_update_stats(handle);
571 		if (status) {
572 			dev_err(&hdev->pdev->dev,
573 				"Update TQPS stats fail, status = %d.\n",
574 				status);
575 		}
576 	}
577 
578 	status = hclge_mac_update_stats(hdev);
579 	if (status)
580 		dev_err(&hdev->pdev->dev,
581 			"Update MAC stats fail, status = %d.\n", status);
582 }
583 
584 static void hclge_update_stats(struct hnae3_handle *handle,
585 			       struct net_device_stats *net_stats)
586 {
587 	struct hclge_vport *vport = hclge_get_vport(handle);
588 	struct hclge_dev *hdev = vport->back;
589 	int status;
590 
591 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592 		return;
593 
594 	status = hclge_mac_update_stats(hdev);
595 	if (status)
596 		dev_err(&hdev->pdev->dev,
597 			"Update MAC stats fail, status = %d.\n",
598 			status);
599 
600 	status = hclge_tqps_update_stats(handle);
601 	if (status)
602 		dev_err(&hdev->pdev->dev,
603 			"Update TQPS stats fail, status = %d.\n",
604 			status);
605 
606 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607 }
608 
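/* hclge_get_sset_count: return the number of ethtool self-test items
 * (ETH_SS_TEST) or statistics entries (ETH_SS_STATS); for the test case
 * the supported loopback flags on the handle are refreshed as a side
 * effect.
 */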
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612 		HNAE3_SUPPORT_PHY_LOOPBACK |\
613 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615 
616 	struct hclge_vport *vport = hclge_get_vport(handle);
617 	struct hclge_dev *hdev = vport->back;
618 	int count = 0;
619 
	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
625 	if (stringset == ETH_SS_TEST) {
		/* clear the loopback bit flags first */
627 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628 		if (hdev->pdev->revision >= 0x21 ||
629 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632 			count += 1;
633 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634 		}
635 
636 		count += 2;
637 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639 	} else if (stringset == ETH_SS_STATS) {
640 		count = ARRAY_SIZE(g_mac_stats_string) +
641 			hclge_tqps_get_sset_count(handle, stringset);
642 	}
643 
644 	return count;
645 }
646 
647 static void hclge_get_strings(struct hnae3_handle *handle,
648 			      u32 stringset,
649 			      u8 *data)
650 {
	u8 *p = data;
652 	int size;
653 
654 	if (stringset == ETH_SS_STATS) {
655 		size = ARRAY_SIZE(g_mac_stats_string);
656 		p = hclge_comm_get_strings(stringset,
657 					   g_mac_stats_string,
658 					   size,
659 					   p);
660 		p = hclge_tqps_get_strings(handle, p);
661 	} else if (stringset == ETH_SS_TEST) {
662 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663 			memcpy(p,
664 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
665 			       ETH_GSTRING_LEN);
666 			p += ETH_GSTRING_LEN;
667 		}
668 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669 			memcpy(p,
670 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671 			       ETH_GSTRING_LEN);
672 			p += ETH_GSTRING_LEN;
673 		}
674 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675 			memcpy(p,
676 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677 			       ETH_GSTRING_LEN);
678 			p += ETH_GSTRING_LEN;
679 		}
680 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681 			memcpy(p,
682 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
683 			       ETH_GSTRING_LEN);
684 			p += ETH_GSTRING_LEN;
685 		}
686 	}
687 }
688 
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691 	struct hclge_vport *vport = hclge_get_vport(handle);
692 	struct hclge_dev *hdev = vport->back;
693 	u64 *p;
694 
695 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696 				 g_mac_stats_string,
697 				 ARRAY_SIZE(g_mac_stats_string),
698 				 data);
699 	p = hclge_tqps_get_stats(handle, p);
700 }
701 
702 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
703 				     u64 *rx_cnt)
704 {
705 	struct hclge_vport *vport = hclge_get_vport(handle);
706 	struct hclge_dev *hdev = vport->back;
707 
708 	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709 	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
710 }
711 
712 static int hclge_parse_func_status(struct hclge_dev *hdev,
713 				   struct hclge_func_status_cmd *status)
714 {
715 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
716 		return -EINVAL;
717 
718 	/* Set the pf to main pf */
719 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
720 		hdev->flag |= HCLGE_FLAG_MAIN;
721 	else
722 		hdev->flag &= ~HCLGE_FLAG_MAIN;
723 
724 	return 0;
725 }
726 
727 static int hclge_query_function_status(struct hclge_dev *hdev)
728 {
729 	struct hclge_func_status_cmd *req;
730 	struct hclge_desc desc;
731 	int timeout = 0;
732 	int ret;
733 
734 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
735 	req = (struct hclge_func_status_cmd *)desc.data;
736 
737 	do {
738 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
739 		if (ret) {
740 			dev_err(&hdev->pdev->dev,
741 				"query function status failed %d.\n",
742 				ret);
743 
744 			return ret;
745 		}
746 
		/* Check if pf reset is done */
748 		if (req->pf_state)
749 			break;
750 		usleep_range(1000, 2000);
751 	} while (timeout++ < 5);
752 
753 	ret = hclge_parse_func_status(hdev, req);
754 
755 	return ret;
756 }
757 
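/* hclge_query_pf_resource: query the TQP, packet buffer and MSI-X
 * resources assigned to this PF. Buffer sizes are converted from
 * hardware units (left shift by HCLGE_BUF_UNIT_S) and rounded up to
 * HCLGE_BUF_SIZE_UNIT.
 */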
758 static int hclge_query_pf_resource(struct hclge_dev *hdev)
759 {
760 	struct hclge_pf_res_cmd *req;
761 	struct hclge_desc desc;
762 	int ret;
763 
764 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
766 	if (ret) {
767 		dev_err(&hdev->pdev->dev,
768 			"query pf resource failed %d.\n", ret);
769 		return ret;
770 	}
771 
772 	req = (struct hclge_pf_res_cmd *)desc.data;
773 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
774 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
775 
776 	if (req->tx_buf_size)
777 		hdev->tx_buf_size =
778 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
779 	else
780 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
781 
782 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
783 
784 	if (req->dv_buf_size)
785 		hdev->dv_buf_size =
786 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
787 	else
788 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
789 
790 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
791 
792 	if (hnae3_dev_roce_supported(hdev)) {
793 		hdev->roce_base_msix_offset =
794 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
796 		hdev->num_roce_msi =
797 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799 
		/* PF should have NIC vectors and RoCE vectors,
		 * NIC vectors are queued before RoCE vectors.
		 */
803 		hdev->num_msi = hdev->num_roce_msi  +
804 				hdev->roce_base_msix_offset;
805 	} else {
806 		hdev->num_msi =
807 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
809 	}
810 
811 	return 0;
812 }
813 
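/* hclge_parse_speed: translate the speed code reported in the
 * configuration into the corresponding HCLGE_MAC_SPEED_* value.
 */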
814 static int hclge_parse_speed(int speed_cmd, int *speed)
815 {
816 	switch (speed_cmd) {
817 	case 6:
818 		*speed = HCLGE_MAC_SPEED_10M;
819 		break;
820 	case 7:
821 		*speed = HCLGE_MAC_SPEED_100M;
822 		break;
823 	case 0:
824 		*speed = HCLGE_MAC_SPEED_1G;
825 		break;
826 	case 1:
827 		*speed = HCLGE_MAC_SPEED_10G;
828 		break;
829 	case 2:
830 		*speed = HCLGE_MAC_SPEED_25G;
831 		break;
832 	case 3:
833 		*speed = HCLGE_MAC_SPEED_40G;
834 		break;
835 	case 4:
836 		*speed = HCLGE_MAC_SPEED_50G;
837 		break;
838 	case 5:
839 		*speed = HCLGE_MAC_SPEED_100G;
840 		break;
841 	default:
842 		return -EINVAL;
843 	}
844 
845 	return 0;
846 }
847 
848 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
849 					u8 speed_ability)
850 {
851 	unsigned long *supported = hdev->hw.mac.supported;
852 
853 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
854 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
855 				 supported);
856 
857 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
858 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
859 				 supported);
860 
861 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
862 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
863 				 supported);
864 
865 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
866 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
867 				 supported);
868 
869 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
870 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
871 				 supported);
872 
873 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
874 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
875 }
876 
877 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
878 					 u8 speed_ability)
879 {
880 	unsigned long *supported = hdev->hw.mac.supported;
881 
	/* default to supporting all speeds for a GE port */
883 	if (!speed_ability)
884 		speed_ability = HCLGE_SUPPORT_GE;
885 
886 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
887 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
888 				 supported);
889 
890 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
891 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
892 				 supported);
893 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
894 				 supported);
895 	}
896 
897 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
898 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
899 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
900 	}
901 
902 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
903 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
904 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
905 }
906 
907 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
908 {
909 	u8 media_type = hdev->hw.mac.media_type;
910 
911 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
912 		hclge_parse_fiber_link_mode(hdev, speed_ability);
913 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
914 		hclge_parse_copper_link_mode(hdev, speed_ability);
915 }
916 
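/* hclge_parse_cfg: unpack the configuration parameters returned by
 * HCLGE_OPC_GET_CFG_PARAM from the two descriptors into struct
 * hclge_cfg.
 */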
917 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
918 {
919 	struct hclge_cfg_param_cmd *req;
920 	u64 mac_addr_tmp_high;
921 	u64 mac_addr_tmp;
922 	int i;
923 
924 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
925 
926 	/* get the configuration */
927 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
928 					      HCLGE_CFG_VMDQ_M,
929 					      HCLGE_CFG_VMDQ_S);
930 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
931 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
932 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
933 					    HCLGE_CFG_TQP_DESC_N_M,
934 					    HCLGE_CFG_TQP_DESC_N_S);
935 
936 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
937 					HCLGE_CFG_PHY_ADDR_M,
938 					HCLGE_CFG_PHY_ADDR_S);
939 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
940 					  HCLGE_CFG_MEDIA_TP_M,
941 					  HCLGE_CFG_MEDIA_TP_S);
942 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
943 					  HCLGE_CFG_RX_BUF_LEN_M,
944 					  HCLGE_CFG_RX_BUF_LEN_S);
945 	/* get mac_address */
946 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
947 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
948 					    HCLGE_CFG_MAC_ADDR_H_M,
949 					    HCLGE_CFG_MAC_ADDR_H_S);
950 
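	/* the two-step shift places the high 16 bits of the MAC address
	 * above bit 31, i.e. a total left shift of 32
	 */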
951 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
952 
953 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
954 					     HCLGE_CFG_DEFAULT_SPEED_M,
955 					     HCLGE_CFG_DEFAULT_SPEED_S);
956 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
957 					    HCLGE_CFG_RSS_SIZE_M,
958 					    HCLGE_CFG_RSS_SIZE_S);
959 
960 	for (i = 0; i < ETH_ALEN; i++)
961 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
962 
963 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
964 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
965 
966 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
967 					     HCLGE_CFG_SPEED_ABILITY_M,
968 					     HCLGE_CFG_SPEED_ABILITY_S);
969 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
970 					 HCLGE_CFG_UMV_TBL_SPACE_M,
971 					 HCLGE_CFG_UMV_TBL_SPACE_S);
972 	if (!cfg->umv_space)
973 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
974 }
975 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
980 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
981 {
982 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
983 	struct hclge_cfg_param_cmd *req;
984 	int i, ret;
985 
986 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
987 		u32 offset = 0;
988 
989 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
990 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
991 					   true);
992 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
993 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length should be in units of 4 bytes when sent to hardware */
995 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
996 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
997 		req->offset = cpu_to_le32(offset);
998 	}
999 
1000 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1001 	if (ret) {
1002 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1003 		return ret;
1004 	}
1005 
1006 	hclge_parse_cfg(hcfg, desc);
1007 
1008 	return 0;
1009 }
1010 
1011 static int hclge_get_cap(struct hclge_dev *hdev)
1012 {
1013 	int ret;
1014 
1015 	ret = hclge_query_function_status(hdev);
1016 	if (ret) {
1017 		dev_err(&hdev->pdev->dev,
1018 			"query function status error %d.\n", ret);
1019 		return ret;
1020 	}
1021 
1022 	/* get pf resource */
1023 	ret = hclge_query_pf_resource(hdev);
1024 	if (ret)
1025 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1026 
1027 	return ret;
1028 }
1029 
1030 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1031 {
1032 #define HCLGE_MIN_TX_DESC	64
1033 #define HCLGE_MIN_RX_DESC	64
1034 
1035 	if (!is_kdump_kernel())
1036 		return;
1037 
1038 	dev_info(&hdev->pdev->dev,
1039 		 "Running kdump kernel. Using minimal resources\n");
1040 
	/* the minimal number of queue pairs equals the number of vports */
1042 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1043 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1044 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1045 }
1046 
1047 static int hclge_configure(struct hclge_dev *hdev)
1048 {
1049 	struct hclge_cfg cfg;
1050 	int ret, i;
1051 
1052 	ret = hclge_get_cfg(hdev, &cfg);
1053 	if (ret) {
1054 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1055 		return ret;
1056 	}
1057 
1058 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1059 	hdev->base_tqp_pid = 0;
1060 	hdev->rss_size_max = cfg.rss_size_max;
1061 	hdev->rx_buf_len = cfg.rx_buf_len;
1062 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1063 	hdev->hw.mac.media_type = cfg.media_type;
1064 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1065 	hdev->num_tx_desc = cfg.tqp_desc_num;
1066 	hdev->num_rx_desc = cfg.tqp_desc_num;
1067 	hdev->tm_info.num_pg = 1;
1068 	hdev->tc_max = cfg.tc_num;
1069 	hdev->tm_info.hw_pfc_map = 0;
1070 	hdev->wanted_umv_size = cfg.umv_space;
1071 
1072 	if (hnae3_dev_fd_supported(hdev))
1073 		hdev->fd_en = true;
1074 
1075 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1076 	if (ret) {
1077 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1078 		return ret;
1079 	}
1080 
1081 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1082 
1083 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1084 	    (hdev->tc_max < 1)) {
1085 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1086 			 hdev->tc_max);
1087 		hdev->tc_max = 1;
1088 	}
1089 
1090 	/* Dev does not support DCB */
1091 	if (!hnae3_dev_dcb_supported(hdev)) {
1092 		hdev->tc_max = 1;
1093 		hdev->pfc_max = 0;
1094 	} else {
1095 		hdev->pfc_max = hdev->tc_max;
1096 	}
1097 
1098 	hdev->tm_info.num_tc = 1;
1099 
	/* Currently, discontiguous TCs are not supported */
1101 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1102 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1103 
1104 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1105 
1106 	hclge_init_kdump_kernel_config(hdev);
1107 
1108 	return ret;
1109 }
1110 
1111 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1112 			    int tso_mss_max)
1113 {
1114 	struct hclge_cfg_tso_status_cmd *req;
1115 	struct hclge_desc desc;
1116 	u16 tso_mss;
1117 
1118 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1119 
1120 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1121 
1122 	tso_mss = 0;
1123 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1124 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1125 	req->tso_mss_min = cpu_to_le16(tso_mss);
1126 
1127 	tso_mss = 0;
1128 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1129 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1130 	req->tso_mss_max = cpu_to_le16(tso_mss);
1131 
1132 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1133 }
1134 
1135 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1136 {
1137 	struct hclge_cfg_gro_status_cmd *req;
1138 	struct hclge_desc desc;
1139 	int ret;
1140 
1141 	if (!hnae3_dev_gro_supported(hdev))
1142 		return 0;
1143 
1144 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1145 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1146 
1147 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1148 
1149 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1150 	if (ret)
1151 		dev_err(&hdev->pdev->dev,
1152 			"GRO hardware config cmd failed, ret = %d\n", ret);
1153 
1154 	return ret;
1155 }
1156 
1157 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1158 {
1159 	struct hclge_tqp *tqp;
1160 	int i;
1161 
1162 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1163 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1164 	if (!hdev->htqp)
1165 		return -ENOMEM;
1166 
1167 	tqp = hdev->htqp;
1168 
1169 	for (i = 0; i < hdev->num_tqps; i++) {
1170 		tqp->dev = &hdev->pdev->dev;
1171 		tqp->index = i;
1172 
1173 		tqp->q.ae_algo = &ae_algo;
1174 		tqp->q.buf_size = hdev->rx_buf_len;
1175 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1176 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1177 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1178 			i * HCLGE_TQP_REG_SIZE;
1179 
1180 		tqp++;
1181 	}
1182 
1183 	return 0;
1184 }
1185 
1186 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1187 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1188 {
1189 	struct hclge_tqp_map_cmd *req;
1190 	struct hclge_desc desc;
1191 	int ret;
1192 
1193 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1194 
1195 	req = (struct hclge_tqp_map_cmd *)desc.data;
1196 	req->tqp_id = cpu_to_le16(tqp_pid);
1197 	req->tqp_vf = func_id;
1198 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1199 			1 << HCLGE_TQP_MAP_EN_B;
1200 	req->tqp_vid = cpu_to_le16(tqp_vid);
1201 
1202 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1203 	if (ret)
1204 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1205 
1206 	return ret;
1207 }
1208 
1209 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1210 {
1211 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1212 	struct hclge_dev *hdev = vport->back;
1213 	int i, alloced;
1214 
1215 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1216 	     alloced < num_tqps; i++) {
1217 		if (!hdev->htqp[i].alloced) {
1218 			hdev->htqp[i].q.handle = &vport->nic;
1219 			hdev->htqp[i].q.tqp_index = alloced;
1220 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1221 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1222 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1223 			hdev->htqp[i].alloced = true;
1224 			alloced++;
1225 		}
1226 	}
1227 	vport->alloc_tqps = alloced;
1228 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1229 				vport->alloc_tqps / hdev->tm_info.num_tc);
1230 
1231 	return 0;
1232 }
1233 
1234 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1235 			    u16 num_tx_desc, u16 num_rx_desc)
1236 
1237 {
1238 	struct hnae3_handle *nic = &vport->nic;
1239 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1240 	struct hclge_dev *hdev = vport->back;
1241 	int ret;
1242 
1243 	kinfo->num_tx_desc = num_tx_desc;
1244 	kinfo->num_rx_desc = num_rx_desc;
1245 
1246 	kinfo->rx_buf_len = hdev->rx_buf_len;
1247 
1248 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1249 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1250 	if (!kinfo->tqp)
1251 		return -ENOMEM;
1252 
1253 	ret = hclge_assign_tqp(vport, num_tqps);
1254 	if (ret)
1255 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1256 
1257 	return ret;
1258 }
1259 
1260 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1261 				  struct hclge_vport *vport)
1262 {
1263 	struct hnae3_handle *nic = &vport->nic;
1264 	struct hnae3_knic_private_info *kinfo;
1265 	u16 i;
1266 
1267 	kinfo = &nic->kinfo;
1268 	for (i = 0; i < vport->alloc_tqps; i++) {
1269 		struct hclge_tqp *q =
1270 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1271 		bool is_pf;
1272 		int ret;
1273 
1274 		is_pf = !(vport->vport_id);
1275 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1276 					     i, is_pf);
1277 		if (ret)
1278 			return ret;
1279 	}
1280 
1281 	return 0;
1282 }
1283 
1284 static int hclge_map_tqp(struct hclge_dev *hdev)
1285 {
1286 	struct hclge_vport *vport = hdev->vport;
1287 	u16 i, num_vport;
1288 
1289 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1290 	for (i = 0; i < num_vport; i++)	{
1291 		int ret;
1292 
1293 		ret = hclge_map_tqp_to_vport(hdev, vport);
1294 		if (ret)
1295 			return ret;
1296 
1297 		vport++;
1298 	}
1299 
1300 	return 0;
1301 }
1302 
1303 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1304 {
1305 	/* this would be initialized later */
1306 }
1307 
1308 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1309 {
1310 	struct hnae3_handle *nic = &vport->nic;
1311 	struct hclge_dev *hdev = vport->back;
1312 	int ret;
1313 
1314 	nic->pdev = hdev->pdev;
1315 	nic->ae_algo = &ae_algo;
1316 	nic->numa_node_mask = hdev->numa_node_mask;
1317 
1318 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1319 		ret = hclge_knic_setup(vport, num_tqps,
1320 				       hdev->num_tx_desc, hdev->num_rx_desc);
1321 
1322 		if (ret) {
1323 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1324 				ret);
1325 			return ret;
1326 		}
1327 	} else {
1328 		hclge_unic_setup(vport, num_tqps);
1329 	}
1330 
1331 	return 0;
1332 }
1333 
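/* hclge_alloc_vport: allocate one vport for each VMDq instance, each
 * requested VF and one for the PF itself, and distribute the PF's TQPs
 * evenly among them; the main vport takes the remainder.
 */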
1334 static int hclge_alloc_vport(struct hclge_dev *hdev)
1335 {
1336 	struct pci_dev *pdev = hdev->pdev;
1337 	struct hclge_vport *vport;
1338 	u32 tqp_main_vport;
1339 	u32 tqp_per_vport;
1340 	int num_vport, i;
1341 	int ret;
1342 
	/* We need to alloc a vport for the main NIC of the PF */
1344 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1345 
1346 	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1348 			hdev->num_tqps, num_vport);
1349 		return -EINVAL;
1350 	}
1351 
1352 	/* Alloc the same number of TQPs for every vport */
1353 	tqp_per_vport = hdev->num_tqps / num_vport;
1354 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1355 
1356 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1357 			     GFP_KERNEL);
1358 	if (!vport)
1359 		return -ENOMEM;
1360 
1361 	hdev->vport = vport;
1362 	hdev->num_alloc_vport = num_vport;
1363 
1364 	if (IS_ENABLED(CONFIG_PCI_IOV))
1365 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1366 
1367 	for (i = 0; i < num_vport; i++) {
1368 		vport->back = hdev;
1369 		vport->vport_id = i;
1370 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1371 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1372 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1373 		INIT_LIST_HEAD(&vport->vlan_list);
1374 		INIT_LIST_HEAD(&vport->uc_mac_list);
1375 		INIT_LIST_HEAD(&vport->mc_mac_list);
1376 
1377 		if (i == 0)
1378 			ret = hclge_vport_setup(vport, tqp_main_vport);
1379 		else
1380 			ret = hclge_vport_setup(vport, tqp_per_vport);
1381 		if (ret) {
1382 			dev_err(&pdev->dev,
1383 				"vport setup failed for vport %d, %d\n",
1384 				i, ret);
1385 			return ret;
1386 		}
1387 
1388 		vport++;
1389 	}
1390 
1391 	return 0;
1392 }
1393 
1394 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1395 				    struct hclge_pkt_buf_alloc *buf_alloc)
1396 {
/* TX buffer size is in units of 128 bytes */
1398 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1399 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1400 	struct hclge_tx_buff_alloc_cmd *req;
1401 	struct hclge_desc desc;
1402 	int ret;
1403 	u8 i;
1404 
1405 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1406 
1407 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1408 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1409 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1410 
1411 		req->tx_pkt_buff[i] =
1412 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1413 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1414 	}
1415 
1416 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1417 	if (ret)
1418 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1419 			ret);
1420 
1421 	return ret;
1422 }
1423 
1424 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1425 				 struct hclge_pkt_buf_alloc *buf_alloc)
1426 {
1427 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1428 
1429 	if (ret)
1430 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1431 
1432 	return ret;
1433 }
1434 
1435 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1436 {
1437 	int i, cnt = 0;
1438 
1439 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1440 		if (hdev->hw_tc_map & BIT(i))
1441 			cnt++;
1442 	return cnt;
1443 }
1444 
1445 /* Get the number of pfc enabled TCs, which have private buffer */
1446 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1447 				  struct hclge_pkt_buf_alloc *buf_alloc)
1448 {
1449 	struct hclge_priv_buf *priv;
1450 	int i, cnt = 0;
1451 
1452 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1453 		priv = &buf_alloc->priv_buf[i];
1454 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1455 		    priv->enable)
1456 			cnt++;
1457 	}
1458 
1459 	return cnt;
1460 }
1461 
1462 /* Get the number of pfc disabled TCs, which have private buffer */
1463 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1464 				     struct hclge_pkt_buf_alloc *buf_alloc)
1465 {
1466 	struct hclge_priv_buf *priv;
1467 	int i, cnt = 0;
1468 
1469 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1470 		priv = &buf_alloc->priv_buf[i];
1471 		if (hdev->hw_tc_map & BIT(i) &&
1472 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1473 		    priv->enable)
1474 			cnt++;
1475 	}
1476 
1477 	return cnt;
1478 }
1479 
1480 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1481 {
1482 	struct hclge_priv_buf *priv;
1483 	u32 rx_priv = 0;
1484 	int i;
1485 
1486 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1487 		priv = &buf_alloc->priv_buf[i];
1488 		if (priv->enable)
1489 			rx_priv += priv->buf_size;
1490 	}
1491 	return rx_priv;
1492 }
1493 
1494 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1495 {
1496 	u32 i, total_tx_size = 0;
1497 
1498 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1499 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1500 
1501 	return total_tx_size;
1502 }
1503 
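/* hclge_is_rx_buf_ok: check whether the remaining packet buffer is large
 * enough for the shared RX buffer after the private buffers are
 * reserved; if so, fill in the shared buffer size and its high/low
 * waterlines and per-TC thresholds.
 */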
1504 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1505 				struct hclge_pkt_buf_alloc *buf_alloc,
1506 				u32 rx_all)
1507 {
1508 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1509 	u32 tc_num = hclge_get_tc_num(hdev);
1510 	u32 shared_buf, aligned_mps;
1511 	u32 rx_priv;
1512 	int i;
1513 
1514 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1515 
1516 	if (hnae3_dev_dcb_supported(hdev))
1517 		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1518 	else
1519 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1520 					+ hdev->dv_buf_size;
1521 
1522 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1523 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1524 			     HCLGE_BUF_SIZE_UNIT);
1525 
1526 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1527 	if (rx_all < rx_priv + shared_std)
1528 		return false;
1529 
1530 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1531 	buf_alloc->s_buf.buf_size = shared_buf;
1532 	if (hnae3_dev_dcb_supported(hdev)) {
1533 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1534 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1535 			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1536 	} else {
1537 		buf_alloc->s_buf.self.high = aligned_mps +
1538 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1539 		buf_alloc->s_buf.self.low = aligned_mps;
1540 	}
1541 
1542 	if (hnae3_dev_dcb_supported(hdev)) {
1543 		if (tc_num)
1544 			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1545 		else
1546 			hi_thrd = shared_buf - hdev->dv_buf_size;
1547 
1548 		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1549 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1550 		lo_thrd = hi_thrd - aligned_mps / 2;
1551 	} else {
1552 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1553 		lo_thrd = aligned_mps;
1554 	}
1555 
1556 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1557 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1558 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1559 	}
1560 
1561 	return true;
1562 }
1563 
1564 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1565 				struct hclge_pkt_buf_alloc *buf_alloc)
1566 {
1567 	u32 i, total_size;
1568 
1569 	total_size = hdev->pkt_buf_size;
1570 
1571 	/* alloc tx buffer for all enabled tc */
1572 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1573 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1574 
1575 		if (hdev->hw_tc_map & BIT(i)) {
1576 			if (total_size < hdev->tx_buf_size)
1577 				return -ENOMEM;
1578 
1579 			priv->tx_buf_size = hdev->tx_buf_size;
1580 		} else {
1581 			priv->tx_buf_size = 0;
1582 		}
1583 
1584 		total_size -= priv->tx_buf_size;
1585 	}
1586 
1587 	return 0;
1588 }
1589 
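/* hclge_rx_buf_calc_all: assign private RX buffers and waterlines for
 * every enabled TC (larger waterlines when "max" is set), then check
 * whether the shared buffer still fits in the remaining space.
 */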
1590 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1591 				  struct hclge_pkt_buf_alloc *buf_alloc)
1592 {
1593 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1594 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1595 	int i;
1596 
1597 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1598 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1599 
1600 		priv->enable = 0;
1601 		priv->wl.low = 0;
1602 		priv->wl.high = 0;
1603 		priv->buf_size = 0;
1604 
1605 		if (!(hdev->hw_tc_map & BIT(i)))
1606 			continue;
1607 
1608 		priv->enable = 1;
1609 
1610 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1611 			priv->wl.low = max ? aligned_mps : 256;
1612 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1613 						HCLGE_BUF_SIZE_UNIT);
1614 		} else {
1615 			priv->wl.low = 0;
1616 			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1617 		}
1618 
1619 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1620 	}
1621 
1622 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1623 }
1624 
1625 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1626 					  struct hclge_pkt_buf_alloc *buf_alloc)
1627 {
1628 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1629 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1630 	int i;
1631 
	/* clear the TCs starting from the last one */
1633 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1634 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1635 
1636 		if (hdev->hw_tc_map & BIT(i) &&
1637 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1638 			/* Clear the no pfc TC private buffer */
1639 			priv->wl.low = 0;
1640 			priv->wl.high = 0;
1641 			priv->buf_size = 0;
1642 			priv->enable = 0;
1643 			no_pfc_priv_num--;
1644 		}
1645 
1646 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1647 		    no_pfc_priv_num == 0)
1648 			break;
1649 	}
1650 
1651 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1652 }
1653 
1654 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1655 					struct hclge_pkt_buf_alloc *buf_alloc)
1656 {
1657 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1658 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1659 	int i;
1660 
	/* clear the TCs starting from the last one */
1662 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1663 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1664 
1665 		if (hdev->hw_tc_map & BIT(i) &&
1666 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
1667 			/* Reduce the number of pfc TC with private buffer */
1668 			priv->wl.low = 0;
1669 			priv->enable = 0;
1670 			priv->wl.high = 0;
1671 			priv->buf_size = 0;
1672 			pfc_priv_num--;
1673 		}
1674 
1675 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1676 		    pfc_priv_num == 0)
1677 			break;
1678 	}
1679 
1680 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1681 }
1682 
1683 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1684  * @hdev: pointer to struct hclge_dev
1685  * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
1687  */
1688 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1689 				struct hclge_pkt_buf_alloc *buf_alloc)
1690 {
1691 	/* When DCB is not supported, rx private buffer is not allocated. */
1692 	if (!hnae3_dev_dcb_supported(hdev)) {
1693 		u32 rx_all = hdev->pkt_buf_size;
1694 
1695 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1696 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1697 			return -ENOMEM;
1698 
1699 		return 0;
1700 	}
1701 
1702 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1703 		return 0;
1704 
1705 	/* try to decrease the buffer size */
1706 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1707 		return 0;
1708 
1709 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1710 		return 0;
1711 
1712 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1713 		return 0;
1714 
1715 	return -ENOMEM;
1716 }
1717 
1718 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1719 				   struct hclge_pkt_buf_alloc *buf_alloc)
1720 {
1721 	struct hclge_rx_priv_buff_cmd *req;
1722 	struct hclge_desc desc;
1723 	int ret;
1724 	int i;
1725 
1726 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1727 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1728 
1729 	/* Alloc private buffer TCs */
1730 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1731 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1732 
1733 		req->buf_num[i] =
1734 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1735 		req->buf_num[i] |=
1736 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1737 	}
1738 
1739 	req->shared_buf =
1740 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1741 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1742 
1743 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1744 	if (ret)
1745 		dev_err(&hdev->pdev->dev,
1746 			"rx private buffer alloc cmd failed %d\n", ret);
1747 
1748 	return ret;
1749 }
1750 
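/* hclge_rx_priv_wl_config: program the per-TC private buffer high/low
 * waterlines into hardware; the TCs are split across two chained
 * descriptors.
 */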
1751 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1752 				   struct hclge_pkt_buf_alloc *buf_alloc)
1753 {
1754 	struct hclge_rx_priv_wl_buf *req;
1755 	struct hclge_priv_buf *priv;
1756 	struct hclge_desc desc[2];
1757 	int i, j;
1758 	int ret;
1759 
1760 	for (i = 0; i < 2; i++) {
1761 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1762 					   false);
1763 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1764 
		/* The first descriptor sets the NEXT bit to 1 */
1766 		if (i == 0)
1767 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1768 		else
1769 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1770 
1771 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1772 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1773 
1774 			priv = &buf_alloc->priv_buf[idx];
1775 			req->tc_wl[j].high =
1776 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1777 			req->tc_wl[j].high |=
1778 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1779 			req->tc_wl[j].low =
1780 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1781 			req->tc_wl[j].low |=
1782 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1783 		}
1784 	}
1785 
1786 	/* Send 2 descriptors at one time */
1787 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1788 	if (ret)
1789 		dev_err(&hdev->pdev->dev,
1790 			"rx private waterline config cmd failed %d\n",
1791 			ret);
1792 	return ret;
1793 }
1794 
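/* hclge_common_thrd_config: configure the per-TC high/low thresholds of the
 * shared rx buffer, using two command descriptors chained with the NEXT flag
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: configure successful, negative: fail
 */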
1795 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1796 				    struct hclge_pkt_buf_alloc *buf_alloc)
1797 {
1798 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1799 	struct hclge_rx_com_thrd *req;
1800 	struct hclge_desc desc[2];
1801 	struct hclge_tc_thrd *tc;
1802 	int i, j;
1803 	int ret;
1804 
1805 	for (i = 0; i < 2; i++) {
1806 		hclge_cmd_setup_basic_desc(&desc[i],
1807 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1808 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1809 
1810 		/* The first descriptor sets the NEXT bit to 1 */
1811 		if (i == 0)
1812 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1813 		else
1814 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1815 
1816 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1817 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1818 
1819 			req->com_thrd[j].high =
1820 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1821 			req->com_thrd[j].high |=
1822 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1823 			req->com_thrd[j].low =
1824 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1825 			req->com_thrd[j].low |=
1826 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1827 		}
1828 	}
1829 
1830 	/* Send 2 descriptors at one time */
1831 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1832 	if (ret)
1833 		dev_err(&hdev->pdev->dev,
1834 			"common threshold config cmd failed %d\n", ret);
1835 	return ret;
1836 }
1837 
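/* hclge_common_wl_config: configure the high/low waterline of the shared
 * rx buffer itself
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: configure successful, negative: fail
 */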
1838 static int hclge_common_wl_config(struct hclge_dev *hdev,
1839 				  struct hclge_pkt_buf_alloc *buf_alloc)
1840 {
1841 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1842 	struct hclge_rx_com_wl *req;
1843 	struct hclge_desc desc;
1844 	int ret;
1845 
1846 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
1847 
1848 	req = (struct hclge_rx_com_wl *)desc.data;
1849 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
1850 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1851 
1852 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
1853 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1854 
1855 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1856 	if (ret)
1857 		dev_err(&hdev->pdev->dev,
1858 			"common waterline config cmd failed %d\n", ret);
1859 
1860 	return ret;
1861 }
1862 
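/* hclge_buffer_alloc: calculate the tx/rx packet buffer layout and program
 * the buffers and waterlines into hardware; per-TC waterlines and shared
 * buffer thresholds are only configured when DCB is supported
 * @hdev: pointer to struct hclge_dev
 * @return: 0: allocate successful, negative: fail
 */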
1863 int hclge_buffer_alloc(struct hclge_dev *hdev)
1864 {
1865 	struct hclge_pkt_buf_alloc *pkt_buf;
1866 	int ret;
1867 
1868 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
1869 	if (!pkt_buf)
1870 		return -ENOMEM;
1871 
1872 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
1873 	if (ret) {
1874 		dev_err(&hdev->pdev->dev,
1875 			"could not calc tx buffer size for all TCs %d\n", ret);
1876 		goto out;
1877 	}
1878 
1879 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
1880 	if (ret) {
1881 		dev_err(&hdev->pdev->dev,
1882 			"could not alloc tx buffers %d\n", ret);
1883 		goto out;
1884 	}
1885 
1886 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
1887 	if (ret) {
1888 		dev_err(&hdev->pdev->dev,
1889 			"could not calc rx priv buffer size for all TCs %d\n",
1890 			ret);
1891 		goto out;
1892 	}
1893 
1894 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
1895 	if (ret) {
1896 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
1897 			ret);
1898 		goto out;
1899 	}
1900 
1901 	if (hnae3_dev_dcb_supported(hdev)) {
1902 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
1903 		if (ret) {
1904 			dev_err(&hdev->pdev->dev,
1905 				"could not configure rx private waterline %d\n",
1906 				ret);
1907 			goto out;
1908 		}
1909 
1910 		ret = hclge_common_thrd_config(hdev, pkt_buf);
1911 		if (ret) {
1912 			dev_err(&hdev->pdev->dev,
1913 				"could not configure common threshold %d\n",
1914 				ret);
1915 			goto out;
1916 		}
1917 	}
1918 
1919 	ret = hclge_common_wl_config(hdev, pkt_buf);
1920 	if (ret)
1921 		dev_err(&hdev->pdev->dev,
1922 			"could not configure common waterline %d\n", ret);
1923 
1924 out:
1925 	kfree(pkt_buf);
1926 	return ret;
1927 }
1928 
1929 static int hclge_init_roce_base_info(struct hclge_vport *vport)
1930 {
1931 	struct hnae3_handle *roce = &vport->roce;
1932 	struct hnae3_handle *nic = &vport->nic;
1933 
1934 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
1935 
1936 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
1937 	    vport->back->num_msi_left == 0)
1938 		return -EINVAL;
1939 
1940 	roce->rinfo.base_vector = vport->back->roce_base_vector;
1941 
1942 	roce->rinfo.netdev = nic->kinfo.netdev;
1943 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
1944 
1945 	roce->pdev = nic->pdev;
1946 	roce->ae_algo = nic->ae_algo;
1947 	roce->numa_node_mask = nic->numa_node_mask;
1948 
1949 	return 0;
1950 }
1951 
1952 static int hclge_init_msi(struct hclge_dev *hdev)
1953 {
1954 	struct pci_dev *pdev = hdev->pdev;
1955 	int vectors;
1956 	int i;
1957 
1958 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1959 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
1960 	if (vectors < 0) {
1961 		dev_err(&pdev->dev,
1962 			"failed(%d) to allocate MSI/MSI-X vectors\n",
1963 			vectors);
1964 		return vectors;
1965 	}
1966 	if (vectors < hdev->num_msi)
1967 		dev_warn(&hdev->pdev->dev,
1968 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1969 			 hdev->num_msi, vectors);
1970 
1971 	hdev->num_msi = vectors;
1972 	hdev->num_msi_left = vectors;
1973 	hdev->base_msi_vector = pdev->irq;
1974 	hdev->roce_base_vector = hdev->base_msi_vector +
1975 				hdev->roce_base_msix_offset;
1976 
1977 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1978 					   sizeof(u16), GFP_KERNEL);
1979 	if (!hdev->vector_status) {
1980 		pci_free_irq_vectors(pdev);
1981 		return -ENOMEM;
1982 	}
1983 
1984 	for (i = 0; i < hdev->num_msi; i++)
1985 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
1986 
1987 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1988 					sizeof(int), GFP_KERNEL);
1989 	if (!hdev->vector_irq) {
1990 		pci_free_irq_vectors(pdev);
1991 		return -ENOMEM;
1992 	}
1993 
1994 	return 0;
1995 }
1996 
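/* hclge_check_speed_dup: half duplex is only valid for 10M and 100M,
 * any other speed is forced to full duplex
 * @duplex: requested duplex mode
 * @speed: requested speed
 * @return: the duplex mode to actually configure
 */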
1997 static u8 hclge_check_speed_dup(u8 duplex, int speed)
1998 {
2000 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2001 		duplex = HCLGE_MAC_FULL;
2002 
2003 	return duplex;
2004 }
2005 
2006 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2007 				      u8 duplex)
2008 {
2009 	struct hclge_config_mac_speed_dup_cmd *req;
2010 	struct hclge_desc desc;
2011 	int ret;
2012 
2013 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2014 
2015 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2016 
2017 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2018 
2019 	switch (speed) {
2020 	case HCLGE_MAC_SPEED_10M:
2021 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2022 				HCLGE_CFG_SPEED_S, 6);
2023 		break;
2024 	case HCLGE_MAC_SPEED_100M:
2025 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2026 				HCLGE_CFG_SPEED_S, 7);
2027 		break;
2028 	case HCLGE_MAC_SPEED_1G:
2029 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2030 				HCLGE_CFG_SPEED_S, 0);
2031 		break;
2032 	case HCLGE_MAC_SPEED_10G:
2033 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2034 				HCLGE_CFG_SPEED_S, 1);
2035 		break;
2036 	case HCLGE_MAC_SPEED_25G:
2037 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2038 				HCLGE_CFG_SPEED_S, 2);
2039 		break;
2040 	case HCLGE_MAC_SPEED_40G:
2041 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2042 				HCLGE_CFG_SPEED_S, 3);
2043 		break;
2044 	case HCLGE_MAC_SPEED_50G:
2045 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2046 				HCLGE_CFG_SPEED_S, 4);
2047 		break;
2048 	case HCLGE_MAC_SPEED_100G:
2049 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2050 				HCLGE_CFG_SPEED_S, 5);
2051 		break;
2052 	default:
2053 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2054 		return -EINVAL;
2055 	}
2056 
2057 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2058 		      1);
2059 
2060 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2061 	if (ret) {
2062 		dev_err(&hdev->pdev->dev,
2063 			"mac speed/duplex config cmd failed %d.\n", ret);
2064 		return ret;
2065 	}
2066 
2067 	return 0;
2068 }
2069 
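/* hclge_cfg_mac_speed_dup: configure MAC speed and duplex, skipping the
 * hardware write when the cached values already match, and updating the
 * cached speed/duplex on success.
 *
 * Illustrative example only (not taken from a real caller): forcing
 * 10G full duplex could look like this:
 *
 *	ret = hclge_cfg_mac_speed_dup(hdev, HCLGE_MAC_SPEED_10G,
 *				      HCLGE_MAC_FULL);
 *	if (ret)
 *		dev_err(&hdev->pdev->dev, "cfg speed/duplex failed %d\n", ret);
 */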
2070 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2071 {
2072 	int ret;
2073 
2074 	duplex = hclge_check_speed_dup(duplex, speed);
2075 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2076 		return 0;
2077 
2078 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2079 	if (ret)
2080 		return ret;
2081 
2082 	hdev->hw.mac.speed = speed;
2083 	hdev->hw.mac.duplex = duplex;
2084 
2085 	return 0;
2086 }
2087 
2088 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2089 				     u8 duplex)
2090 {
2091 	struct hclge_vport *vport = hclge_get_vport(handle);
2092 	struct hclge_dev *hdev = vport->back;
2093 
2094 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2095 }
2096 
2097 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2098 {
2099 	struct hclge_config_auto_neg_cmd *req;
2100 	struct hclge_desc desc;
2101 	u32 flag = 0;
2102 	int ret;
2103 
2104 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2105 
2106 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2107 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2108 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2109 
2110 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2111 	if (ret)
2112 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2113 			ret);
2114 
2115 	return ret;
2116 }
2117 
2118 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2119 {
2120 	struct hclge_vport *vport = hclge_get_vport(handle);
2121 	struct hclge_dev *hdev = vport->back;
2122 
2123 	return hclge_set_autoneg_en(hdev, enable);
2124 }
2125 
2126 static int hclge_get_autoneg(struct hnae3_handle *handle)
2127 {
2128 	struct hclge_vport *vport = hclge_get_vport(handle);
2129 	struct hclge_dev *hdev = vport->back;
2130 	struct phy_device *phydev = hdev->hw.mac.phydev;
2131 
2132 	if (phydev)
2133 		return phydev->autoneg;
2134 
2135 	return hdev->hw.mac.autoneg;
2136 }
2137 
2138 static int hclge_mac_init(struct hclge_dev *hdev)
2139 {
2140 	struct hclge_mac *mac = &hdev->hw.mac;
2141 	int ret;
2142 
2143 	hdev->support_sfp_query = true;
2144 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2145 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2146 					 hdev->hw.mac.duplex);
2147 	if (ret) {
2148 		dev_err(&hdev->pdev->dev,
2149 			"Config mac speed dup fail ret=%d\n", ret);
2150 		return ret;
2151 	}
2152 
2153 	mac->link = 0;
2154 
2155 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2156 	if (ret) {
2157 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2158 		return ret;
2159 	}
2160 
2161 	ret = hclge_buffer_alloc(hdev);
2162 	if (ret)
2163 		dev_err(&hdev->pdev->dev,
2164 			"allocate buffer fail, ret=%d\n", ret);
2165 
2166 	return ret;
2167 }
2168 
2169 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2170 {
2171 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2172 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2173 		schedule_work(&hdev->mbx_service_task);
2174 }
2175 
2176 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2177 {
2178 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2179 		schedule_work(&hdev->rst_service_task);
2180 }
2181 
2182 static void hclge_task_schedule(struct hclge_dev *hdev)
2183 {
2184 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2185 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2186 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2187 		(void)schedule_work(&hdev->service_task);
2188 }
2189 
2190 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2191 {
2192 	struct hclge_link_status_cmd *req;
2193 	struct hclge_desc desc;
2194 	int link_status;
2195 	int ret;
2196 
2197 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2198 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2199 	if (ret) {
2200 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2201 			ret);
2202 		return ret;
2203 	}
2204 
2205 	req = (struct hclge_link_status_cmd *)desc.data;
2206 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2207 
2208 	return !!link_status;
2209 }
2210 
2211 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2212 {
2213 	int mac_state;
2214 	int link_stat;
2215 
2216 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2217 		return 0;
2218 
2219 	mac_state = hclge_get_mac_link_status(hdev);
2220 
2221 	if (hdev->hw.mac.phydev) {
2222 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2223 			link_stat = mac_state &
2224 				hdev->hw.mac.phydev->link;
2225 		else
2226 			link_stat = 0;
2227 
2228 	} else {
2229 		link_stat = mac_state;
2230 	}
2231 
2232 	return !!link_stat;
2233 }
2234 
2235 static void hclge_update_link_status(struct hclge_dev *hdev)
2236 {
2237 	struct hnae3_client *rclient = hdev->roce_client;
2238 	struct hnae3_client *client = hdev->nic_client;
2239 	struct hnae3_handle *rhandle;
2240 	struct hnae3_handle *handle;
2241 	int state;
2242 	int i;
2243 
2244 	if (!client)
2245 		return;
2246 	state = hclge_get_mac_phy_link(hdev);
2247 	if (state != hdev->hw.mac.link) {
2248 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2249 			handle = &hdev->vport[i].nic;
2250 			client->ops->link_status_change(handle, state);
2251 			hclge_config_mac_tnl_int(hdev, state);
2252 			rhandle = &hdev->vport[i].roce;
2253 			if (rclient && rclient->ops->link_status_change)
2254 				rclient->ops->link_status_change(rhandle,
2255 								 state);
2256 		}
2257 		hdev->hw.mac.link = state;
2258 	}
2259 }
2260 
2261 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2262 {
2263 	struct hclge_sfp_speed_cmd *resp = NULL;
2264 	struct hclge_desc desc;
2265 	int ret;
2266 
2267 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
2268 	resp = (struct hclge_sfp_speed_cmd *)desc.data;
2269 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2270 	if (ret == -EOPNOTSUPP) {
2271 		dev_warn(&hdev->pdev->dev,
2272 			 "IMP does not support getting SFP speed %d\n", ret);
2273 		return ret;
2274 	} else if (ret) {
2275 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2276 		return ret;
2277 	}
2278 
2279 	*speed = resp->sfp_speed;
2280 
2281 	return 0;
2282 }
2283 
2284 static int hclge_update_speed_duplex(struct hclge_dev *hdev)
2285 {
2286 	struct hclge_mac mac = hdev->hw.mac;
2287 	int speed;
2288 	int ret;
2289 
2290 	/* get the speed from the SFP cmd when the phy
2291 	 * doesn't exist.
2292 	 */
2293 	if (mac.phydev)
2294 		return 0;
2295 
2296 	/* if IMP does not support getting SFP/qSFP speed, return directly */
2297 	if (!hdev->support_sfp_query)
2298 		return 0;
2299 
2300 	ret = hclge_get_sfp_speed(hdev, &speed);
2301 	if (ret == -EOPNOTSUPP) {
2302 		hdev->support_sfp_query = false;
2303 		return ret;
2304 	} else if (ret) {
2305 		return ret;
2306 	}
2307 
2308 	if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2309 		return 0; /* do nothing if no SFP */
2310 
2311 	/* must config full duplex for SFP */
2312 	return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2313 }
2314 
2315 static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
2316 {
2317 	struct hclge_vport *vport = hclge_get_vport(handle);
2318 	struct hclge_dev *hdev = vport->back;
2319 
2320 	return hclge_update_speed_duplex(hdev);
2321 }
2322 
2323 static int hclge_get_status(struct hnae3_handle *handle)
2324 {
2325 	struct hclge_vport *vport = hclge_get_vport(handle);
2326 	struct hclge_dev *hdev = vport->back;
2327 
2328 	hclge_update_link_status(hdev);
2329 
2330 	return hdev->hw.mac.link;
2331 }
2332 
2333 static void hclge_service_timer(struct timer_list *t)
2334 {
2335 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2336 
2337 	mod_timer(&hdev->service_timer, jiffies + HZ);
2338 	hdev->hw_stats.stats_timer++;
2339 	hclge_task_schedule(hdev);
2340 }
2341 
2342 static void hclge_service_complete(struct hclge_dev *hdev)
2343 {
2344 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2345 
2346 	/* Flush memory before next watchdog */
2347 	smp_mb__before_atomic();
2348 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2349 }
2350 
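/* hclge_check_event_cause: read the reset, MSI-X error and mailbox source
 * registers and return which vector0 event should be handled first
 * @hdev: pointer to struct hclge_dev
 * @clearval: returns the bits the caller should clear for reset/mailbox events
 * @return: one of the HCLGE_VECTOR0_EVENT_* values
 */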
2351 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2352 {
2353 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2354 
2355 	/* fetch the events from their corresponding regs */
2356 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2357 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2358 	msix_src_reg = hclge_read_dev(&hdev->hw,
2359 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2360 
2361 	/* Assumption: If by any chance reset and mailbox events are reported
2362 	 * together then we will only process the reset event in this go and
2363 	 * will defer the processing of the mailbox events. Since we will not
2364 	 * have cleared the RX CMDQ event this time, we will receive another
2365 	 * interrupt from H/W just for the mailbox.
2366 	 */
2367 
2368 	/* check for vector0 reset event sources */
2369 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2370 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2371 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2372 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2373 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2374 		hdev->rst_stats.imp_rst_cnt++;
2375 		return HCLGE_VECTOR0_EVENT_RST;
2376 	}
2377 
2378 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2379 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2380 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2381 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2382 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2383 		hdev->rst_stats.global_rst_cnt++;
2384 		return HCLGE_VECTOR0_EVENT_RST;
2385 	}
2386 
2387 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2388 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2389 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2390 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2391 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2392 		hdev->rst_stats.core_rst_cnt++;
2393 		return HCLGE_VECTOR0_EVENT_RST;
2394 	}
2395 
2396 	/* check for vector0 msix event source */
2397 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2398 		dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2399 			msix_src_reg);
2400 		return HCLGE_VECTOR0_EVENT_ERR;
2401 	}
2402 
2403 	/* check for vector0 mailbox(=CMDQ RX) event source */
2404 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2405 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2406 		*clearval = cmdq_src_reg;
2407 		return HCLGE_VECTOR0_EVENT_MBX;
2408 	}
2409 
2410 	/* print other vector0 event source */
2411 	dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2412 		cmdq_src_reg, msix_src_reg);
2413 	return HCLGE_VECTOR0_EVENT_OTHER;
2414 }
2415 
2416 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2417 				    u32 regclr)
2418 {
2419 	switch (event_type) {
2420 	case HCLGE_VECTOR0_EVENT_RST:
2421 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2422 		break;
2423 	case HCLGE_VECTOR0_EVENT_MBX:
2424 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2425 		break;
2426 	default:
2427 		break;
2428 	}
2429 }
2430 
2431 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2432 {
2433 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2434 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2435 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2436 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2437 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2438 }
2439 
2440 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2441 {
2442 	writel(enable ? 1 : 0, vector->addr);
2443 }
2444 
2445 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2446 {
2447 	struct hclge_dev *hdev = data;
2448 	u32 event_cause;
2449 	u32 clearval;
2450 
2451 	hclge_enable_vector(&hdev->misc_vector, false);
2452 	event_cause = hclge_check_event_cause(hdev, &clearval);
2453 
2454 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2455 	switch (event_cause) {
2456 	case HCLGE_VECTOR0_EVENT_ERR:
2457 		/* we do not know what type of reset is required now. This could
2458 		 * only be decided after we fetch the type of errors which
2459 		 * caused this event. Therefore, we will do the below for now:
2460 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2461 		 *    have deferred the choice of reset type to be used.
2462 		 * 2. Schedule the reset service task.
2463 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2464 		 *    will fetch the correct type of reset. This would be done
2465 		 *    by first decoding the types of errors.
2466 		 */
2467 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2468 		/* fall through */
2469 	case HCLGE_VECTOR0_EVENT_RST:
2470 		hclge_reset_task_schedule(hdev);
2471 		break;
2472 	case HCLGE_VECTOR0_EVENT_MBX:
2473 		/* If we are here then,
2474 		 * 1. Either we are not handling any mbx task and we are not
2475 		 *    scheduled as well
2476 		 *                        OR
2477 		 * 2. We could be handling a mbx task but nothing more is
2478 		 *    scheduled.
2479 		 * In both cases, we should schedule the mbx task as there are more
2480 		 * mbx messages reported by this interrupt.
2481 		 */
2482 		hclge_mbx_task_schedule(hdev);
2483 		break;
2484 	default:
2485 		dev_warn(&hdev->pdev->dev,
2486 			 "received unknown or unhandled event of vector0\n");
2487 		break;
2488 	}
2489 
2490 	/* clear the source of interrupt if it is not caused by reset */
2491 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2492 		hclge_clear_event_cause(hdev, event_cause, clearval);
2493 		hclge_enable_vector(&hdev->misc_vector, true);
2494 	}
2495 
2496 	return IRQ_HANDLED;
2497 }
2498 
2499 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2500 {
2501 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2502 		dev_warn(&hdev->pdev->dev,
2503 			 "vector(vector_id %d) has been freed.\n", vector_id);
2504 		return;
2505 	}
2506 
2507 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2508 	hdev->num_msi_left += 1;
2509 	hdev->num_msi_used -= 1;
2510 }
2511 
2512 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2513 {
2514 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2515 
2516 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2517 
2518 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2519 	hdev->vector_status[0] = 0;
2520 
2521 	hdev->num_msi_left -= 1;
2522 	hdev->num_msi_used += 1;
2523 }
2524 
2525 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2526 {
2527 	int ret;
2528 
2529 	hclge_get_misc_vector(hdev);
2530 
2531 	/* this IRQ will be explicitly freed by hclge_misc_irq_uninit() */
2532 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2533 			  0, "hclge_misc", hdev);
2534 	if (ret) {
2535 		hclge_free_vector(hdev, 0);
2536 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2537 			hdev->misc_vector.vector_irq);
2538 	}
2539 
2540 	return ret;
2541 }
2542 
2543 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2544 {
2545 	free_irq(hdev->misc_vector.vector_irq, hdev);
2546 	hclge_free_vector(hdev, 0);
2547 }
2548 
2549 int hclge_notify_client(struct hclge_dev *hdev,
2550 			enum hnae3_reset_notify_type type)
2551 {
2552 	struct hnae3_client *client = hdev->nic_client;
2553 	u16 i;
2554 
2555 	if (!client->ops->reset_notify)
2556 		return -EOPNOTSUPP;
2557 
2558 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2559 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2560 		int ret;
2561 
2562 		ret = client->ops->reset_notify(handle, type);
2563 		if (ret) {
2564 			dev_err(&hdev->pdev->dev,
2565 				"notify nic client failed %d(%d)\n", type, ret);
2566 			return ret;
2567 		}
2568 	}
2569 
2570 	return 0;
2571 }
2572 
2573 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2574 				    enum hnae3_reset_notify_type type)
2575 {
2576 	struct hnae3_client *client = hdev->roce_client;
2577 	int ret = 0;
2578 	u16 i;
2579 
2580 	if (!client)
2581 		return 0;
2582 
2583 	if (!client->ops->reset_notify)
2584 		return -EOPNOTSUPP;
2585 
2586 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2587 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2588 
2589 		ret = client->ops->reset_notify(handle, type);
2590 		if (ret) {
2591 			dev_err(&hdev->pdev->dev,
2592 				"notify roce client failed %d(%d)\n",
2593 				type, ret);
2594 			return ret;
2595 		}
2596 	}
2597 
2598 	return ret;
2599 }
2600 
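/* hclge_reset_wait: poll the relevant reset status register (or the FLR done
 * flag for FLR) until hardware finishes the current reset
 * @hdev: pointer to struct hclge_dev
 * @return: 0: reset done, -EBUSY: timeout, -EINVAL: unsupported reset type
 */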
2601 static int hclge_reset_wait(struct hclge_dev *hdev)
2602 {
2603 #define HCLGE_RESET_WATI_MS	100
2604 #define HCLGE_RESET_WAIT_CNT	200
2605 	u32 val, reg, reg_bit;
2606 	u32 cnt = 0;
2607 
2608 	switch (hdev->reset_type) {
2609 	case HNAE3_IMP_RESET:
2610 		reg = HCLGE_GLOBAL_RESET_REG;
2611 		reg_bit = HCLGE_IMP_RESET_BIT;
2612 		break;
2613 	case HNAE3_GLOBAL_RESET:
2614 		reg = HCLGE_GLOBAL_RESET_REG;
2615 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2616 		break;
2617 	case HNAE3_CORE_RESET:
2618 		reg = HCLGE_GLOBAL_RESET_REG;
2619 		reg_bit = HCLGE_CORE_RESET_BIT;
2620 		break;
2621 	case HNAE3_FUNC_RESET:
2622 		reg = HCLGE_FUN_RST_ING;
2623 		reg_bit = HCLGE_FUN_RST_ING_B;
2624 		break;
2625 	case HNAE3_FLR_RESET:
2626 		break;
2627 	default:
2628 		dev_err(&hdev->pdev->dev,
2629 			"Wait for unsupported reset type: %d\n",
2630 			hdev->reset_type);
2631 		return -EINVAL;
2632 	}
2633 
2634 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2635 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2636 		       cnt++ < HCLGE_RESET_WAIT_CNT)
2637 			msleep(HCLGE_RESET_WATI_MS);
2638 
2639 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2640 			dev_err(&hdev->pdev->dev,
2641 				"flr wait timeout: %d\n", cnt);
2642 			return -EBUSY;
2643 		}
2644 
2645 		return 0;
2646 	}
2647 
2648 	val = hclge_read_dev(&hdev->hw, reg);
2649 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2650 		msleep(HCLGE_RESET_WATI_MS);
2651 		val = hclge_read_dev(&hdev->hw, reg);
2652 		cnt++;
2653 	}
2654 
2655 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2656 		dev_warn(&hdev->pdev->dev,
2657 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2658 		return -EBUSY;
2659 	}
2660 
2661 	return 0;
2662 }
2663 
2664 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2665 {
2666 	struct hclge_vf_rst_cmd *req;
2667 	struct hclge_desc desc;
2668 
2669 	req = (struct hclge_vf_rst_cmd *)desc.data;
2670 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2671 	req->dest_vfid = func_id;
2672 
2673 	if (reset)
2674 		req->vf_rst = 0x1;
2675 
2676 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2677 }
2678 
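/* hclge_set_all_vf_rst: set or clear the FUNC_RST_ING flag for every VF
 * vport and, when asserting reset, inform each alive VF so its driver can
 * stop IO
 * @hdev: pointer to struct hclge_dev
 * @reset: true to assert VF reset, false to clear it
 * @return: 0: successful, negative: fail
 */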
2679 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2680 {
2681 	int i;
2682 
2683 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2684 		struct hclge_vport *vport = &hdev->vport[i];
2685 		int ret;
2686 
2687 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2688 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2689 		if (ret) {
2690 			dev_err(&hdev->pdev->dev,
2691 				"set vf(%d) rst failed %d!\n",
2692 				vport->vport_id, ret);
2693 			return ret;
2694 		}
2695 
2696 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
2697 			continue;
2698 
2699 		/* Inform VF to process the reset.
2700 		 * hclge_inform_reset_assert_to_vf may fail if VF
2701 		 * driver is not loaded.
2702 		 */
2703 		ret = hclge_inform_reset_assert_to_vf(vport);
2704 		if (ret)
2705 			dev_warn(&hdev->pdev->dev,
2706 				 "inform reset to vf(%d) failed %d!\n",
2707 				 vport->vport_id, ret);
2708 	}
2709 
2710 	return 0;
2711 }
2712 
2713 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
2714 {
2715 	struct hclge_desc desc;
2716 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
2717 	int ret;
2718 
2719 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
2720 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
2721 	req->fun_reset_vfid = func_id;
2722 
2723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2724 	if (ret)
2725 		dev_err(&hdev->pdev->dev,
2726 			"send function reset cmd fail, status =%d\n", ret);
2727 
2728 	return ret;
2729 }
2730 
2731 static void hclge_do_reset(struct hclge_dev *hdev)
2732 {
2733 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2734 	struct pci_dev *pdev = hdev->pdev;
2735 	u32 val;
2736 
2737 	if (hclge_get_hw_reset_stat(handle)) {
2738 		dev_info(&pdev->dev, "Hardware reset not finished\n");
2739 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
2740 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
2741 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
2742 		return;
2743 	}
2744 
2745 	switch (hdev->reset_type) {
2746 	case HNAE3_GLOBAL_RESET:
2747 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2748 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
2749 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2750 		dev_info(&pdev->dev, "Global Reset requested\n");
2751 		break;
2752 	case HNAE3_CORE_RESET:
2753 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
2754 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
2755 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
2756 		dev_info(&pdev->dev, "Core Reset requested\n");
2757 		break;
2758 	case HNAE3_FUNC_RESET:
2759 		dev_info(&pdev->dev, "PF Reset requested\n");
2760 		/* schedule again to check later */
2761 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
2762 		hclge_reset_task_schedule(hdev);
2763 		break;
2764 	case HNAE3_FLR_RESET:
2765 		dev_info(&pdev->dev, "FLR requested\n");
2766 		/* schedule again to check later */
2767 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
2768 		hclge_reset_task_schedule(hdev);
2769 		break;
2770 	default:
2771 		dev_warn(&pdev->dev,
2772 			 "Unsupported reset type: %d\n", hdev->reset_type);
2773 		break;
2774 	}
2775 }
2776 
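/* hclge_get_reset_level: resolve a possible unknown reset request into a
 * concrete type, then return the highest priority reset level pending in
 * @addr and clear it (together with the lower levels it supersedes)
 * @hdev: pointer to struct hclge_dev
 * @addr: bitmap of requested/pending reset types
 * @return: the reset level to handle, or HNAE3_NONE_RESET if the pending
 * level is lower than the reset already in progress
 */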
2777 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
2778 						   unsigned long *addr)
2779 {
2780 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
2781 
2782 	/* first, resolve any unknown reset type to the known type(s) */
2783 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
2784 		/* we will intentionally ignore any errors from this function
2785 		 * as we will end up in *some* reset request in any case
2786 		 */
2787 		hclge_handle_hw_msix_error(hdev, addr);
2788 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
2789 		/* We deferred the clearing of the error event which caused the
2790 		 * interrupt since it was not possible to do that in
2791 		 * interrupt context (and this is the reason we introduced the
2792 		 * new UNKNOWN reset type). Now that the errors have been
2793 		 * handled and cleared in hardware, we can safely enable
2794 		 * interrupts. This is an exception to the norm.
2795 		 */
2796 		hclge_enable_vector(&hdev->misc_vector, true);
2797 	}
2798 
2799 	/* return the highest priority reset level amongst all */
2800 	if (test_bit(HNAE3_IMP_RESET, addr)) {
2801 		rst_level = HNAE3_IMP_RESET;
2802 		clear_bit(HNAE3_IMP_RESET, addr);
2803 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2804 		clear_bit(HNAE3_CORE_RESET, addr);
2805 		clear_bit(HNAE3_FUNC_RESET, addr);
2806 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
2807 		rst_level = HNAE3_GLOBAL_RESET;
2808 		clear_bit(HNAE3_GLOBAL_RESET, addr);
2809 		clear_bit(HNAE3_CORE_RESET, addr);
2810 		clear_bit(HNAE3_FUNC_RESET, addr);
2811 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
2812 		rst_level = HNAE3_CORE_RESET;
2813 		clear_bit(HNAE3_CORE_RESET, addr);
2814 		clear_bit(HNAE3_FUNC_RESET, addr);
2815 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
2816 		rst_level = HNAE3_FUNC_RESET;
2817 		clear_bit(HNAE3_FUNC_RESET, addr);
2818 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
2819 		rst_level = HNAE3_FLR_RESET;
2820 		clear_bit(HNAE3_FLR_RESET, addr);
2821 	}
2822 
2823 	if (hdev->reset_type != HNAE3_NONE_RESET &&
2824 	    rst_level < hdev->reset_type)
2825 		return HNAE3_NONE_RESET;
2826 
2827 	return rst_level;
2828 }
2829 
2830 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
2831 {
2832 	u32 clearval = 0;
2833 
2834 	switch (hdev->reset_type) {
2835 	case HNAE3_IMP_RESET:
2836 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2837 		break;
2838 	case HNAE3_GLOBAL_RESET:
2839 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2840 		break;
2841 	case HNAE3_CORE_RESET:
2842 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2843 		break;
2844 	default:
2845 		break;
2846 	}
2847 
2848 	if (!clearval)
2849 		return;
2850 
2851 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
2852 	hclge_enable_vector(&hdev->misc_vector, true);
2853 }
2854 
2855 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
2856 {
2857 	int ret = 0;
2858 
2859 	switch (hdev->reset_type) {
2860 	case HNAE3_FUNC_RESET:
2861 		/* fall through */
2862 	case HNAE3_FLR_RESET:
2863 		ret = hclge_set_all_vf_rst(hdev, true);
2864 		break;
2865 	default:
2866 		break;
2867 	}
2868 
2869 	return ret;
2870 }
2871 
2872 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
2873 {
2874 	u32 reg_val;
2875 	int ret = 0;
2876 
2877 	switch (hdev->reset_type) {
2878 	case HNAE3_FUNC_RESET:
2879 		/* There is no mechanism for the PF to know if the VF has
2880 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2881 		 */
2882 		msleep(100);
2883 		ret = hclge_func_reset_cmd(hdev, 0);
2884 		if (ret) {
2885 			dev_err(&hdev->pdev->dev,
2886 				"asserting function reset fail %d!\n", ret);
2887 			return ret;
2888 		}
2889 
2890 		/* After performing PF reset, it is not necessary to do the
2891 		 * mailbox handling or send any command to firmware, because
2892 		 * any mailbox handling or command to firmware is only valid
2893 		 * after hclge_cmd_init is called.
2894 		 */
2895 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2896 		hdev->rst_stats.pf_rst_cnt++;
2897 		break;
2898 	case HNAE3_FLR_RESET:
2899 		/* There is no mechanism for the PF to know if the VF has
2900 		 * stopped IO; for now, just wait 100 ms for the VF to stop IO
2901 		 */
2902 		msleep(100);
2903 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2904 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
2905 		hdev->rst_stats.flr_rst_cnt++;
2906 		break;
2907 	case HNAE3_IMP_RESET:
2908 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
2909 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
2910 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
2911 		break;
2912 	default:
2913 		break;
2914 	}
2915 
2916 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
2917 
2918 	return ret;
2919 }
2920 
2921 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
2922 {
2923 #define MAX_RESET_FAIL_CNT 5
2924 #define RESET_UPGRADE_DELAY_SEC 10
2925 
2926 	if (hdev->reset_pending) {
2927 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
2928 			 hdev->reset_pending);
2929 		return true;
2930 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
2931 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
2932 		    BIT(HCLGE_IMP_RESET_BIT))) {
2933 		dev_info(&hdev->pdev->dev,
2934 			 "reset failed because IMP Reset is pending\n");
2935 		hclge_clear_reset_cause(hdev);
2936 		return false;
2937 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
2938 		hdev->reset_fail_cnt++;
2939 		if (is_timeout) {
2940 			set_bit(hdev->reset_type, &hdev->reset_pending);
2941 			dev_info(&hdev->pdev->dev,
2942 				 "re-schedule to wait for hw reset done\n");
2943 			return true;
2944 		}
2945 
2946 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
2947 		hclge_clear_reset_cause(hdev);
2948 		mod_timer(&hdev->reset_timer,
2949 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
2950 
2951 		return false;
2952 	}
2953 
2954 	hclge_clear_reset_cause(hdev);
2955 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
2956 	return false;
2957 }
2958 
2959 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
2960 {
2961 	int ret = 0;
2962 
2963 	switch (hdev->reset_type) {
2964 	case HNAE3_FUNC_RESET:
2965 		/* fall through */
2966 	case HNAE3_FLR_RESET:
2967 		ret = hclge_set_all_vf_rst(hdev, false);
2968 		break;
2969 	default:
2970 		break;
2971 	}
2972 
2973 	return ret;
2974 }
2975 
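/* hclge_reset: perform the full reset sequence: notify the nic/roce clients
 * down, prepare and assert the reset, wait for hardware to finish, then
 * re-initialize the ae device and bring the clients back up. On failure the
 * error is handed to hclge_reset_err_handle(), which decides whether to
 * reschedule the reset task.
 * @hdev: pointer to struct hclge_dev
 */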
2976 static void hclge_reset(struct hclge_dev *hdev)
2977 {
2978 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2979 	bool is_timeout = false;
2980 	int ret;
2981 
2982 	/* Initialize ae_dev reset status as well, in case enet layer wants to
2983 	 * know if device is undergoing reset
2984 	 */
2985 	ae_dev->reset_type = hdev->reset_type;
2986 	hdev->rst_stats.reset_cnt++;
2987 	/* perform reset of the stack & ae device for a client */
2988 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
2989 	if (ret)
2990 		goto err_reset;
2991 
2992 	ret = hclge_reset_prepare_down(hdev);
2993 	if (ret)
2994 		goto err_reset;
2995 
2996 	rtnl_lock();
2997 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2998 	if (ret)
2999 		goto err_reset_lock;
3000 
3001 	rtnl_unlock();
3002 
3003 	ret = hclge_reset_prepare_wait(hdev);
3004 	if (ret)
3005 		goto err_reset;
3006 
3007 	if (hclge_reset_wait(hdev)) {
3008 		is_timeout = true;
3009 		goto err_reset;
3010 	}
3011 
3012 	hdev->rst_stats.hw_reset_done_cnt++;
3013 
3014 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3015 	if (ret)
3016 		goto err_reset;
3017 
3018 	rtnl_lock();
3019 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3020 	if (ret)
3021 		goto err_reset_lock;
3022 
3023 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3024 	if (ret)
3025 		goto err_reset_lock;
3026 
3027 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3028 	if (ret)
3029 		goto err_reset_lock;
3030 
3031 	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3032 	if (ret)
3033 		goto err_reset_lock;
3034 
3035 	hclge_clear_reset_cause(hdev);
3036 
3037 	ret = hclge_reset_prepare_up(hdev);
3038 	if (ret)
3039 		goto err_reset_lock;
3040 
3041 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3042 	if (ret)
3043 		goto err_reset_lock;
3044 
3045 	rtnl_unlock();
3046 
3047 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3048 	if (ret)
3049 		goto err_reset;
3050 
3051 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3052 	if (ret)
3053 		goto err_reset;
3054 
3055 	hdev->last_reset_time = jiffies;
3056 	hdev->reset_fail_cnt = 0;
3057 	hdev->rst_stats.reset_done_cnt++;
3058 	ae_dev->reset_type = HNAE3_NONE_RESET;
3059 	del_timer(&hdev->reset_timer);
3060 
3061 	return;
3062 
3063 err_reset_lock:
3064 	rtnl_unlock();
3065 err_reset:
3066 	if (hclge_reset_err_handle(hdev, is_timeout))
3067 		hclge_reset_task_schedule(hdev);
3068 }
3069 
3070 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3071 {
3072 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3073 	struct hclge_dev *hdev = ae_dev->priv;
3074 
3075 	/* We might end up getting called broadly because of the 2 cases below:
3076 	 * 1. A recoverable error was conveyed through APEI and the only way to
3077 	 *    bring back normalcy is to reset.
3078 	 * 2. A new reset request from the stack due to timeout
3079 	 *
3080 	 * For the first case, the error event might not have an ae handle
3081 	 * available. Check if this is a new reset request and we are not here
3082 	 * just because the last reset attempt did not succeed and the watchdog
3083 	 * hit us again. We will know this if the last reset request did not
3084 	 * occur very recently (watchdog timer = 5*HZ, let us check after a
3085 	 * sufficiently large time, say 4*5*HZ). In case of a new request we
3086 	 * reset the "reset level" to PF reset. And if it is a repeat request
3087 	 * of the most recent one then we want to make sure we throttle the
3088 	 * reset request. Therefore, we will not allow it again before 3*HZ.
3089 	 */
3090 	if (!handle)
3091 		handle = &hdev->vport[0].nic;
3092 
3093 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3094 		return;
3095 	else if (hdev->default_reset_request)
3096 		hdev->reset_level =
3097 			hclge_get_reset_level(hdev,
3098 					      &hdev->default_reset_request);
3099 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3100 		hdev->reset_level = HNAE3_FUNC_RESET;
3101 
3102 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3103 		 hdev->reset_level);
3104 
3105 	/* request reset & schedule reset task */
3106 	set_bit(hdev->reset_level, &hdev->reset_request);
3107 	hclge_reset_task_schedule(hdev);
3108 
3109 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3110 		hdev->reset_level++;
3111 }
3112 
3113 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3114 					enum hnae3_reset_type rst_type)
3115 {
3116 	struct hclge_dev *hdev = ae_dev->priv;
3117 
3118 	set_bit(rst_type, &hdev->default_reset_request);
3119 }
3120 
3121 static void hclge_reset_timer(struct timer_list *t)
3122 {
3123 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3124 
3125 	dev_info(&hdev->pdev->dev,
3126 		 "triggering global reset in reset timer\n");
3127 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3128 	hclge_reset_event(hdev->pdev, NULL);
3129 }
3130 
3131 static void hclge_reset_subtask(struct hclge_dev *hdev)
3132 {
3133 	/* check if there is any ongoing reset in the hardware. This status can
3134 	 * be checked from reset_pending. If there is, then we need to wait for
3135 	 * hardware to complete the reset.
3136 	 *    a. If we are able to figure out in reasonable time that hardware
3137 	 *       has fully reset, then we can proceed with the driver and
3138 	 *       client reset.
3139 	 *    b. else, we can come back later to check this status so re-sched
3140 	 *       now.
3141 	 */
3142 	hdev->last_reset_time = jiffies;
3143 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3144 	if (hdev->reset_type != HNAE3_NONE_RESET)
3145 		hclge_reset(hdev);
3146 
3147 	/* check if we got any *new* reset requests to be honored */
3148 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3149 	if (hdev->reset_type != HNAE3_NONE_RESET)
3150 		hclge_do_reset(hdev);
3151 
3152 	hdev->reset_type = HNAE3_NONE_RESET;
3153 }
3154 
3155 static void hclge_reset_service_task(struct work_struct *work)
3156 {
3157 	struct hclge_dev *hdev =
3158 		container_of(work, struct hclge_dev, rst_service_task);
3159 
3160 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3161 		return;
3162 
3163 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3164 
3165 	hclge_reset_subtask(hdev);
3166 
3167 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3168 }
3169 
3170 static void hclge_mailbox_service_task(struct work_struct *work)
3171 {
3172 	struct hclge_dev *hdev =
3173 		container_of(work, struct hclge_dev, mbx_service_task);
3174 
3175 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3176 		return;
3177 
3178 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3179 
3180 	hclge_mbx_handler(hdev);
3181 
3182 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3183 }
3184 
3185 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3186 {
3187 	int i;
3188 
3189 	/* start from vport 1 because the PF (vport 0) is always alive */
3190 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3191 		struct hclge_vport *vport = &hdev->vport[i];
3192 
3193 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3194 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3195 
3196 		/* If the VF is not alive, set mps back to the default value */
3197 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3198 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3199 	}
3200 }
3201 
3202 static void hclge_service_task(struct work_struct *work)
3203 {
3204 	struct hclge_dev *hdev =
3205 		container_of(work, struct hclge_dev, service_task);
3206 
3207 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3208 		hclge_update_stats_for_all(hdev);
3209 		hdev->hw_stats.stats_timer = 0;
3210 	}
3211 
3212 	hclge_update_speed_duplex(hdev);
3213 	hclge_update_link_status(hdev);
3214 	hclge_update_vport_alive(hdev);
3215 	hclge_service_complete(hdev);
3216 }
3217 
3218 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3219 {
3220 	/* VF handle has no client */
3221 	if (!handle->client)
3222 		return container_of(handle, struct hclge_vport, nic);
3223 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3224 		return container_of(handle, struct hclge_vport, roce);
3225 	else
3226 		return container_of(handle, struct hclge_vport, nic);
3227 }
3228 
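/* hclge_get_vector: allocate up to @vector_num unused MSI-X vectors for the
 * vport (vector 0 is reserved for the misc interrupt) and fill in the irq
 * number and doorbell address of each one
 * @handle: pointer to the nic handle of the vport
 * @vector_num: number of vectors requested
 * @vector_info: array to be filled with the allocated vector info
 * @return: the number of vectors actually allocated
 */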
3229 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3230 			    struct hnae3_vector_info *vector_info)
3231 {
3232 	struct hclge_vport *vport = hclge_get_vport(handle);
3233 	struct hnae3_vector_info *vector = vector_info;
3234 	struct hclge_dev *hdev = vport->back;
3235 	int alloc = 0;
3236 	int i, j;
3237 
3238 	vector_num = min(hdev->num_msi_left, vector_num);
3239 
3240 	for (j = 0; j < vector_num; j++) {
3241 		for (i = 1; i < hdev->num_msi; i++) {
3242 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3243 				vector->vector = pci_irq_vector(hdev->pdev, i);
3244 				vector->io_addr = hdev->hw.io_base +
3245 					HCLGE_VECTOR_REG_BASE +
3246 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3247 					vport->vport_id *
3248 					HCLGE_VECTOR_VF_OFFSET;
3249 				hdev->vector_status[i] = vport->vport_id;
3250 				hdev->vector_irq[i] = vector->vector;
3251 
3252 				vector++;
3253 				alloc++;
3254 
3255 				break;
3256 			}
3257 		}
3258 	}
3259 	hdev->num_msi_left -= alloc;
3260 	hdev->num_msi_used += alloc;
3261 
3262 	return alloc;
3263 }
3264 
3265 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3266 {
3267 	int i;
3268 
3269 	for (i = 0; i < hdev->num_msi; i++)
3270 		if (vector == hdev->vector_irq[i])
3271 			return i;
3272 
3273 	return -EINVAL;
3274 }
3275 
3276 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3277 {
3278 	struct hclge_vport *vport = hclge_get_vport(handle);
3279 	struct hclge_dev *hdev = vport->back;
3280 	int vector_id;
3281 
3282 	vector_id = hclge_get_vector_index(hdev, vector);
3283 	if (vector_id < 0) {
3284 		dev_err(&hdev->pdev->dev,
3285 			"Get vector index fail. vector_id =%d\n", vector_id);
3286 		return vector_id;
3287 	}
3288 
3289 	hclge_free_vector(hdev, vector_id);
3290 
3291 	return 0;
3292 }
3293 
3294 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3295 {
3296 	return HCLGE_RSS_KEY_SIZE;
3297 }
3298 
3299 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3300 {
3301 	return HCLGE_RSS_IND_TBL_SIZE;
3302 }
3303 
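/* hclge_set_rss_algo_key: write the RSS hash algorithm and hash key to
 * hardware; the key is split across three HCLGE_OPC_RSS_GENERIC_CONFIG
 * descriptors
 * @hdev: pointer to struct hclge_dev
 * @hfunc: hash algorithm
 * @key: hash key of HCLGE_RSS_KEY_SIZE bytes
 * @return: 0: configure successful, negative: fail
 */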
3304 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3305 				  const u8 hfunc, const u8 *key)
3306 {
3307 	struct hclge_rss_config_cmd *req;
3308 	struct hclge_desc desc;
3309 	int key_offset;
3310 	int key_size;
3311 	int ret;
3312 
3313 	req = (struct hclge_rss_config_cmd *)desc.data;
3314 
3315 	for (key_offset = 0; key_offset < 3; key_offset++) {
3316 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3317 					   false);
3318 
3319 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3320 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3321 
3322 		if (key_offset == 2)
3323 			key_size = HCLGE_RSS_KEY_SIZE -
3324 				   HCLGE_RSS_HASH_KEY_NUM * 2;
3325 		else
3326 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3327 
3328 		memcpy(req->hash_key,
3329 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3330 
3331 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3332 		if (ret) {
3333 			dev_err(&hdev->pdev->dev,
3334 				"Configure RSS config fail, status = %d\n",
3335 				ret);
3336 			return ret;
3337 		}
3338 	}
3339 	return 0;
3340 }
3341 
3342 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3343 {
3344 	struct hclge_rss_indirection_table_cmd *req;
3345 	struct hclge_desc desc;
3346 	int i, j;
3347 	int ret;
3348 
3349 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3350 
3351 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3352 		hclge_cmd_setup_basic_desc
3353 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3354 
3355 		req->start_table_index =
3356 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3357 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3358 
3359 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3360 			req->rss_result[j] =
3361 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3362 
3363 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3364 		if (ret) {
3365 			dev_err(&hdev->pdev->dev,
3366 				"Configure rss indir table fail,status = %d\n",
3367 				ret);
3368 			return ret;
3369 		}
3370 	}
3371 	return 0;
3372 }
3373 
3374 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3375 				 u16 *tc_size, u16 *tc_offset)
3376 {
3377 	struct hclge_rss_tc_mode_cmd *req;
3378 	struct hclge_desc desc;
3379 	int ret;
3380 	int i;
3381 
3382 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3383 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3384 
3385 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3386 		u16 mode = 0;
3387 
3388 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3389 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3390 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3391 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3392 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3393 
3394 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3395 	}
3396 
3397 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3398 	if (ret)
3399 		dev_err(&hdev->pdev->dev,
3400 			"Configure rss tc mode fail, status = %d\n", ret);
3401 
3402 	return ret;
3403 }
3404 
3405 static void hclge_get_rss_type(struct hclge_vport *vport)
3406 {
3407 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3408 	    vport->rss_tuple_sets.ipv4_udp_en ||
3409 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3410 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3411 	    vport->rss_tuple_sets.ipv6_udp_en ||
3412 	    vport->rss_tuple_sets.ipv6_sctp_en)
3413 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3414 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3415 		 vport->rss_tuple_sets.ipv6_fragment_en)
3416 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3417 	else
3418 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3419 }
3420 
3421 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3422 {
3423 	struct hclge_rss_input_tuple_cmd *req;
3424 	struct hclge_desc desc;
3425 	int ret;
3426 
3427 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3428 
3429 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3430 
3431 	/* Get the tuple cfg from pf */
3432 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3433 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3434 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3435 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3436 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3437 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3438 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3439 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3440 	hclge_get_rss_type(&hdev->vport[0]);
3441 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3442 	if (ret)
3443 		dev_err(&hdev->pdev->dev,
3444 			"Configure rss input fail, status = %d\n", ret);
3445 	return ret;
3446 }
3447 
3448 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3449 			 u8 *key, u8 *hfunc)
3450 {
3451 	struct hclge_vport *vport = hclge_get_vport(handle);
3452 	int i;
3453 
3454 	/* Get hash algorithm */
3455 	if (hfunc) {
3456 		switch (vport->rss_algo) {
3457 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3458 			*hfunc = ETH_RSS_HASH_TOP;
3459 			break;
3460 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3461 			*hfunc = ETH_RSS_HASH_XOR;
3462 			break;
3463 		default:
3464 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3465 			break;
3466 		}
3467 	}
3468 
3469 	/* Get the RSS Key required by the user */
3470 	if (key)
3471 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3472 
3473 	/* Get indirect table */
3474 	if (indir)
3475 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3476 			indir[i] = vport->rss_indirection_tbl[i];
3477 
3478 	return 0;
3479 }
3480 
3481 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3482 			 const  u8 *key, const  u8 hfunc)
3483 {
3484 	struct hclge_vport *vport = hclge_get_vport(handle);
3485 	struct hclge_dev *hdev = vport->back;
3486 	u8 hash_algo;
3487 	int ret, i;
3488 
3489 	/* Set the RSS Hash Key if specified by the user */
3490 	if (key) {
3491 		switch (hfunc) {
3492 		case ETH_RSS_HASH_TOP:
3493 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3494 			break;
3495 		case ETH_RSS_HASH_XOR:
3496 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3497 			break;
3498 		case ETH_RSS_HASH_NO_CHANGE:
3499 			hash_algo = vport->rss_algo;
3500 			break;
3501 		default:
3502 			return -EINVAL;
3503 		}
3504 
3505 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3506 		if (ret)
3507 			return ret;
3508 
3509 		/* Update the shadow RSS key with the user specified key */
3510 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3511 		vport->rss_algo = hash_algo;
3512 	}
3513 
3514 	/* Update the shadow RSS table with user specified qids */
3515 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3516 		vport->rss_indirection_tbl[i] = indir[i];
3517 
3518 	/* Update the hardware */
3519 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3520 }
3521 
3522 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3523 {
3524 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3525 
3526 	if (nfc->data & RXH_L4_B_2_3)
3527 		hash_sets |= HCLGE_D_PORT_BIT;
3528 	else
3529 		hash_sets &= ~HCLGE_D_PORT_BIT;
3530 
3531 	if (nfc->data & RXH_IP_SRC)
3532 		hash_sets |= HCLGE_S_IP_BIT;
3533 	else
3534 		hash_sets &= ~HCLGE_S_IP_BIT;
3535 
3536 	if (nfc->data & RXH_IP_DST)
3537 		hash_sets |= HCLGE_D_IP_BIT;
3538 	else
3539 		hash_sets &= ~HCLGE_D_IP_BIT;
3540 
3541 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3542 		hash_sets |= HCLGE_V_TAG_BIT;
3543 
3544 	return hash_sets;
3545 }
3546 
3547 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3548 			       struct ethtool_rxnfc *nfc)
3549 {
3550 	struct hclge_vport *vport = hclge_get_vport(handle);
3551 	struct hclge_dev *hdev = vport->back;
3552 	struct hclge_rss_input_tuple_cmd *req;
3553 	struct hclge_desc desc;
3554 	u8 tuple_sets;
3555 	int ret;
3556 
3557 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3558 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3559 		return -EINVAL;
3560 
3561 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3562 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3563 
3564 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3565 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3566 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3567 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3568 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3569 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3570 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3571 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3572 
3573 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3574 	switch (nfc->flow_type) {
3575 	case TCP_V4_FLOW:
3576 		req->ipv4_tcp_en = tuple_sets;
3577 		break;
3578 	case TCP_V6_FLOW:
3579 		req->ipv6_tcp_en = tuple_sets;
3580 		break;
3581 	case UDP_V4_FLOW:
3582 		req->ipv4_udp_en = tuple_sets;
3583 		break;
3584 	case UDP_V6_FLOW:
3585 		req->ipv6_udp_en = tuple_sets;
3586 		break;
3587 	case SCTP_V4_FLOW:
3588 		req->ipv4_sctp_en = tuple_sets;
3589 		break;
3590 	case SCTP_V6_FLOW:
3591 		if ((nfc->data & RXH_L4_B_0_1) ||
3592 		    (nfc->data & RXH_L4_B_2_3))
3593 			return -EINVAL;
3594 
3595 		req->ipv6_sctp_en = tuple_sets;
3596 		break;
3597 	case IPV4_FLOW:
3598 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3599 		break;
3600 	case IPV6_FLOW:
3601 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3602 		break;
3603 	default:
3604 		return -EINVAL;
3605 	}
3606 
3607 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3608 	if (ret) {
3609 		dev_err(&hdev->pdev->dev,
3610 			"Set rss tuple fail, status = %d\n", ret);
3611 		return ret;
3612 	}
3613 
3614 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3615 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3616 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3617 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3618 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3619 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3620 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3621 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3622 	hclge_get_rss_type(vport);
3623 	return 0;
3624 }
3625 
3626 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3627 			       struct ethtool_rxnfc *nfc)
3628 {
3629 	struct hclge_vport *vport = hclge_get_vport(handle);
3630 	u8 tuple_sets;
3631 
3632 	nfc->data = 0;
3633 
3634 	switch (nfc->flow_type) {
3635 	case TCP_V4_FLOW:
3636 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3637 		break;
3638 	case UDP_V4_FLOW:
3639 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3640 		break;
3641 	case TCP_V6_FLOW:
3642 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3643 		break;
3644 	case UDP_V6_FLOW:
3645 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3646 		break;
3647 	case SCTP_V4_FLOW:
3648 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3649 		break;
3650 	case SCTP_V6_FLOW:
3651 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3652 		break;
3653 	case IPV4_FLOW:
3654 	case IPV6_FLOW:
3655 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3656 		break;
3657 	default:
3658 		return -EINVAL;
3659 	}
3660 
3661 	if (!tuple_sets)
3662 		return 0;
3663 
3664 	if (tuple_sets & HCLGE_D_PORT_BIT)
3665 		nfc->data |= RXH_L4_B_2_3;
3666 	if (tuple_sets & HCLGE_S_PORT_BIT)
3667 		nfc->data |= RXH_L4_B_0_1;
3668 	if (tuple_sets & HCLGE_D_IP_BIT)
3669 		nfc->data |= RXH_IP_DST;
3670 	if (tuple_sets & HCLGE_S_IP_BIT)
3671 		nfc->data |= RXH_IP_SRC;
3672 
3673 	return 0;
3674 }
3675 
3676 static int hclge_get_tc_size(struct hnae3_handle *handle)
3677 {
3678 	struct hclge_vport *vport = hclge_get_vport(handle);
3679 	struct hclge_dev *hdev = vport->back;
3680 
3681 	return hdev->rss_size_max;
3682 }
3683 
3684 int hclge_rss_init_hw(struct hclge_dev *hdev)
3685 {
3686 	struct hclge_vport *vport = hdev->vport;
3687 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3688 	u16 rss_size = vport[0].alloc_rss_size;
3689 	u8 *key = vport[0].rss_hash_key;
3690 	u8 hfunc = vport[0].rss_algo;
3691 	u16 tc_offset[HCLGE_MAX_TC_NUM];
3692 	u16 tc_valid[HCLGE_MAX_TC_NUM];
3693 	u16 tc_size[HCLGE_MAX_TC_NUM];
3694 	u16 roundup_size;
3695 	int i, ret;
3696 
3697 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
3698 	if (ret)
3699 		return ret;
3700 
3701 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
3702 	if (ret)
3703 		return ret;
3704 
3705 	ret = hclge_set_rss_input_tuple(hdev);
3706 	if (ret)
3707 		return ret;
3708 
3709 	/* Each TC has the same queue size, and the tc_size set to hardware is
3710 	 * the log2 of the roundup power of two of rss_size; the actual queue
3711 	 * size is limited by the indirection table.
3712 	 */
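	/* For example (hypothetical values): rss_size = 6 gives
	 * roundup_pow_of_two(6) = 8, so tc_size = ilog2(8) = 3 is written to
	 * hardware, while tc_offset still advances by the real rss_size of 6
	 * for each enabled TC.
	 */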
3713 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
3714 		dev_err(&hdev->pdev->dev,
3715 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
3716 			rss_size);
3717 		return -EINVAL;
3718 	}
3719 
3720 	roundup_size = roundup_pow_of_two(rss_size);
3721 	roundup_size = ilog2(roundup_size);
3722 
3723 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3724 		tc_valid[i] = 0;
3725 
3726 		if (!(hdev->hw_tc_map & BIT(i)))
3727 			continue;
3728 
3729 		tc_valid[i] = 1;
3730 		tc_size[i] = roundup_size;
3731 		tc_offset[i] = rss_size * i;
3732 	}
3733 
3734 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
3735 }
3736 
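/* Fill each vport's shadow RSS indirection table with a simple round-robin
 * mapping over its allocated RSS queues; the table is written to hardware
 * later, e.g. by hclge_rss_init_hw() for the PF.
 */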
3737 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
3738 {
3739 	struct hclge_vport *vport = hdev->vport;
3740 	int i, j;
3741 
3742 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
3743 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3744 			vport[j].rss_indirection_tbl[i] =
3745 				i % vport[j].alloc_rss_size;
3746 	}
3747 }
3748 
3749 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
3750 {
3751 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3752 	struct hclge_vport *vport = hdev->vport;
3753 
3754 	if (hdev->pdev->revision >= 0x21)
3755 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3756 
3757 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3758 		vport[i].rss_tuple_sets.ipv4_tcp_en =
3759 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3760 		vport[i].rss_tuple_sets.ipv4_udp_en =
3761 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3762 		vport[i].rss_tuple_sets.ipv4_sctp_en =
3763 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3764 		vport[i].rss_tuple_sets.ipv4_fragment_en =
3765 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3766 		vport[i].rss_tuple_sets.ipv6_tcp_en =
3767 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3768 		vport[i].rss_tuple_sets.ipv6_udp_en =
3769 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3770 		vport[i].rss_tuple_sets.ipv6_sctp_en =
3771 			HCLGE_RSS_INPUT_TUPLE_SCTP;
3772 		vport[i].rss_tuple_sets.ipv6_fragment_en =
3773 			HCLGE_RSS_INPUT_TUPLE_OTHER;
3774 
3775 		vport[i].rss_algo = rss_algo;
3776 
3777 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
3778 		       HCLGE_RSS_KEY_SIZE);
3779 	}
3780 
3781 	hclge_rss_indir_init_cfg(hdev);
3782 }
3783 
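/* Walk the ring chain and pack (ring type, tqp id, GL index) triples into a
 * vector-chain command. Each descriptor holds at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries, so the command is sent and the
 * descriptor re-initialized whenever it fills up; a final partially filled
 * descriptor is sent after the loop.
 */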
3784 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
3785 				int vector_id, bool en,
3786 				struct hnae3_ring_chain_node *ring_chain)
3787 {
3788 	struct hclge_dev *hdev = vport->back;
3789 	struct hnae3_ring_chain_node *node;
3790 	struct hclge_desc desc;
3791 	struct hclge_ctrl_vector_chain_cmd *req
3792 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
3793 	enum hclge_cmd_status status;
3794 	enum hclge_opcode_type op;
3795 	u16 tqp_type_and_id;
3796 	int i;
3797 
3798 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
3799 	hclge_cmd_setup_basic_desc(&desc, op, false);
3800 	req->int_vector_id = vector_id;
3801 
3802 	i = 0;
3803 	for (node = ring_chain; node; node = node->next) {
3804 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
3805 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
3806 				HCLGE_INT_TYPE_S,
3807 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
3808 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
3809 				HCLGE_TQP_ID_S, node->tqp_index);
3810 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
3811 				HCLGE_INT_GL_IDX_S,
3812 				hnae3_get_field(node->int_gl_idx,
3813 						HNAE3_RING_GL_IDX_M,
3814 						HNAE3_RING_GL_IDX_S));
3815 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
3816 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
3817 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
3818 			req->vfid = vport->vport_id;
3819 
3820 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
3821 			if (status) {
3822 				dev_err(&hdev->pdev->dev,
3823 					"Map TQP fail, status is %d.\n",
3824 					status);
3825 				return -EIO;
3826 			}
3827 			i = 0;
3828 
3829 			hclge_cmd_setup_basic_desc(&desc,
3830 						   op,
3831 						   false);
3832 			req->int_vector_id = vector_id;
3833 		}
3834 	}
3835 
3836 	if (i > 0) {
3837 		req->int_cause_num = i;
3838 		req->vfid = vport->vport_id;
3839 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
3840 		if (status) {
3841 			dev_err(&hdev->pdev->dev,
3842 				"Map TQP fail, status is %d.\n", status);
3843 			return -EIO;
3844 		}
3845 	}
3846 
3847 	return 0;
3848 }
3849 
3850 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
3851 				    int vector,
3852 				    struct hnae3_ring_chain_node *ring_chain)
3853 {
3854 	struct hclge_vport *vport = hclge_get_vport(handle);
3855 	struct hclge_dev *hdev = vport->back;
3856 	int vector_id;
3857 
3858 	vector_id = hclge_get_vector_index(hdev, vector);
3859 	if (vector_id < 0) {
3860 		dev_err(&hdev->pdev->dev,
3861 			"Get vector index fail. vector_id =%d\n", vector_id);
3862 		return vector_id;
3863 	}
3864 
3865 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
3866 }
3867 
3868 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
3869 				       int vector,
3870 				       struct hnae3_ring_chain_node *ring_chain)
3871 {
3872 	struct hclge_vport *vport = hclge_get_vport(handle);
3873 	struct hclge_dev *hdev = vport->back;
3874 	int vector_id, ret;
3875 
3876 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3877 		return 0;
3878 
3879 	vector_id = hclge_get_vector_index(hdev, vector);
3880 	if (vector_id < 0) {
3881 		dev_err(&handle->pdev->dev,
3882 			"Get vector index fail. ret =%d\n", vector_id);
3883 		return vector_id;
3884 	}
3885 
3886 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
3887 	if (ret)
3888 		dev_err(&handle->pdev->dev,
3889 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
3890 			vector_id,
3891 			ret);
3892 
3893 	return ret;
3894 }
3895 
3896 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
3897 			       struct hclge_promisc_param *param)
3898 {
3899 	struct hclge_promisc_cfg_cmd *req;
3900 	struct hclge_desc desc;
3901 	int ret;
3902 
3903 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
3904 
3905 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
3906 	req->vf_id = param->vf_id;
3907 
3908 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
3909 	 * pdev revision 0x20; newer revisions support them. Setting these two
3910 	 * fields does not return an error when the driver sends the command
3911 	 * to the firmware on revision 0x20.
3912 	 */
3913 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
3914 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
3915 
3916 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3917 	if (ret)
3918 		dev_err(&hdev->pdev->dev,
3919 			"Set promisc mode fail, status is %d.\n", ret);
3920 
3921 	return ret;
3922 }
3923 
3924 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
3925 			      bool en_mc, bool en_bc, int vport_id)
3926 {
3927 	if (!param)
3928 		return;
3929 
3930 	memset(param, 0, sizeof(struct hclge_promisc_param));
3931 	if (en_uc)
3932 		param->enable = HCLGE_PROMISC_EN_UC;
3933 	if (en_mc)
3934 		param->enable |= HCLGE_PROMISC_EN_MC;
3935 	if (en_bc)
3936 		param->enable |= HCLGE_PROMISC_EN_BC;
3937 	param->vf_id = vport_id;
3938 }
3939 
3940 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
3941 				  bool en_mc_pmc)
3942 {
3943 	struct hclge_vport *vport = hclge_get_vport(handle);
3944 	struct hclge_dev *hdev = vport->back;
3945 	struct hclge_promisc_param param;
3946 	bool en_bc_pmc = true;
3947 
3948 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
3949 	 * is always bypassed. So broadcast promisc should be disabled until
3950 	 * the user enables promisc mode.
3951 	 */
3952 	if (handle->pdev->revision == 0x20)
3953 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
3954 
3955 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
3956 				 vport->vport_id);
3957 	return hclge_cmd_set_promisc_mode(hdev, &param);
3958 }
3959 
3960 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
3961 {
3962 	struct hclge_get_fd_mode_cmd *req;
3963 	struct hclge_desc desc;
3964 	int ret;
3965 
3966 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
3967 
3968 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
3969 
3970 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3971 	if (ret) {
3972 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
3973 		return ret;
3974 	}
3975 
3976 	*fd_mode = req->mode;
3977 
3978 	return ret;
3979 }
3980 
3981 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
3982 				   u32 *stage1_entry_num,
3983 				   u32 *stage2_entry_num,
3984 				   u16 *stage1_counter_num,
3985 				   u16 *stage2_counter_num)
3986 {
3987 	struct hclge_get_fd_allocation_cmd *req;
3988 	struct hclge_desc desc;
3989 	int ret;
3990 
3991 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
3992 
3993 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
3994 
3995 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3996 	if (ret) {
3997 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
3998 			ret);
3999 		return ret;
4000 	}
4001 
4002 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4003 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4004 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4005 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4006 
4007 	return ret;
4008 }
4009 
4010 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4011 {
4012 	struct hclge_set_fd_key_config_cmd *req;
4013 	struct hclge_fd_key_cfg *stage;
4014 	struct hclge_desc desc;
4015 	int ret;
4016 
4017 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4018 
4019 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4020 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4021 	req->stage = stage_num;
4022 	req->key_select = stage->key_sel;
4023 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4024 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4025 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4026 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4027 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4028 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4029 
4030 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4031 	if (ret)
4032 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4033 
4034 	return ret;
4035 }
4036 
4037 static int hclge_init_fd_config(struct hclge_dev *hdev)
4038 {
4039 #define LOW_2_WORDS		0x03
4040 	struct hclge_fd_key_cfg *key_cfg;
4041 	int ret;
4042 
4043 	if (!hnae3_dev_fd_supported(hdev))
4044 		return 0;
4045 
4046 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4047 	if (ret)
4048 		return ret;
4049 
4050 	switch (hdev->fd_cfg.fd_mode) {
4051 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4052 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4053 		break;
4054 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4055 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4056 		break;
4057 	default:
4058 		dev_err(&hdev->pdev->dev,
4059 			"Unsupported flow director mode %d\n",
4060 			hdev->fd_cfg.fd_mode);
4061 		return -EOPNOTSUPP;
4062 	}
4063 
4064 	hdev->fd_cfg.proto_support =
4065 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4066 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4067 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4068 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4069 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4070 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4071 	key_cfg->outer_sipv6_word_en = 0;
4072 	key_cfg->outer_dipv6_word_en = 0;
4073 
4074 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4075 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4076 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4077 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4078 
4079 	/* If using the max 400-bit key, we can support tuples for ether type */
4080 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4081 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4082 		key_cfg->tuple_active |=
4083 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4084 	}
4085 
4086 	/* roce_type is used to filter roce frames
4087 	 * dst_vport is used to specify the rule
4088 	 */
4089 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4090 
4091 	ret = hclge_get_fd_allocation(hdev,
4092 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4093 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4094 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4095 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4096 	if (ret)
4097 		return ret;
4098 
4099 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4100 }
4101 
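/* Program one flow director TCAM entry. The key is wider than a single
 * command descriptor, so it is split across three chained descriptors
 * (tcam_data of req1/req2/req3). @sel_x selects whether the X or Y half of
 * the key pair is written, and the entry is only marked valid on the X pass
 * when adding.
 */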
4102 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4103 				int loc, u8 *key, bool is_add)
4104 {
4105 	struct hclge_fd_tcam_config_1_cmd *req1;
4106 	struct hclge_fd_tcam_config_2_cmd *req2;
4107 	struct hclge_fd_tcam_config_3_cmd *req3;
4108 	struct hclge_desc desc[3];
4109 	int ret;
4110 
4111 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4112 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4113 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4114 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4115 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4116 
4117 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4118 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4119 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4120 
4121 	req1->stage = stage;
4122 	req1->xy_sel = sel_x ? 1 : 0;
4123 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4124 	req1->index = cpu_to_le32(loc);
4125 	req1->entry_vld = sel_x ? is_add : 0;
4126 
4127 	if (key) {
4128 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4129 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4130 		       sizeof(req2->tcam_data));
4131 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4132 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4133 	}
4134 
4135 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4136 	if (ret)
4137 		dev_err(&hdev->pdev->dev,
4138 			"config tcam key fail, ret=%d\n",
4139 			ret);
4140 
4141 	return ret;
4142 }
4143 
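/* Build the 64-bit action data for a flow director entry: the rule-id
 * write-back fields are set first and then shifted into the upper 32 bits,
 * while the drop/queue/counter/next-stage fields occupy the lower 32 bits.
 */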
4144 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4145 			      struct hclge_fd_ad_data *action)
4146 {
4147 	struct hclge_fd_ad_config_cmd *req;
4148 	struct hclge_desc desc;
4149 	u64 ad_data = 0;
4150 	int ret;
4151 
4152 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4153 
4154 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4155 	req->index = cpu_to_le32(loc);
4156 	req->stage = stage;
4157 
4158 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4159 		      action->write_rule_id_to_bd);
4160 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4161 			action->rule_id);
4162 	ad_data <<= 32;
4163 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4164 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4165 		      action->forward_to_direct_queue);
4166 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4167 			action->queue_id);
4168 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4169 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4170 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4171 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4172 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4173 			action->counter_id);
4174 
4175 	req->ad_data = cpu_to_le64(ad_data);
4176 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4177 	if (ret)
4178 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4179 
4180 	return ret;
4181 }
4182 
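/* Convert one tuple of @rule into its TCAM x/y representation. Tuples marked
 * unused are skipped but reported as handled, so the caller still advances
 * its key cursor; MAC addresses are written byte-reversed, and the other
 * fields go through the calc_x()/calc_y() helpers defined elsewhere in this
 * driver.
 */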
4183 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4184 				   struct hclge_fd_rule *rule)
4185 {
4186 	u16 tmp_x_s, tmp_y_s;
4187 	u32 tmp_x_l, tmp_y_l;
4188 	int i;
4189 
4190 	if (rule->unused_tuple & tuple_bit)
4191 		return true;
4192 
4193 	switch (tuple_bit) {
4194 	case 0:
4195 		return false;
4196 	case BIT(INNER_DST_MAC):
4197 		for (i = 0; i < 6; i++) {
4198 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4199 			       rule->tuples_mask.dst_mac[i]);
4200 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4201 			       rule->tuples_mask.dst_mac[i]);
4202 		}
4203 
4204 		return true;
4205 	case BIT(INNER_SRC_MAC):
4206 		for (i = 0; i < 6; i++) {
4207 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4208 			       rule->tuples_mask.src_mac[i]);
4209 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4210 			       rule->tuples_mask.src_mac[i]);
4211 		}
4212 
4213 		return true;
4214 	case BIT(INNER_VLAN_TAG_FST):
4215 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4216 		       rule->tuples_mask.vlan_tag1);
4217 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4218 		       rule->tuples_mask.vlan_tag1);
4219 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4220 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4221 
4222 		return true;
4223 	case BIT(INNER_ETH_TYPE):
4224 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4225 		       rule->tuples_mask.ether_proto);
4226 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4227 		       rule->tuples_mask.ether_proto);
4228 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4229 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4230 
4231 		return true;
4232 	case BIT(INNER_IP_TOS):
4233 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4234 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4235 
4236 		return true;
4237 	case BIT(INNER_IP_PROTO):
4238 		calc_x(*key_x, rule->tuples.ip_proto,
4239 		       rule->tuples_mask.ip_proto);
4240 		calc_y(*key_y, rule->tuples.ip_proto,
4241 		       rule->tuples_mask.ip_proto);
4242 
4243 		return true;
4244 	case BIT(INNER_SRC_IP):
4245 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
4246 		       rule->tuples_mask.src_ip[3]);
4247 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
4248 		       rule->tuples_mask.src_ip[3]);
4249 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4250 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4251 
4252 		return true;
4253 	case BIT(INNER_DST_IP):
4254 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4255 		       rule->tuples_mask.dst_ip[3]);
4256 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4257 		       rule->tuples_mask.dst_ip[3]);
4258 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4259 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4260 
4261 		return true;
4262 	case BIT(INNER_SRC_PORT):
4263 		calc_x(tmp_x_s, rule->tuples.src_port,
4264 		       rule->tuples_mask.src_port);
4265 		calc_y(tmp_y_s, rule->tuples.src_port,
4266 		       rule->tuples_mask.src_port);
4267 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4268 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4269 
4270 		return true;
4271 	case BIT(INNER_DST_PORT):
4272 		calc_x(tmp_x_s, rule->tuples.dst_port,
4273 		       rule->tuples_mask.dst_port);
4274 		calc_y(tmp_y_s, rule->tuples.dst_port,
4275 		       rule->tuples_mask.dst_port);
4276 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4277 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4278 
4279 		return true;
4280 	default:
4281 		return false;
4282 	}
4283 }
4284 
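/* Encode a port number for the flow director meta data: host ports carry the
 * PF id, VF id and the host/network selector bit, while network ports carry
 * the physical network port id instead.
 */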
4285 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4286 				 u8 vf_id, u8 network_port_id)
4287 {
4288 	u32 port_number = 0;
4289 
4290 	if (port_type == HOST_PORT) {
4291 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4292 				pf_id);
4293 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4294 				vf_id);
4295 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4296 	} else {
4297 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4298 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4299 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4300 	}
4301 
4302 	return port_number;
4303 }
4304 
4305 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4306 				       __le32 *key_x, __le32 *key_y,
4307 				       struct hclge_fd_rule *rule)
4308 {
4309 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4310 	u8 cur_pos = 0, tuple_size, shift_bits;
4311 	int i;
4312 
4313 	for (i = 0; i < MAX_META_DATA; i++) {
4314 		tuple_size = meta_data_key_info[i].key_length;
4315 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4316 
4317 		switch (tuple_bit) {
4318 		case BIT(ROCE_TYPE):
4319 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4320 			cur_pos += tuple_size;
4321 			break;
4322 		case BIT(DST_VPORT):
4323 			port_number = hclge_get_port_number(HOST_PORT, 0,
4324 							    rule->vf_id, 0);
4325 			hnae3_set_field(meta_data,
4326 					GENMASK(cur_pos + tuple_size - 1, cur_pos),
4327 					cur_pos, port_number);
4328 			cur_pos += tuple_size;
4329 			break;
4330 		default:
4331 			break;
4332 		}
4333 	}
4334 
4335 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4336 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4337 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4338 
4339 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4340 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4341 }
4342 
4343 /* A complete key is the combination of a meta data key and a tuple key.
4344  * The meta data key is stored in the MSB region and the tuple key in the
4345  * LSB region; unused bits are filled with 0.
4346  */
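/* For example, assuming a 400-bit max key and a 32-bit meta data region
 * (MAX_META_DATA_LENGTH), the tuple key occupies bytes 0..45 of key_x/key_y
 * and the meta data the final 4 bytes at meta_data_region = 50 - 4 = 46.
 */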
4347 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4348 			    struct hclge_fd_rule *rule)
4349 {
4350 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4351 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4352 	u8 *cur_key_x, *cur_key_y;
4353 	int i, ret, tuple_size;
4354 	u8 meta_data_region;
4355 
4356 	memset(key_x, 0, sizeof(key_x));
4357 	memset(key_y, 0, sizeof(key_y));
4358 	cur_key_x = key_x;
4359 	cur_key_y = key_y;
4360 
4361 	for (i = 0; i < MAX_TUPLE; i++) {
4362 		bool tuple_valid;
4363 		u32 check_tuple;
4364 
4365 		tuple_size = tuple_key_info[i].key_length / 8;
4366 		check_tuple = key_cfg->tuple_active & BIT(i);
4367 
4368 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4369 						     cur_key_y, rule);
4370 		if (tuple_valid) {
4371 			cur_key_x += tuple_size;
4372 			cur_key_y += tuple_size;
4373 		}
4374 	}
4375 
4376 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4377 			MAX_META_DATA_LENGTH / 8;
4378 
4379 	hclge_fd_convert_meta_data(key_cfg,
4380 				   (__le32 *)(key_x + meta_data_region),
4381 				   (__le32 *)(key_y + meta_data_region),
4382 				   rule);
4383 
4384 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4385 				   true);
4386 	if (ret) {
4387 		dev_err(&hdev->pdev->dev,
4388 			"fd key_y config fail, loc=%d, ret=%d\n",
4389 			rule->location, ret);
4390 		return ret;
4391 	}
4392 
4393 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4394 				   true);
4395 	if (ret)
4396 		dev_err(&hdev->pdev->dev,
4397 			"fd key_x config fail, loc=%d, ret=%d\n",
4398 			rule->location, ret);
4399 	return ret;
4400 }
4401 
4402 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4403 			       struct hclge_fd_rule *rule)
4404 {
4405 	struct hclge_fd_ad_data ad_data;
4406 
4407 	ad_data.ad_id = rule->location;
4408 
4409 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4410 		ad_data.drop_packet = true;
4411 		ad_data.forward_to_direct_queue = false;
4412 		ad_data.queue_id = 0;
4413 	} else {
4414 		ad_data.drop_packet = false;
4415 		ad_data.forward_to_direct_queue = true;
4416 		ad_data.queue_id = rule->queue_id;
4417 	}
4418 
4419 	ad_data.use_counter = false;
4420 	ad_data.counter_id = 0;
4421 
4422 	ad_data.use_next_stage = false;
4423 	ad_data.next_input_key = 0;
4424 
4425 	ad_data.write_rule_id_to_bd = true;
4426 	ad_data.rule_id = rule->location;
4427 
4428 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4429 }
4430 
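/* Validate an ethtool flow spec before it is turned into a rule. @unused is
 * filled with a bitmap of tuple fields the user left unspecified, so that
 * hclge_fd_convert_tuple() can skip them when the TCAM key is built.
 */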
4431 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4432 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4433 {
4434 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4435 	struct ethtool_usrip4_spec *usr_ip4_spec;
4436 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4437 	struct ethtool_usrip6_spec *usr_ip6_spec;
4438 	struct ethhdr *ether_spec;
4439 
4440 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4441 		return -EINVAL;
4442 
4443 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4444 		return -EOPNOTSUPP;
4445 
4446 	if ((fs->flow_type & FLOW_EXT) &&
4447 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4448 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4449 		return -EOPNOTSUPP;
4450 	}
4451 
4452 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4453 	case SCTP_V4_FLOW:
4454 	case TCP_V4_FLOW:
4455 	case UDP_V4_FLOW:
4456 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4457 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4458 
4459 		if (!tcp_ip4_spec->ip4src)
4460 			*unused |= BIT(INNER_SRC_IP);
4461 
4462 		if (!tcp_ip4_spec->ip4dst)
4463 			*unused |= BIT(INNER_DST_IP);
4464 
4465 		if (!tcp_ip4_spec->psrc)
4466 			*unused |= BIT(INNER_SRC_PORT);
4467 
4468 		if (!tcp_ip4_spec->pdst)
4469 			*unused |= BIT(INNER_DST_PORT);
4470 
4471 		if (!tcp_ip4_spec->tos)
4472 			*unused |= BIT(INNER_IP_TOS);
4473 
4474 		break;
4475 	case IP_USER_FLOW:
4476 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4477 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4478 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4479 
4480 		if (!usr_ip4_spec->ip4src)
4481 			*unused |= BIT(INNER_SRC_IP);
4482 
4483 		if (!usr_ip4_spec->ip4dst)
4484 			*unused |= BIT(INNER_DST_IP);
4485 
4486 		if (!usr_ip4_spec->tos)
4487 			*unused |= BIT(INNER_IP_TOS);
4488 
4489 		if (!usr_ip4_spec->proto)
4490 			*unused |= BIT(INNER_IP_PROTO);
4491 
4492 		if (usr_ip4_spec->l4_4_bytes)
4493 			return -EOPNOTSUPP;
4494 
4495 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4496 			return -EOPNOTSUPP;
4497 
4498 		break;
4499 	case SCTP_V6_FLOW:
4500 	case TCP_V6_FLOW:
4501 	case UDP_V6_FLOW:
4502 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4503 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4504 			BIT(INNER_IP_TOS);
4505 
4506 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4507 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4508 			*unused |= BIT(INNER_SRC_IP);
4509 
4510 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4511 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4512 			*unused |= BIT(INNER_DST_IP);
4513 
4514 		if (!tcp_ip6_spec->psrc)
4515 			*unused |= BIT(INNER_SRC_PORT);
4516 
4517 		if (!tcp_ip6_spec->pdst)
4518 			*unused |= BIT(INNER_DST_PORT);
4519 
4520 		if (tcp_ip6_spec->tclass)
4521 			return -EOPNOTSUPP;
4522 
4523 		break;
4524 	case IPV6_USER_FLOW:
4525 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4526 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4527 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4528 			BIT(INNER_DST_PORT);
4529 
4530 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4531 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4532 			*unused |= BIT(INNER_SRC_IP);
4533 
4534 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4535 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4536 			*unused |= BIT(INNER_DST_IP);
4537 
4538 		if (!usr_ip6_spec->l4_proto)
4539 			*unused |= BIT(INNER_IP_PROTO);
4540 
4541 		if (usr_ip6_spec->tclass)
4542 			return -EOPNOTSUPP;
4543 
4544 		if (usr_ip6_spec->l4_4_bytes)
4545 			return -EOPNOTSUPP;
4546 
4547 		break;
4548 	case ETHER_FLOW:
4549 		ether_spec = &fs->h_u.ether_spec;
4550 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4551 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4552 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4553 
4554 		if (is_zero_ether_addr(ether_spec->h_source))
4555 			*unused |= BIT(INNER_SRC_MAC);
4556 
4557 		if (is_zero_ether_addr(ether_spec->h_dest))
4558 			*unused |= BIT(INNER_DST_MAC);
4559 
4560 		if (!ether_spec->h_proto)
4561 			*unused |= BIT(INNER_ETH_TYPE);
4562 
4563 		break;
4564 	default:
4565 		return -EOPNOTSUPP;
4566 	}
4567 
4568 	if ((fs->flow_type & FLOW_EXT)) {
4569 		if (fs->h_ext.vlan_etype)
4570 			return -EOPNOTSUPP;
4571 		if (!fs->h_ext.vlan_tci)
4572 			*unused |= BIT(INNER_VLAN_TAG_FST);
4573 
4574 		if (fs->m_ext.vlan_tci) {
4575 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4576 				return -EINVAL;
4577 		}
4578 	} else {
4579 		*unused |= BIT(INNER_VLAN_TAG_FST);
4580 	}
4581 
4582 	if (fs->flow_type & FLOW_MAC_EXT) {
4583 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4584 			return -EOPNOTSUPP;
4585 
4586 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4587 			*unused |= BIT(INNER_DST_MAC);
4588 		else
4589 			*unused &= ~(BIT(INNER_DST_MAC));
4590 	}
4591 
4592 	return 0;
4593 }
4594 
4595 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4596 {
4597 	struct hclge_fd_rule *rule = NULL;
4598 	struct hlist_node *node2;
4599 
4600 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4601 		if (rule->location >= location)
4602 			break;
4603 	}
4604 
4605 	return rule && rule->location == location;
4606 }
4607 
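/* The flow director rule list is kept sorted by location. Adding a rule at an
 * occupied location first deletes the old entry and then inserts the new one
 * behind its predecessor (or at the list head); deleting a rule that does not
 * exist is reported as an error.
 */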
4608 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4609 				     struct hclge_fd_rule *new_rule,
4610 				     u16 location,
4611 				     bool is_add)
4612 {
4613 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4614 	struct hlist_node *node2;
4615 
4616 	if (is_add && !new_rule)
4617 		return -EINVAL;
4618 
4619 	hlist_for_each_entry_safe(rule, node2,
4620 				  &hdev->fd_rule_list, rule_node) {
4621 		if (rule->location >= location)
4622 			break;
4623 		parent = rule;
4624 	}
4625 
4626 	if (rule && rule->location == location) {
4627 		hlist_del(&rule->rule_node);
4628 		kfree(rule);
4629 		hdev->hclge_fd_rule_num--;
4630 
4631 		if (!is_add)
4632 			return 0;
4633 
4634 	} else if (!is_add) {
4635 		dev_err(&hdev->pdev->dev,
4636 			"delete fail, rule %d does not exist\n",
4637 			location);
4638 		return -EINVAL;
4639 	}
4640 
4641 	INIT_HLIST_NODE(&new_rule->rule_node);
4642 
4643 	if (parent)
4644 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4645 	else
4646 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4647 
4648 	hdev->hclge_fd_rule_num++;
4649 
4650 	return 0;
4651 }
4652 
4653 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4654 			      struct ethtool_rx_flow_spec *fs,
4655 			      struct hclge_fd_rule *rule)
4656 {
4657 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4658 
4659 	switch (flow_type) {
4660 	case SCTP_V4_FLOW:
4661 	case TCP_V4_FLOW:
4662 	case UDP_V4_FLOW:
4663 		rule->tuples.src_ip[3] =
4664 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4665 		rule->tuples_mask.src_ip[3] =
4666 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4667 
4668 		rule->tuples.dst_ip[3] =
4669 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4670 		rule->tuples_mask.dst_ip[3] =
4671 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4672 
4673 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4674 		rule->tuples_mask.src_port =
4675 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4676 
4677 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4678 		rule->tuples_mask.dst_port =
4679 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4680 
4681 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4682 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4683 
4684 		rule->tuples.ether_proto = ETH_P_IP;
4685 		rule->tuples_mask.ether_proto = 0xFFFF;
4686 
4687 		break;
4688 	case IP_USER_FLOW:
4689 		rule->tuples.src_ip[3] =
4690 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
4691 		rule->tuples_mask.src_ip[3] =
4692 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
4693 
4694 		rule->tuples.dst_ip[3] =
4695 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
4696 		rule->tuples_mask.dst_ip[3] =
4697 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
4698 
4699 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
4700 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
4701 
4702 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
4703 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
4704 
4705 		rule->tuples.ether_proto = ETH_P_IP;
4706 		rule->tuples_mask.ether_proto = 0xFFFF;
4707 
4708 		break;
4709 	case SCTP_V6_FLOW:
4710 	case TCP_V6_FLOW:
4711 	case UDP_V6_FLOW:
4712 		be32_to_cpu_array(rule->tuples.src_ip,
4713 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
4714 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4715 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
4716 
4717 		be32_to_cpu_array(rule->tuples.dst_ip,
4718 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
4719 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4720 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
4721 
4722 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
4723 		rule->tuples_mask.src_port =
4724 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
4725 
4726 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
4727 		rule->tuples_mask.dst_port =
4728 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
4729 
4730 		rule->tuples.ether_proto = ETH_P_IPV6;
4731 		rule->tuples_mask.ether_proto = 0xFFFF;
4732 
4733 		break;
4734 	case IPV6_USER_FLOW:
4735 		be32_to_cpu_array(rule->tuples.src_ip,
4736 				  fs->h_u.usr_ip6_spec.ip6src, 4);
4737 		be32_to_cpu_array(rule->tuples_mask.src_ip,
4738 				  fs->m_u.usr_ip6_spec.ip6src, 4);
4739 
4740 		be32_to_cpu_array(rule->tuples.dst_ip,
4741 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
4742 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
4743 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
4744 
4745 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
4746 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
4747 
4748 		rule->tuples.ether_proto = ETH_P_IPV6;
4749 		rule->tuples_mask.ether_proto = 0xFFFF;
4750 
4751 		break;
4752 	case ETHER_FLOW:
4753 		ether_addr_copy(rule->tuples.src_mac,
4754 				fs->h_u.ether_spec.h_source);
4755 		ether_addr_copy(rule->tuples_mask.src_mac,
4756 				fs->m_u.ether_spec.h_source);
4757 
4758 		ether_addr_copy(rule->tuples.dst_mac,
4759 				fs->h_u.ether_spec.h_dest);
4760 		ether_addr_copy(rule->tuples_mask.dst_mac,
4761 				fs->m_u.ether_spec.h_dest);
4762 
4763 		rule->tuples.ether_proto =
4764 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
4765 		rule->tuples_mask.ether_proto =
4766 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
4767 
4768 		break;
4769 	default:
4770 		return -EOPNOTSUPP;
4771 	}
4772 
4773 	switch (flow_type) {
4774 	case SCTP_V4_FLOW:
4775 	case SCTP_V6_FLOW:
4776 		rule->tuples.ip_proto = IPPROTO_SCTP;
4777 		rule->tuples_mask.ip_proto = 0xFF;
4778 		break;
4779 	case TCP_V4_FLOW:
4780 	case TCP_V6_FLOW:
4781 		rule->tuples.ip_proto = IPPROTO_TCP;
4782 		rule->tuples_mask.ip_proto = 0xFF;
4783 		break;
4784 	case UDP_V4_FLOW:
4785 	case UDP_V6_FLOW:
4786 		rule->tuples.ip_proto = IPPROTO_UDP;
4787 		rule->tuples_mask.ip_proto = 0xFF;
4788 		break;
4789 	default:
4790 		break;
4791 	}
4792 
4793 	if ((fs->flow_type & FLOW_EXT)) {
4794 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
4795 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
4796 	}
4797 
4798 	if (fs->flow_type & FLOW_MAC_EXT) {
4799 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
4800 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
4801 	}
4802 
4803 	return 0;
4804 }
4805 
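/* Entry point for ethtool flow director rules, e.g. the illustrative command
 * (device name and values are arbitrary):
 *
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 dst-port 80 \
 *           action 3 loc 1
 *
 * which steers matching packets to queue 3 using TCAM location 1.
 */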
4806 static int hclge_add_fd_entry(struct hnae3_handle *handle,
4807 			      struct ethtool_rxnfc *cmd)
4808 {
4809 	struct hclge_vport *vport = hclge_get_vport(handle);
4810 	struct hclge_dev *hdev = vport->back;
4811 	u16 dst_vport_id = 0, q_index = 0;
4812 	struct ethtool_rx_flow_spec *fs;
4813 	struct hclge_fd_rule *rule;
4814 	u32 unused = 0;
4815 	u8 action;
4816 	int ret;
4817 
4818 	if (!hnae3_dev_fd_supported(hdev))
4819 		return -EOPNOTSUPP;
4820 
4821 	if (!hdev->fd_en) {
4822 		dev_warn(&hdev->pdev->dev,
4823 			 "Please enable flow director first\n");
4824 		return -EOPNOTSUPP;
4825 	}
4826 
4827 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4828 
4829 	ret = hclge_fd_check_spec(hdev, fs, &unused);
4830 	if (ret) {
4831 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
4832 		return ret;
4833 	}
4834 
4835 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
4836 		action = HCLGE_FD_ACTION_DROP_PACKET;
4837 	} else {
4838 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
4839 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
4840 		u16 tqps;
4841 
4842 		if (vf > hdev->num_req_vfs) {
4843 			dev_err(&hdev->pdev->dev,
4844 				"Error: vf id (%d) > max vf num (%d)\n",
4845 				vf, hdev->num_req_vfs);
4846 			return -EINVAL;
4847 		}
4848 
4849 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
4850 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
4851 
4852 		if (ring >= tqps) {
4853 			dev_err(&hdev->pdev->dev,
4854 				"Error: queue id (%d) > max tqp num (%d)\n",
4855 				ring, tqps - 1);
4856 			return -EINVAL;
4857 		}
4858 
4859 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
4860 		q_index = ring;
4861 	}
4862 
4863 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
4864 	if (!rule)
4865 		return -ENOMEM;
4866 
4867 	ret = hclge_fd_get_tuple(hdev, fs, rule);
4868 	if (ret)
4869 		goto free_rule;
4870 
4871 	rule->flow_type = fs->flow_type;
4872 
4873 	rule->location = fs->location;
4874 	rule->unused_tuple = unused;
4875 	rule->vf_id = dst_vport_id;
4876 	rule->queue_id = q_index;
4877 	rule->action = action;
4878 
4879 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4880 	if (ret)
4881 		goto free_rule;
4882 
4883 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4884 	if (ret)
4885 		goto free_rule;
4886 
4887 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
4888 	if (ret)
4889 		goto free_rule;
4890 
4891 	return ret;
4892 
4893 free_rule:
4894 	kfree(rule);
4895 	return ret;
4896 }
4897 
4898 static int hclge_del_fd_entry(struct hnae3_handle *handle,
4899 			      struct ethtool_rxnfc *cmd)
4900 {
4901 	struct hclge_vport *vport = hclge_get_vport(handle);
4902 	struct hclge_dev *hdev = vport->back;
4903 	struct ethtool_rx_flow_spec *fs;
4904 	int ret;
4905 
4906 	if (!hnae3_dev_fd_supported(hdev))
4907 		return -EOPNOTSUPP;
4908 
4909 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
4910 
4911 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4912 		return -EINVAL;
4913 
4914 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
4915 		dev_err(&hdev->pdev->dev,
4916 			"Delete fail, rule %d does not exist\n",
4917 			fs->location);
4918 		return -ENOENT;
4919 	}
4920 
4921 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4922 				   fs->location, NULL, false);
4923 	if (ret)
4924 		return ret;
4925 
4926 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
4927 					 false);
4928 }
4929 
4930 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
4931 				     bool clear_list)
4932 {
4933 	struct hclge_vport *vport = hclge_get_vport(handle);
4934 	struct hclge_dev *hdev = vport->back;
4935 	struct hclge_fd_rule *rule;
4936 	struct hlist_node *node;
4937 
4938 	if (!hnae3_dev_fd_supported(hdev))
4939 		return;
4940 
4941 	if (clear_list) {
4942 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4943 					  rule_node) {
4944 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4945 					     rule->location, NULL, false);
4946 			hlist_del(&rule->rule_node);
4947 			kfree(rule);
4948 			hdev->hclge_fd_rule_num--;
4949 		}
4950 	} else {
4951 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
4952 					  rule_node)
4953 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
4954 					     rule->location, NULL, false);
4955 	}
4956 }
4957 
4958 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
4959 {
4960 	struct hclge_vport *vport = hclge_get_vport(handle);
4961 	struct hclge_dev *hdev = vport->back;
4962 	struct hclge_fd_rule *rule;
4963 	struct hlist_node *node;
4964 	int ret;
4965 
4966 	/* Return ok here, because reset error handling will check this
4967 	 * return value. If error is returned here, the reset process will
4968 	 * fail.
4969 	 */
4970 	if (!hnae3_dev_fd_supported(hdev))
4971 		return 0;
4972 
4973 	/* if fd is disabled, it should not be restored during reset */
4974 	if (!hdev->fd_en)
4975 		return 0;
4976 
4977 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
4978 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
4979 		if (!ret)
4980 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
4981 
4982 		if (ret) {
4983 			dev_warn(&hdev->pdev->dev,
4984 				 "Restore rule %d failed, remove it\n",
4985 				 rule->location);
4986 			hlist_del(&rule->rule_node);
4987 			kfree(rule);
4988 			hdev->hclge_fd_rule_num--;
4989 		}
4990 	}
4991 	return 0;
4992 }
4993 
4994 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
4995 				 struct ethtool_rxnfc *cmd)
4996 {
4997 	struct hclge_vport *vport = hclge_get_vport(handle);
4998 	struct hclge_dev *hdev = vport->back;
4999 
5000 	if (!hnae3_dev_fd_supported(hdev))
5001 		return -EOPNOTSUPP;
5002 
5003 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5004 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5005 
5006 	return 0;
5007 }
5008 
5009 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5010 				  struct ethtool_rxnfc *cmd)
5011 {
5012 	struct hclge_vport *vport = hclge_get_vport(handle);
5013 	struct hclge_fd_rule *rule = NULL;
5014 	struct hclge_dev *hdev = vport->back;
5015 	struct ethtool_rx_flow_spec *fs;
5016 	struct hlist_node *node2;
5017 
5018 	if (!hnae3_dev_fd_supported(hdev))
5019 		return -EOPNOTSUPP;
5020 
5021 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5022 
5023 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5024 		if (rule->location >= fs->location)
5025 			break;
5026 	}
5027 
5028 	if (!rule || fs->location != rule->location)
5029 		return -ENOENT;
5030 
5031 	fs->flow_type = rule->flow_type;
5032 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5033 	case SCTP_V4_FLOW:
5034 	case TCP_V4_FLOW:
5035 	case UDP_V4_FLOW:
5036 		fs->h_u.tcp_ip4_spec.ip4src =
5037 				cpu_to_be32(rule->tuples.src_ip[3]);
5038 		fs->m_u.tcp_ip4_spec.ip4src =
5039 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5040 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5041 
5042 		fs->h_u.tcp_ip4_spec.ip4dst =
5043 				cpu_to_be32(rule->tuples.dst_ip[3]);
5044 		fs->m_u.tcp_ip4_spec.ip4dst =
5045 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5046 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5047 
5048 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5049 		fs->m_u.tcp_ip4_spec.psrc =
5050 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5051 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5052 
5053 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5054 		fs->m_u.tcp_ip4_spec.pdst =
5055 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5056 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5057 
5058 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5059 		fs->m_u.tcp_ip4_spec.tos =
5060 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5061 				0 : rule->tuples_mask.ip_tos;
5062 
5063 		break;
5064 	case IP_USER_FLOW:
5065 		fs->h_u.usr_ip4_spec.ip4src =
5066 				cpu_to_be32(rule->tuples.src_ip[3]);
5067 		fs->m_u.tcp_ip4_spec.ip4src =
5068 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5069 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5070 
5071 		fs->h_u.usr_ip4_spec.ip4dst =
5072 				cpu_to_be32(rule->tuples.dst_ip[3]);
5073 		fs->m_u.usr_ip4_spec.ip4dst =
5074 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5075 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5076 
5077 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5078 		fs->m_u.usr_ip4_spec.tos =
5079 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5080 				0 : rule->tuples_mask.ip_tos;
5081 
5082 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5083 		fs->m_u.usr_ip4_spec.proto =
5084 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5085 				0 : rule->tuples_mask.ip_proto;
5086 
5087 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5088 
5089 		break;
5090 	case SCTP_V6_FLOW:
5091 	case TCP_V6_FLOW:
5092 	case UDP_V6_FLOW:
5093 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5094 				  rule->tuples.src_ip, 4);
5095 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5096 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5097 		else
5098 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5099 					  rule->tuples_mask.src_ip, 4);
5100 
5101 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5102 				  rule->tuples.dst_ip, 4);
5103 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5104 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5105 		else
5106 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5107 					  rule->tuples_mask.dst_ip, 4);
5108 
5109 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5110 		fs->m_u.tcp_ip6_spec.psrc =
5111 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5112 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5113 
5114 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5115 		fs->m_u.tcp_ip6_spec.pdst =
5116 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5117 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5118 
5119 		break;
5120 	case IPV6_USER_FLOW:
5121 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5122 				  rule->tuples.src_ip, 4);
5123 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5124 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5125 		else
5126 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5127 					  rule->tuples_mask.src_ip, 4);
5128 
5129 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5130 				  rule->tuples.dst_ip, 4);
5131 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5132 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5133 		else
5134 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5135 					  rule->tuples_mask.dst_ip, 4);
5136 
5137 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5138 		fs->m_u.usr_ip6_spec.l4_proto =
5139 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5140 				0 : rule->tuples_mask.ip_proto;
5141 
5142 		break;
5143 	case ETHER_FLOW:
5144 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5145 				rule->tuples.src_mac);
5146 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5147 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5148 		else
5149 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5150 					rule->tuples_mask.src_mac);
5151 
5152 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5153 				rule->tuples.dst_mac);
5154 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5155 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5156 		else
5157 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5158 					rule->tuples_mask.dst_mac);
5159 
5160 		fs->h_u.ether_spec.h_proto =
5161 				cpu_to_be16(rule->tuples.ether_proto);
5162 		fs->m_u.ether_spec.h_proto =
5163 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5164 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5165 
5166 		break;
5167 	default:
5168 		return -EOPNOTSUPP;
5169 	}
5170 
5171 	if (fs->flow_type & FLOW_EXT) {
5172 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5173 		fs->m_ext.vlan_tci =
5174 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5175 				cpu_to_be16(VLAN_VID_MASK) :
5176 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5177 	}
5178 
5179 	if (fs->flow_type & FLOW_MAC_EXT) {
5180 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5181 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5182 			eth_zero_addr(fs->m_ext.h_dest);
5183 		else
5184 			ether_addr_copy(fs->m_ext.h_dest,
5185 					rule->tuples_mask.dst_mac);
5186 	}
5187 
5188 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5189 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5190 	} else {
5191 		u64 vf_id;
5192 
5193 		fs->ring_cookie = rule->queue_id;
5194 		vf_id = rule->vf_id;
5195 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5196 		fs->ring_cookie |= vf_id;
5197 	}
5198 
5199 	return 0;
5200 }
5201 
5202 static int hclge_get_all_rules(struct hnae3_handle *handle,
5203 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5204 {
5205 	struct hclge_vport *vport = hclge_get_vport(handle);
5206 	struct hclge_dev *hdev = vport->back;
5207 	struct hclge_fd_rule *rule;
5208 	struct hlist_node *node2;
5209 	int cnt = 0;
5210 
5211 	if (!hnae3_dev_fd_supported(hdev))
5212 		return -EOPNOTSUPP;
5213 
5214 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5215 
5216 	hlist_for_each_entry_safe(rule, node2,
5217 				  &hdev->fd_rule_list, rule_node) {
5218 		if (cnt == cmd->rule_cnt)
5219 			return -EMSGSIZE;
5220 
5221 		rule_locs[cnt] = rule->location;
5222 		cnt++;
5223 	}
5224 
5225 	cmd->rule_cnt = cnt;
5226 
5227 	return 0;
5228 }
5229 
5230 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5231 {
5232 	struct hclge_vport *vport = hclge_get_vport(handle);
5233 	struct hclge_dev *hdev = vport->back;
5234 
5235 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5236 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5237 }
5238 
5239 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5240 {
5241 	struct hclge_vport *vport = hclge_get_vport(handle);
5242 	struct hclge_dev *hdev = vport->back;
5243 
5244 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5245 }
5246 
5247 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5248 {
5249 	struct hclge_vport *vport = hclge_get_vport(handle);
5250 	struct hclge_dev *hdev = vport->back;
5251 
5252 	return hdev->rst_stats.hw_reset_done_cnt;
5253 }
5254 
5255 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5256 {
5257 	struct hclge_vport *vport = hclge_get_vport(handle);
5258 	struct hclge_dev *hdev = vport->back;
5259 
5260 	hdev->fd_en = enable;
5261 	if (!enable)
5262 		hclge_del_all_fd_entries(handle, false);
5263 	else
5264 		hclge_restore_fd_entries(handle);
5265 }
5266 
5267 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5268 {
5269 	struct hclge_desc desc;
5270 	struct hclge_config_mac_mode_cmd *req =
5271 		(struct hclge_config_mac_mode_cmd *)desc.data;
5272 	u32 loop_en = 0;
5273 	int ret;
5274 
5275 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5276 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5277 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5278 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5279 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5280 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5281 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5282 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5283 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5284 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5285 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5286 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5287 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5288 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5289 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5290 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5291 
5292 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5293 	if (ret)
5294 		dev_err(&hdev->pdev->dev,
5295 			"mac enable fail, ret =%d.\n", ret);
5296 }
5297 
5298 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5299 {
5300 	struct hclge_config_mac_mode_cmd *req;
5301 	struct hclge_desc desc;
5302 	u32 loop_en;
5303 	int ret;
5304 
5305 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5306 	/* 1 Read out the MAC mode config first */
5307 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5308 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5309 	if (ret) {
5310 		dev_err(&hdev->pdev->dev,
5311 			"mac loopback get fail, ret =%d.\n", ret);
5312 		return ret;
5313 	}
5314 
5315 	/* 2 Then setup the loopback flag */
5316 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5317 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5318 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5319 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5320 
5321 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5322 
5323 	/* 3 Config mac work mode with the loopback flag
5324 	 * and its original configuration parameters
5325 	 */
5326 	hclge_cmd_reuse_desc(&desc, false);
5327 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5328 	if (ret)
5329 		dev_err(&hdev->pdev->dev,
5330 			"mac loopback set fail, ret =%d.\n", ret);
5331 	return ret;
5332 }
5333 
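/* Enable or disable the serdes (serial/parallel) internal loopback. After the
 * loopback command is issued, the firmware done flag is polled for up to
 * HCLGE_SERDES_RETRY_NUM * HCLGE_SERDES_RETRY_MS ms, the MAC is reconfigured,
 * and finally the MAC link status is polled until it matches the expected
 * up/down state.
 */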
5334 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5335 				     enum hnae3_loop loop_mode)
5336 {
5337 #define HCLGE_SERDES_RETRY_MS	10
5338 #define HCLGE_SERDES_RETRY_NUM	100
5339 
5340 #define HCLGE_MAC_LINK_STATUS_MS   20
5341 #define HCLGE_MAC_LINK_STATUS_NUM  10
5342 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5343 #define HCLGE_MAC_LINK_STATUS_UP   1
5344 
5345 	struct hclge_serdes_lb_cmd *req;
5346 	struct hclge_desc desc;
5347 	int mac_link_ret = 0;
5348 	int ret, i = 0;
5349 	u8 loop_mode_b;
5350 
5351 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5352 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5353 
5354 	switch (loop_mode) {
5355 	case HNAE3_LOOP_SERIAL_SERDES:
5356 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5357 		break;
5358 	case HNAE3_LOOP_PARALLEL_SERDES:
5359 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5360 		break;
5361 	default:
5362 		dev_err(&hdev->pdev->dev,
5363 			"unsupported serdes loopback mode %d\n", loop_mode);
5364 		return -ENOTSUPP;
5365 	}
5366 
5367 	if (en) {
5368 		req->enable = loop_mode_b;
5369 		req->mask = loop_mode_b;
5370 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5371 	} else {
5372 		req->mask = loop_mode_b;
5373 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5374 	}
5375 
5376 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5377 	if (ret) {
5378 		dev_err(&hdev->pdev->dev,
5379 			"serdes loopback set fail, ret = %d\n", ret);
5380 		return ret;
5381 	}
5382 
5383 	do {
5384 		msleep(HCLGE_SERDES_RETRY_MS);
5385 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5386 					   true);
5387 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5388 		if (ret) {
5389 			dev_err(&hdev->pdev->dev,
5390 				"serdes loopback get, ret = %d\n", ret);
5391 			return ret;
5392 		}
5393 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5394 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5395 
5396 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5397 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5398 		return -EBUSY;
5399 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5400 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5401 		return -EIO;
5402 	}
5403 
5404 	hclge_cfg_mac_mode(hdev, en);
5405 
5406 	i = 0;
5407 	do {
		/* serdes internal loopback, independent of the network cable. */
5409 		msleep(HCLGE_MAC_LINK_STATUS_MS);
5410 		ret = hclge_get_mac_link_status(hdev);
5411 		if (ret == mac_link_ret)
5412 			return 0;
5413 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5414 
5415 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5416 
5417 	return -EBUSY;
5418 }
5419 
5420 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5421 			    int stream_id, bool enable)
5422 {
5423 	struct hclge_desc desc;
5424 	struct hclge_cfg_com_tqp_queue_cmd *req =
5425 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5426 	int ret;
5427 
5428 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5429 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5430 	req->stream_id = cpu_to_le16(stream_id);
5431 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5432 
5433 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5434 	if (ret)
5435 		dev_err(&hdev->pdev->dev,
5436 			"Tqp enable fail, status =%d.\n", ret);
5437 	return ret;
5438 }
5439 
5440 static int hclge_set_loopback(struct hnae3_handle *handle,
5441 			      enum hnae3_loop loop_mode, bool en)
5442 {
5443 	struct hclge_vport *vport = hclge_get_vport(handle);
5444 	struct hnae3_knic_private_info *kinfo;
5445 	struct hclge_dev *hdev = vport->back;
5446 	int i, ret;
5447 
5448 	switch (loop_mode) {
5449 	case HNAE3_LOOP_APP:
5450 		ret = hclge_set_app_loopback(hdev, en);
5451 		break;
5452 	case HNAE3_LOOP_SERIAL_SERDES:
5453 	case HNAE3_LOOP_PARALLEL_SERDES:
5454 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5455 		break;
5456 	default:
5457 		ret = -ENOTSUPP;
5458 		dev_err(&hdev->pdev->dev,
5459 			"loop_mode %d is not supported\n", loop_mode);
5460 		break;
5461 	}
5462 
5463 	if (ret)
5464 		return ret;
5465 
5466 	kinfo = &vport->nic.kinfo;
5467 	for (i = 0; i < kinfo->num_tqps; i++) {
5468 		ret = hclge_tqp_enable(hdev, i, 0, en);
5469 		if (ret)
5470 			return ret;
5471 	}
5472 
5473 	return 0;
5474 }
5475 
5476 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5477 {
5478 	struct hclge_vport *vport = hclge_get_vport(handle);
5479 	struct hnae3_knic_private_info *kinfo;
5480 	struct hnae3_queue *queue;
5481 	struct hclge_tqp *tqp;
5482 	int i;
5483 
5484 	kinfo = &vport->nic.kinfo;
5485 	for (i = 0; i < kinfo->num_tqps; i++) {
5486 		queue = handle->kinfo.tqp[i];
5487 		tqp = container_of(queue, struct hclge_tqp, q);
5488 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5489 	}
5490 }
5491 
5492 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5493 {
5494 	struct hclge_vport *vport = hclge_get_vport(handle);
5495 	struct hclge_dev *hdev = vport->back;
5496 
5497 	if (enable) {
5498 		mod_timer(&hdev->service_timer, jiffies + HZ);
5499 	} else {
5500 		del_timer_sync(&hdev->service_timer);
5501 		cancel_work_sync(&hdev->service_task);
5502 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5503 	}
5504 }
5505 
5506 static int hclge_ae_start(struct hnae3_handle *handle)
5507 {
5508 	struct hclge_vport *vport = hclge_get_vport(handle);
5509 	struct hclge_dev *hdev = vport->back;
5510 
5511 	/* mac enable */
5512 	hclge_cfg_mac_mode(hdev, true);
5513 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5514 	hdev->hw.mac.link = 0;
5515 
5516 	/* reset tqp stats */
5517 	hclge_reset_tqp_stats(handle);
5518 
5519 	hclge_mac_start_phy(hdev);
5520 
5521 	return 0;
5522 }
5523 
5524 static void hclge_ae_stop(struct hnae3_handle *handle)
5525 {
5526 	struct hclge_vport *vport = hclge_get_vport(handle);
5527 	struct hclge_dev *hdev = vport->back;
5528 	int i;
5529 
5530 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5531 
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the phy here.
	 */
5535 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5536 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5537 		hclge_mac_stop_phy(hdev);
5538 		return;
5539 	}
5540 
5541 	for (i = 0; i < handle->kinfo.num_tqps; i++)
5542 		hclge_reset_tqp(handle, i);
5543 
	/* mac disable */
5545 	hclge_cfg_mac_mode(hdev, false);
5546 
5547 	hclge_mac_stop_phy(hdev);
5548 
5549 	/* reset tqp stats */
5550 	hclge_reset_tqp_stats(handle);
5551 	hclge_update_link_status(hdev);
5552 }
5553 
5554 int hclge_vport_start(struct hclge_vport *vport)
5555 {
5556 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5557 	vport->last_active_jiffies = jiffies;
5558 	return 0;
5559 }
5560 
5561 void hclge_vport_stop(struct hclge_vport *vport)
5562 {
5563 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5564 }
5565 
5566 static int hclge_client_start(struct hnae3_handle *handle)
5567 {
5568 	struct hclge_vport *vport = hclge_get_vport(handle);
5569 
5570 	return hclge_vport_start(vport);
5571 }
5572 
5573 static void hclge_client_stop(struct hnae3_handle *handle)
5574 {
5575 	struct hclge_vport *vport = hclge_get_vport(handle);
5576 
5577 	hclge_vport_stop(vport);
5578 }
5579 
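/* hclge_get_mac_vlan_cmd_status() translates the command queue response
 * and the mac_vlan table response code of an add/remove/lookup operation
 * into a standard errno value and logs the failure reason.
 */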
5580 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5581 					 u16 cmdq_resp, u8  resp_code,
5582 					 enum hclge_mac_vlan_tbl_opcode op)
5583 {
5584 	struct hclge_dev *hdev = vport->back;
5585 	int return_status = -EIO;
5586 
5587 	if (cmdq_resp) {
5588 		dev_err(&hdev->pdev->dev,
5589 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5590 			cmdq_resp);
5591 		return -EIO;
5592 	}
5593 
5594 	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1) {
5596 			return_status = 0;
5597 		} else if (resp_code == 2) {
5598 			return_status = -ENOSPC;
5599 			dev_err(&hdev->pdev->dev,
5600 				"add mac addr failed for uc_overflow.\n");
5601 		} else if (resp_code == 3) {
5602 			return_status = -ENOSPC;
5603 			dev_err(&hdev->pdev->dev,
5604 				"add mac addr failed for mc_overflow.\n");
5605 		} else {
5606 			dev_err(&hdev->pdev->dev,
5607 				"add mac addr failed for undefined, code=%d.\n",
5608 				resp_code);
5609 		}
5610 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5611 		if (!resp_code) {
5612 			return_status = 0;
5613 		} else if (resp_code == 1) {
5614 			return_status = -ENOENT;
5615 			dev_dbg(&hdev->pdev->dev,
5616 				"remove mac addr failed for miss.\n");
5617 		} else {
5618 			dev_err(&hdev->pdev->dev,
5619 				"remove mac addr failed for undefined, code=%d.\n",
5620 				resp_code);
5621 		}
5622 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5623 		if (!resp_code) {
5624 			return_status = 0;
5625 		} else if (resp_code == 1) {
5626 			return_status = -ENOENT;
5627 			dev_dbg(&hdev->pdev->dev,
5628 				"lookup mac addr failed for miss.\n");
5629 		} else {
5630 			dev_err(&hdev->pdev->dev,
5631 				"lookup mac addr failed for undefined, code=%d.\n",
5632 				resp_code);
5633 		}
5634 	} else {
5635 		return_status = -EINVAL;
5636 		dev_err(&hdev->pdev->dev,
5637 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5638 			op);
5639 	}
5640 
5641 	return return_status;
5642 }
5643 
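/* hclge_update_desc_vfid() sets or clears the bit for the given vfid in
 * the function bitmap of a multi-descriptor mac_vlan command: vfid 0-191
 * is kept in desc[1] and vfid 192-255 in desc[2].
 */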
5644 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5645 {
5646 	int word_num;
5647 	int bit_num;
5648 
5649 	if (vfid > 255 || vfid < 0)
5650 		return -EIO;
5651 
5652 	if (vfid >= 0 && vfid <= 191) {
5653 		word_num = vfid / 32;
5654 		bit_num  = vfid % 32;
5655 		if (clr)
5656 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5657 		else
5658 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5659 	} else {
5660 		word_num = (vfid - 192) / 32;
5661 		bit_num  = vfid % 32;
5662 		if (clr)
5663 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5664 		else
5665 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5666 	}
5667 
5668 	return 0;
5669 }
5670 
5671 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5672 {
5673 #define HCLGE_DESC_NUMBER 3
5674 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5675 	int i, j;
5676 
5677 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5678 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5679 			if (desc[i].data[j])
5680 				return false;
5681 
5682 	return true;
5683 }
5684 
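/* hclge_prepare_mac_addr() packs a MAC address into a mac_vlan table
 * entry: bytes 0-3 go into mac_addr_hi32 and bytes 4-5 into mac_addr_lo16,
 * with the entry type and multicast enable bits set for multicast
 * addresses.
 */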
5685 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5686 				   const u8 *addr, bool is_mc)
5687 {
5688 	const unsigned char *mac_addr = addr;
	u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
		       (mac_addr[2] << 16) | ((u32)mac_addr[3] << 24);
5691 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
5692 
5693 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5694 	if (is_mc) {
5695 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
5696 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
5697 	}
5698 
5699 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
5700 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
5701 }
5702 
5703 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
5704 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
5705 {
5706 	struct hclge_dev *hdev = vport->back;
5707 	struct hclge_desc desc;
5708 	u8 resp_code;
5709 	u16 retval;
5710 	int ret;
5711 
5712 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
5713 
5714 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5715 
5716 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5717 	if (ret) {
5718 		dev_err(&hdev->pdev->dev,
5719 			"del mac addr failed for cmd_send, ret =%d.\n",
5720 			ret);
5721 		return ret;
5722 	}
5723 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5724 	retval = le16_to_cpu(desc.retval);
5725 
5726 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5727 					     HCLGE_MAC_VLAN_REMOVE);
5728 }
5729 
5730 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
5731 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
5732 				     struct hclge_desc *desc,
5733 				     bool is_mc)
5734 {
5735 	struct hclge_dev *hdev = vport->back;
5736 	u8 resp_code;
5737 	u16 retval;
5738 	int ret;
5739 
5740 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
5741 	if (is_mc) {
5742 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5743 		memcpy(desc[0].data,
5744 		       req,
5745 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5746 		hclge_cmd_setup_basic_desc(&desc[1],
5747 					   HCLGE_OPC_MAC_VLAN_ADD,
5748 					   true);
5749 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5750 		hclge_cmd_setup_basic_desc(&desc[2],
5751 					   HCLGE_OPC_MAC_VLAN_ADD,
5752 					   true);
5753 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
5754 	} else {
5755 		memcpy(desc[0].data,
5756 		       req,
5757 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5758 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
5759 	}
5760 	if (ret) {
5761 		dev_err(&hdev->pdev->dev,
5762 			"lookup mac addr failed for cmd_send, ret =%d.\n",
5763 			ret);
5764 		return ret;
5765 	}
5766 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
5767 	retval = le16_to_cpu(desc[0].retval);
5768 
5769 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
5770 					     HCLGE_MAC_VLAN_LKUP);
5771 }
5772 
5773 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
5774 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
5775 				  struct hclge_desc *mc_desc)
5776 {
5777 	struct hclge_dev *hdev = vport->back;
5778 	int cfg_status;
5779 	u8 resp_code;
5780 	u16 retval;
5781 	int ret;
5782 
5783 	if (!mc_desc) {
5784 		struct hclge_desc desc;
5785 
5786 		hclge_cmd_setup_basic_desc(&desc,
5787 					   HCLGE_OPC_MAC_VLAN_ADD,
5788 					   false);
5789 		memcpy(desc.data, req,
5790 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5791 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5792 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
5793 		retval = le16_to_cpu(desc.retval);
5794 
5795 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5796 							   resp_code,
5797 							   HCLGE_MAC_VLAN_ADD);
5798 	} else {
5799 		hclge_cmd_reuse_desc(&mc_desc[0], false);
5800 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5801 		hclge_cmd_reuse_desc(&mc_desc[1], false);
5802 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5803 		hclge_cmd_reuse_desc(&mc_desc[2], false);
5804 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
5805 		memcpy(mc_desc[0].data, req,
5806 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
5807 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
5808 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
5809 		retval = le16_to_cpu(mc_desc[0].retval);
5810 
5811 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
5812 							   resp_code,
5813 							   HCLGE_MAC_VLAN_ADD);
5814 	}
5815 
5816 	if (ret) {
5817 		dev_err(&hdev->pdev->dev,
5818 			"add mac addr failed for cmd_send, ret =%d.\n",
5819 			ret);
5820 		return ret;
5821 	}
5822 
5823 	return cfg_status;
5824 }
5825 
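/* hclge_init_umv_space() requests unicast mac vlan (umv) table space from
 * the firmware and splits the allocated size into equal private quotas,
 * with one extra quota plus the division remainder kept as shared space.
 */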
5826 static int hclge_init_umv_space(struct hclge_dev *hdev)
5827 {
5828 	u16 allocated_size = 0;
5829 	int ret;
5830 
5831 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
5832 				  true);
5833 	if (ret)
5834 		return ret;
5835 
5836 	if (allocated_size < hdev->wanted_umv_size)
5837 		dev_warn(&hdev->pdev->dev,
5838 			 "Alloc umv space failed, want %d, get %d\n",
5839 			 hdev->wanted_umv_size, allocated_size);
5840 
5841 	mutex_init(&hdev->umv_mutex);
5842 	hdev->max_umv_size = allocated_size;
5843 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
5844 	hdev->share_umv_size = hdev->priv_umv_size +
5845 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5846 
5847 	return 0;
5848 }
5849 
5850 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
5851 {
5852 	int ret;
5853 
5854 	if (hdev->max_umv_size > 0) {
5855 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
5856 					  false);
5857 		if (ret)
5858 			return ret;
5859 		hdev->max_umv_size = 0;
5860 	}
5861 	mutex_destroy(&hdev->umv_mutex);
5862 
5863 	return 0;
5864 }
5865 
5866 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
5867 			       u16 *allocated_size, bool is_alloc)
5868 {
5869 	struct hclge_umv_spc_alc_cmd *req;
5870 	struct hclge_desc desc;
5871 	int ret;
5872 
5873 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
5874 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
5875 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
5876 	req->space_size = cpu_to_le32(space_size);
5877 
5878 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5879 	if (ret) {
5880 		dev_err(&hdev->pdev->dev,
5881 			"%s umv space failed for cmd_send, ret =%d\n",
5882 			is_alloc ? "allocate" : "free", ret);
5883 		return ret;
5884 	}
5885 
5886 	if (is_alloc && allocated_size)
5887 		*allocated_size = le32_to_cpu(desc.data[1]);
5888 
5889 	return 0;
5890 }
5891 
5892 static void hclge_reset_umv_space(struct hclge_dev *hdev)
5893 {
5894 	struct hclge_vport *vport;
5895 	int i;
5896 
5897 	for (i = 0; i < hdev->num_alloc_vport; i++) {
5898 		vport = &hdev->vport[i];
5899 		vport->used_umv_num = 0;
5900 	}
5901 
5902 	mutex_lock(&hdev->umv_mutex);
5903 	hdev->share_umv_size = hdev->priv_umv_size +
5904 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
5905 	mutex_unlock(&hdev->umv_mutex);
5906 }
5907 
5908 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
5909 {
5910 	struct hclge_dev *hdev = vport->back;
5911 	bool is_full;
5912 
5913 	mutex_lock(&hdev->umv_mutex);
5914 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
5915 		   hdev->share_umv_size == 0);
5916 	mutex_unlock(&hdev->umv_mutex);
5917 
5918 	return is_full;
5919 }
5920 
5921 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
5922 {
5923 	struct hclge_dev *hdev = vport->back;
5924 
5925 	mutex_lock(&hdev->umv_mutex);
5926 	if (is_free) {
5927 		if (vport->used_umv_num > hdev->priv_umv_size)
5928 			hdev->share_umv_size++;
5929 
5930 		if (vport->used_umv_num > 0)
5931 			vport->used_umv_num--;
5932 	} else {
5933 		if (vport->used_umv_num >= hdev->priv_umv_size &&
5934 		    hdev->share_umv_size > 0)
5935 			hdev->share_umv_size--;
5936 		vport->used_umv_num++;
5937 	}
5938 	mutex_unlock(&hdev->umv_mutex);
5939 }
5940 
5941 static int hclge_add_uc_addr(struct hnae3_handle *handle,
5942 			     const unsigned char *addr)
5943 {
5944 	struct hclge_vport *vport = hclge_get_vport(handle);
5945 
5946 	return hclge_add_uc_addr_common(vport, addr);
5947 }
5948 
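/* hclge_add_uc_addr_common() adds a unicast MAC address for a vport. The
 * address is looked up in the mac_vlan table first and is only added when
 * no entry exists and the vport still has umv space left, in which case
 * the umv usage counters are updated as well.
 */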
5949 int hclge_add_uc_addr_common(struct hclge_vport *vport,
5950 			     const unsigned char *addr)
5951 {
5952 	struct hclge_dev *hdev = vport->back;
5953 	struct hclge_mac_vlan_tbl_entry_cmd req;
5954 	struct hclge_desc desc;
5955 	u16 egress_port = 0;
5956 	int ret;
5957 
5958 	/* mac addr check */
5959 	if (is_zero_ether_addr(addr) ||
5960 	    is_broadcast_ether_addr(addr) ||
5961 	    is_multicast_ether_addr(addr)) {
5962 		dev_err(&hdev->pdev->dev,
5963 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
5964 			 addr,
5965 			 is_zero_ether_addr(addr),
5966 			 is_broadcast_ether_addr(addr),
5967 			 is_multicast_ether_addr(addr));
5968 		return -EINVAL;
5969 	}
5970 
5971 	memset(&req, 0, sizeof(req));
5972 
5973 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
5974 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
5975 
5976 	req.egress_port = cpu_to_le16(egress_port);
5977 
5978 	hclge_prepare_mac_addr(&req, addr, false);
5979 
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
5984 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
5985 	if (ret == -ENOENT) {
5986 		if (!hclge_is_umv_space_full(vport)) {
5987 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
5988 			if (!ret)
5989 				hclge_update_umv_space(vport, false);
5990 			return ret;
5991 		}
5992 
5993 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
5994 			hdev->priv_umv_size);
5995 
5996 		return -ENOSPC;
5997 	}
5998 
5999 	/* check if we just hit the duplicate */
6000 	if (!ret) {
6001 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6002 			 vport->vport_id, addr);
6003 		return 0;
6004 	}
6005 
6006 	dev_err(&hdev->pdev->dev,
6007 		"PF failed to add unicast entry(%pM) in the MAC table\n",
6008 		addr);
6009 
6010 	return ret;
6011 }
6012 
6013 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6014 			    const unsigned char *addr)
6015 {
6016 	struct hclge_vport *vport = hclge_get_vport(handle);
6017 
6018 	return hclge_rm_uc_addr_common(vport, addr);
6019 }
6020 
6021 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6022 			    const unsigned char *addr)
6023 {
6024 	struct hclge_dev *hdev = vport->back;
6025 	struct hclge_mac_vlan_tbl_entry_cmd req;
6026 	int ret;
6027 
6028 	/* mac addr check */
6029 	if (is_zero_ether_addr(addr) ||
6030 	    is_broadcast_ether_addr(addr) ||
6031 	    is_multicast_ether_addr(addr)) {
6032 		dev_dbg(&hdev->pdev->dev,
6033 			"Remove mac err! invalid mac:%pM.\n",
6034 			 addr);
6035 		return -EINVAL;
6036 	}
6037 
6038 	memset(&req, 0, sizeof(req));
6039 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6040 	hclge_prepare_mac_addr(&req, addr, false);
6041 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
6042 	if (!ret)
6043 		hclge_update_umv_space(vport, true);
6044 
6045 	return ret;
6046 }
6047 
6048 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6049 			     const unsigned char *addr)
6050 {
6051 	struct hclge_vport *vport = hclge_get_vport(handle);
6052 
6053 	return hclge_add_mc_addr_common(vport, addr);
6054 }
6055 
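/* hclge_add_mc_addr_common() adds a multicast MAC address for a vport. If
 * the address already has a table entry, only the vport's bit in the
 * function bitmap is set; otherwise a new three-descriptor entry is
 * written.
 */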
6056 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6057 			     const unsigned char *addr)
6058 {
6059 	struct hclge_dev *hdev = vport->back;
6060 	struct hclge_mac_vlan_tbl_entry_cmd req;
6061 	struct hclge_desc desc[3];
6062 	int status;
6063 
6064 	/* mac addr check */
6065 	if (!is_multicast_ether_addr(addr)) {
6066 		dev_err(&hdev->pdev->dev,
6067 			"Add mc mac err! invalid mac:%pM.\n",
6068 			 addr);
6069 		return -EINVAL;
6070 	}
6071 	memset(&req, 0, sizeof(req));
6072 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6073 	hclge_prepare_mac_addr(&req, addr, true);
6074 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6075 	if (!status) {
		/* This mac addr exists, update the VFID for it */
6077 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6078 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6079 	} else {
		/* This mac addr does not exist, add a new entry for it */
6081 		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
6084 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6085 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6086 	}
6087 
6088 	if (status == -ENOSPC)
6089 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6090 
6091 	return status;
6092 }
6093 
6094 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6095 			    const unsigned char *addr)
6096 {
6097 	struct hclge_vport *vport = hclge_get_vport(handle);
6098 
6099 	return hclge_rm_mc_addr_common(vport, addr);
6100 }
6101 
6102 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6103 			    const unsigned char *addr)
6104 {
6105 	struct hclge_dev *hdev = vport->back;
6106 	struct hclge_mac_vlan_tbl_entry_cmd req;
6107 	enum hclge_cmd_status status;
6108 	struct hclge_desc desc[3];
6109 
6110 	/* mac addr check */
6111 	if (!is_multicast_ether_addr(addr)) {
6112 		dev_dbg(&hdev->pdev->dev,
6113 			"Remove mc mac err! invalid mac:%pM.\n",
6114 			 addr);
6115 		return -EINVAL;
6116 	}
6117 
6118 	memset(&req, 0, sizeof(req));
6119 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6120 	hclge_prepare_mac_addr(&req, addr, true);
6121 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6122 	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
6124 		hclge_update_desc_vfid(desc, vport->vport_id, true);
6125 
6126 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
6128 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6129 		else
			/* Not all the vfids are zero, update the vfid bitmap */
6131 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6132 
6133 	} else {
		/* This mac address may be in the mta table, but it cannot be
		 * deleted here because an mta entry represents an address
		 * range rather than a specific address. The delete action for
		 * all entries will take effect in update_mta_status, called
		 * by hns3_nic_set_rx_mode.
		 */
6140 		status = 0;
6141 	}
6142 
6143 	return status;
6144 }
6145 
6146 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6147 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
6148 {
6149 	struct hclge_vport_mac_addr_cfg *mac_cfg;
6150 	struct list_head *list;
6151 
6152 	if (!vport->vport_id)
6153 		return;
6154 
6155 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6156 	if (!mac_cfg)
6157 		return;
6158 
6159 	mac_cfg->hd_tbl_status = true;
6160 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6161 
6162 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6163 	       &vport->uc_mac_list : &vport->mc_mac_list;
6164 
6165 	list_add_tail(&mac_cfg->node, list);
6166 }
6167 
6168 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6169 			      bool is_write_tbl,
6170 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
6171 {
6172 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6173 	struct list_head *list;
6174 	bool uc_flag, mc_flag;
6175 
6176 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6177 	       &vport->uc_mac_list : &vport->mc_mac_list;
6178 
6179 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6180 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6181 
6182 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6184 			if (uc_flag && mac_cfg->hd_tbl_status)
6185 				hclge_rm_uc_addr_common(vport, mac_addr);
6186 
6187 			if (mc_flag && mac_cfg->hd_tbl_status)
6188 				hclge_rm_mc_addr_common(vport, mac_addr);
6189 
6190 			list_del(&mac_cfg->node);
6191 			kfree(mac_cfg);
6192 			break;
6193 		}
6194 	}
6195 }
6196 
6197 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6198 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
6199 {
6200 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6201 	struct list_head *list;
6202 
6203 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6204 	       &vport->uc_mac_list : &vport->mc_mac_list;
6205 
6206 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6207 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6208 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6209 
6210 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6211 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6212 
6213 		mac_cfg->hd_tbl_status = false;
6214 		if (is_del_list) {
6215 			list_del(&mac_cfg->node);
6216 			kfree(mac_cfg);
6217 		}
6218 	}
6219 }
6220 
6221 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6222 {
6223 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
6224 	struct hclge_vport *vport;
6225 	int i;
6226 
6227 	mutex_lock(&hdev->vport_cfg_mutex);
6228 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6229 		vport = &hdev->vport[i];
6230 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6231 			list_del(&mac->node);
6232 			kfree(mac);
6233 		}
6234 
6235 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6236 			list_del(&mac->node);
6237 			kfree(mac);
6238 		}
6239 	}
6240 	mutex_unlock(&hdev->vport_cfg_mutex);
6241 }
6242 
6243 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6244 					      u16 cmdq_resp, u8 resp_code)
6245 {
6246 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
6247 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
6248 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
6249 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
6250 
6251 	int return_status;
6252 
6253 	if (cmdq_resp) {
6254 		dev_err(&hdev->pdev->dev,
6255 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6256 			cmdq_resp);
6257 		return -EIO;
6258 	}
6259 
6260 	switch (resp_code) {
6261 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
6262 	case HCLGE_ETHERTYPE_ALREADY_ADD:
6263 		return_status = 0;
6264 		break;
6265 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6266 		dev_err(&hdev->pdev->dev,
6267 			"add mac ethertype failed for manager table overflow.\n");
6268 		return_status = -EIO;
6269 		break;
6270 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
6271 		dev_err(&hdev->pdev->dev,
6272 			"add mac ethertype failed for key conflict.\n");
6273 		return_status = -EIO;
6274 		break;
6275 	default:
6276 		dev_err(&hdev->pdev->dev,
6277 			"add mac ethertype failed for undefined, code=%d.\n",
6278 			resp_code);
6279 		return_status = -EIO;
6280 	}
6281 
6282 	return return_status;
6283 }
6284 
6285 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6286 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
6287 {
6288 	struct hclge_desc desc;
6289 	u8 resp_code;
6290 	u16 retval;
6291 	int ret;
6292 
6293 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6294 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6295 
6296 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6297 	if (ret) {
6298 		dev_err(&hdev->pdev->dev,
6299 			"add mac ethertype failed for cmd_send, ret =%d.\n",
6300 			ret);
6301 		return ret;
6302 	}
6303 
6304 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6305 	retval = le16_to_cpu(desc.retval);
6306 
6307 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6308 }
6309 
6310 static int init_mgr_tbl(struct hclge_dev *hdev)
6311 {
6312 	int ret;
6313 	int i;
6314 
6315 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6316 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6317 		if (ret) {
6318 			dev_err(&hdev->pdev->dev,
6319 				"add mac ethertype failed, ret =%d.\n",
6320 				ret);
6321 			return ret;
6322 		}
6323 	}
6324 
6325 	return 0;
6326 }
6327 
6328 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6329 {
6330 	struct hclge_vport *vport = hclge_get_vport(handle);
6331 	struct hclge_dev *hdev = vport->back;
6332 
6333 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
6334 }
6335 
6336 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6337 			      bool is_first)
6338 {
6339 	const unsigned char *new_addr = (const unsigned char *)p;
6340 	struct hclge_vport *vport = hclge_get_vport(handle);
6341 	struct hclge_dev *hdev = vport->back;
6342 	int ret;
6343 
6344 	/* mac addr check */
6345 	if (is_zero_ether_addr(new_addr) ||
6346 	    is_broadcast_ether_addr(new_addr) ||
6347 	    is_multicast_ether_addr(new_addr)) {
6348 		dev_err(&hdev->pdev->dev,
6349 			"Change uc mac err! invalid mac:%p.\n",
6350 			 new_addr);
6351 		return -EINVAL;
6352 	}
6353 
6354 	if ((!is_first || is_kdump_kernel()) &&
6355 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6356 		dev_warn(&hdev->pdev->dev,
6357 			 "remove old uc mac address fail.\n");
6358 
6359 	ret = hclge_add_uc_addr(handle, new_addr);
6360 	if (ret) {
6361 		dev_err(&hdev->pdev->dev,
6362 			"add uc mac address fail, ret =%d.\n",
6363 			ret);
6364 
6365 		if (!is_first &&
6366 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6367 			dev_err(&hdev->pdev->dev,
6368 				"restore uc mac address fail.\n");
6369 
6370 		return -EIO;
6371 	}
6372 
6373 	ret = hclge_pause_addr_cfg(hdev, new_addr);
6374 	if (ret) {
6375 		dev_err(&hdev->pdev->dev,
6376 			"configure mac pause address fail, ret =%d.\n",
6377 			ret);
6378 		return -EIO;
6379 	}
6380 
6381 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6382 
6383 	return 0;
6384 }
6385 
6386 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6387 			  int cmd)
6388 {
6389 	struct hclge_vport *vport = hclge_get_vport(handle);
6390 	struct hclge_dev *hdev = vport->back;
6391 
6392 	if (!hdev->hw.mac.phydev)
6393 		return -EOPNOTSUPP;
6394 
6395 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6396 }
6397 
6398 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6399 				      u8 fe_type, bool filter_en, u8 vf_id)
6400 {
6401 	struct hclge_vlan_filter_ctrl_cmd *req;
6402 	struct hclge_desc desc;
6403 	int ret;
6404 
6405 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6406 
6407 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6408 	req->vlan_type = vlan_type;
6409 	req->vlan_fe = filter_en ? fe_type : 0;
6410 	req->vf_id = vf_id;
6411 
6412 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6413 	if (ret)
6414 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6415 			ret);
6416 
6417 	return ret;
6418 }
6419 
6420 #define HCLGE_FILTER_TYPE_VF		0
6421 #define HCLGE_FILTER_TYPE_PORT		1
6422 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
6423 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
6424 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
6425 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
6426 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6427 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6428 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6429 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6430 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6431 
6432 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6433 {
6434 	struct hclge_vport *vport = hclge_get_vport(handle);
6435 	struct hclge_dev *hdev = vport->back;
6436 
6437 	if (hdev->pdev->revision >= 0x21) {
6438 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6439 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
6440 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6441 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
6442 	} else {
6443 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6444 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6445 					   0);
6446 	}
6447 	if (enable)
6448 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6449 	else
6450 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6451 }
6452 
6453 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6454 				    bool is_kill, u16 vlan, u8 qos,
6455 				    __be16 proto)
6456 {
6457 #define HCLGE_MAX_VF_BYTES  16
6458 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6459 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6460 	struct hclge_desc desc[2];
6461 	u8 vf_byte_val;
6462 	u8 vf_byte_off;
6463 	int ret;
6464 
6465 	hclge_cmd_setup_basic_desc(&desc[0],
6466 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6467 	hclge_cmd_setup_basic_desc(&desc[1],
6468 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6469 
6470 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6471 
6472 	vf_byte_off = vfid / 8;
6473 	vf_byte_val = 1 << (vfid % 8);
6474 
6475 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6476 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6477 
6478 	req0->vlan_id  = cpu_to_le16(vlan);
6479 	req0->vlan_cfg = is_kill;
6480 
6481 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6482 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6483 	else
6484 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6485 
6486 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6487 	if (ret) {
6488 		dev_err(&hdev->pdev->dev,
6489 			"Send vf vlan command fail, ret =%d.\n",
6490 			ret);
6491 		return ret;
6492 	}
6493 
6494 	if (!is_kill) {
6495 #define HCLGE_VF_VLAN_NO_ENTRY	2
6496 		if (!req0->resp_code || req0->resp_code == 1)
6497 			return 0;
6498 
6499 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6500 			dev_warn(&hdev->pdev->dev,
6501 				 "vf vlan table is full, vf vlan filter is disabled\n");
6502 			return 0;
6503 		}
6504 
6505 		dev_err(&hdev->pdev->dev,
6506 			"Add vf vlan filter fail, ret =%d.\n",
6507 			req0->resp_code);
6508 	} else {
6509 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6510 		if (!req0->resp_code)
6511 			return 0;
6512 
6513 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6514 			dev_warn(&hdev->pdev->dev,
6515 				 "vlan %d filter is not in vf vlan table\n",
6516 				 vlan);
6517 			return 0;
6518 		}
6519 
6520 		dev_err(&hdev->pdev->dev,
6521 			"Kill vf vlan filter fail, ret =%d.\n",
6522 			req0->resp_code);
6523 	}
6524 
6525 	return -EIO;
6526 }
6527 
6528 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6529 				      u16 vlan_id, bool is_kill)
6530 {
6531 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6532 	struct hclge_desc desc;
6533 	u8 vlan_offset_byte_val;
6534 	u8 vlan_offset_byte;
6535 	u8 vlan_offset_160;
6536 	int ret;
6537 
6538 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6539 
6540 	vlan_offset_160 = vlan_id / 160;
6541 	vlan_offset_byte = (vlan_id % 160) / 8;
6542 	vlan_offset_byte_val = 1 << (vlan_id % 8);
6543 
6544 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6545 	req->vlan_offset = vlan_offset_160;
6546 	req->vlan_cfg = is_kill;
6547 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6548 
6549 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6550 	if (ret)
6551 		dev_err(&hdev->pdev->dev,
6552 			"port vlan command, send fail, ret =%d.\n", ret);
6553 	return ret;
6554 }
6555 
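/* hclge_set_vlan_filter_hw() programs the VF and port level VLAN filters.
 * hdev->vlan_table tracks which vports use each VLAN id; the port level
 * filter entry is only added for the first user of a VLAN and only
 * removed when the last user is gone.
 */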
6556 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6557 				    u16 vport_id, u16 vlan_id, u8 qos,
6558 				    bool is_kill)
6559 {
6560 	u16 vport_idx, vport_num = 0;
6561 	int ret;
6562 
6563 	if (is_kill && !vlan_id)
6564 		return 0;
6565 
6566 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6567 				       0, proto);
6568 	if (ret) {
6569 		dev_err(&hdev->pdev->dev,
6570 			"Set %d vport vlan filter config fail, ret =%d.\n",
6571 			vport_id, ret);
6572 		return ret;
6573 	}
6574 
6575 	/* vlan 0 may be added twice when 8021q module is enabled */
6576 	if (!is_kill && !vlan_id &&
6577 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6578 		return 0;
6579 
6580 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6581 		dev_err(&hdev->pdev->dev,
6582 			"Add port vlan failed, vport %d is already in vlan %d\n",
6583 			vport_id, vlan_id);
6584 		return -EINVAL;
6585 	}
6586 
6587 	if (is_kill &&
6588 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6589 		dev_err(&hdev->pdev->dev,
6590 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6591 			vport_id, vlan_id);
6592 		return -EINVAL;
6593 	}
6594 
6595 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6596 		vport_num++;
6597 
6598 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6599 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6600 						 is_kill);
6601 
6602 	return ret;
6603 }
6604 
6605 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6606 {
6607 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6608 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6609 	struct hclge_dev *hdev = vport->back;
6610 	struct hclge_desc desc;
6611 	int status;
6612 
6613 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6614 
6615 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6616 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6617 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6618 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6619 		      vcfg->accept_tag1 ? 1 : 0);
6620 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6621 		      vcfg->accept_untag1 ? 1 : 0);
6622 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6623 		      vcfg->accept_tag2 ? 1 : 0);
6624 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6625 		      vcfg->accept_untag2 ? 1 : 0);
6626 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6627 		      vcfg->insert_tag1_en ? 1 : 0);
6628 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6629 		      vcfg->insert_tag2_en ? 1 : 0);
6630 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6631 
6632 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6633 	req->vf_bitmap[req->vf_offset] =
6634 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6635 
6636 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6637 	if (status)
6638 		dev_err(&hdev->pdev->dev,
6639 			"Send port txvlan cfg command fail, ret =%d\n",
6640 			status);
6641 
6642 	return status;
6643 }
6644 
6645 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6646 {
6647 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6648 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6649 	struct hclge_dev *hdev = vport->back;
6650 	struct hclge_desc desc;
6651 	int status;
6652 
6653 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6654 
6655 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6656 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6657 		      vcfg->strip_tag1_en ? 1 : 0);
6658 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6659 		      vcfg->strip_tag2_en ? 1 : 0);
6660 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6661 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6662 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6663 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6664 
6665 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6666 	req->vf_bitmap[req->vf_offset] =
6667 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6668 
6669 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6670 	if (status)
6671 		dev_err(&hdev->pdev->dev,
6672 			"Send port rxvlan cfg command fail, ret =%d\n",
6673 			status);
6674 
6675 	return status;
6676 }
6677 
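/* hclge_vlan_offload_cfg() configures TX VLAN insertion and RX VLAN
 * stripping for a vport according to its port based VLAN state: with port
 * based VLAN enabled, the port VLAN is inserted as tag1 on TX and tag1
 * stripping on RX follows the rx_vlan_offload_en setting.
 */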
6678 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6679 				  u16 port_base_vlan_state,
6680 				  u16 vlan_tag)
6681 {
6682 	int ret;
6683 
6684 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6685 		vport->txvlan_cfg.accept_tag1 = true;
6686 		vport->txvlan_cfg.insert_tag1_en = false;
6687 		vport->txvlan_cfg.default_tag1 = 0;
6688 	} else {
6689 		vport->txvlan_cfg.accept_tag1 = false;
6690 		vport->txvlan_cfg.insert_tag1_en = true;
6691 		vport->txvlan_cfg.default_tag1 = vlan_tag;
6692 	}
6693 
6694 	vport->txvlan_cfg.accept_untag1 = true;
6695 
	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision 0x20; newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
6700 	vport->txvlan_cfg.accept_tag2 = true;
6701 	vport->txvlan_cfg.accept_untag2 = true;
6702 	vport->txvlan_cfg.insert_tag2_en = false;
6703 	vport->txvlan_cfg.default_tag2 = 0;
6704 
6705 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6706 		vport->rxvlan_cfg.strip_tag1_en = false;
6707 		vport->rxvlan_cfg.strip_tag2_en =
6708 				vport->rxvlan_cfg.rx_vlan_offload_en;
6709 	} else {
6710 		vport->rxvlan_cfg.strip_tag1_en =
6711 				vport->rxvlan_cfg.rx_vlan_offload_en;
6712 		vport->rxvlan_cfg.strip_tag2_en = true;
6713 	}
6714 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6715 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6716 
6717 	ret = hclge_set_vlan_tx_offload_cfg(vport);
6718 	if (ret)
6719 		return ret;
6720 
6721 	return hclge_set_vlan_rx_offload_cfg(vport);
6722 }
6723 
6724 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
6725 {
6726 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
6727 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
6728 	struct hclge_desc desc;
6729 	int status;
6730 
6731 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
6732 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
6733 	rx_req->ot_fst_vlan_type =
6734 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
6735 	rx_req->ot_sec_vlan_type =
6736 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
6737 	rx_req->in_fst_vlan_type =
6738 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
6739 	rx_req->in_sec_vlan_type =
6740 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
6741 
6742 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6743 	if (status) {
6744 		dev_err(&hdev->pdev->dev,
6745 			"Send rxvlan protocol type command fail, ret =%d\n",
6746 			status);
6747 		return status;
6748 	}
6749 
6750 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
6751 
6752 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
6753 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
6754 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
6755 
6756 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6757 	if (status)
6758 		dev_err(&hdev->pdev->dev,
6759 			"Send txvlan protocol type command fail, ret =%d\n",
6760 			status);
6761 
6762 	return status;
6763 }
6764 
6765 static int hclge_init_vlan_config(struct hclge_dev *hdev)
6766 {
6767 #define HCLGE_DEF_VLAN_TYPE		0x8100
6768 
6769 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6770 	struct hclge_vport *vport;
6771 	int ret;
6772 	int i;
6773 
6774 	if (hdev->pdev->revision >= 0x21) {
6775 		/* for revision 0x21, vf vlan filter is per function */
6776 		for (i = 0; i < hdev->num_alloc_vport; i++) {
6777 			vport = &hdev->vport[i];
6778 			ret = hclge_set_vlan_filter_ctrl(hdev,
6779 							 HCLGE_FILTER_TYPE_VF,
6780 							 HCLGE_FILTER_FE_EGRESS,
6781 							 true,
6782 							 vport->vport_id);
6783 			if (ret)
6784 				return ret;
6785 		}
6786 
6787 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6788 						 HCLGE_FILTER_FE_INGRESS, true,
6789 						 0);
6790 		if (ret)
6791 			return ret;
6792 	} else {
6793 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6794 						 HCLGE_FILTER_FE_EGRESS_V1_B,
6795 						 true, 0);
6796 		if (ret)
6797 			return ret;
6798 	}
6799 
6800 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
6801 
6802 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6803 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6804 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
6805 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
6806 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
6807 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
6808 
6809 	ret = hclge_set_vlan_protocol_type(hdev);
6810 	if (ret)
6811 		return ret;
6812 
6813 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6814 		u16 vlan_tag;
6815 
6816 		vport = &hdev->vport[i];
6817 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
6818 
6819 		ret = hclge_vlan_offload_cfg(vport,
6820 					     vport->port_base_vlan_cfg.state,
6821 					     vlan_tag);
6822 		if (ret)
6823 			return ret;
6824 	}
6825 
6826 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
6827 }
6828 
6829 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6830 				       bool writen_to_tbl)
6831 {
6832 	struct hclge_vport_vlan_cfg *vlan;
6833 
6834 	/* vlan 0 is reserved */
6835 	if (!vlan_id)
6836 		return;
6837 
6838 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
6839 	if (!vlan)
6840 		return;
6841 
6842 	vlan->hd_tbl_status = writen_to_tbl;
6843 	vlan->vlan_id = vlan_id;
6844 
6845 	list_add_tail(&vlan->node, &vport->vlan_list);
6846 }
6847 
6848 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
6849 {
6850 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6851 	struct hclge_dev *hdev = vport->back;
6852 	int ret;
6853 
6854 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6855 		if (!vlan->hd_tbl_status) {
6856 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
6857 						       vport->vport_id,
6858 						       vlan->vlan_id, 0, false);
6859 			if (ret) {
6860 				dev_err(&hdev->pdev->dev,
6861 					"restore vport vlan list failed, ret=%d\n",
6862 					ret);
6863 				return ret;
6864 			}
6865 		}
6866 		vlan->hd_tbl_status = true;
6867 	}
6868 
6869 	return 0;
6870 }
6871 
6872 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
6873 				      bool is_write_tbl)
6874 {
6875 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6876 	struct hclge_dev *hdev = vport->back;
6877 
6878 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6879 		if (vlan->vlan_id == vlan_id) {
6880 			if (is_write_tbl && vlan->hd_tbl_status)
6881 				hclge_set_vlan_filter_hw(hdev,
6882 							 htons(ETH_P_8021Q),
6883 							 vport->vport_id,
6884 							 vlan_id, 0,
6885 							 true);
6886 
6887 			list_del(&vlan->node);
6888 			kfree(vlan);
6889 			break;
6890 		}
6891 	}
6892 }
6893 
6894 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
6895 {
6896 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6897 	struct hclge_dev *hdev = vport->back;
6898 
6899 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6900 		if (vlan->hd_tbl_status)
6901 			hclge_set_vlan_filter_hw(hdev,
6902 						 htons(ETH_P_8021Q),
6903 						 vport->vport_id,
6904 						 vlan->vlan_id, 0,
6905 						 true);
6906 
6907 		vlan->hd_tbl_status = false;
6908 		if (is_del_list) {
6909 			list_del(&vlan->node);
6910 			kfree(vlan);
6911 		}
6912 	}
6913 }
6914 
6915 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
6916 {
6917 	struct hclge_vport_vlan_cfg *vlan, *tmp;
6918 	struct hclge_vport *vport;
6919 	int i;
6920 
6921 	mutex_lock(&hdev->vport_cfg_mutex);
6922 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6923 		vport = &hdev->vport[i];
6924 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
6925 			list_del(&vlan->node);
6926 			kfree(vlan);
6927 		}
6928 	}
6929 	mutex_unlock(&hdev->vport_cfg_mutex);
6930 }
6931 
6932 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
6933 {
6934 	struct hclge_vport *vport = hclge_get_vport(handle);
6935 
6936 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6937 		vport->rxvlan_cfg.strip_tag1_en = false;
6938 		vport->rxvlan_cfg.strip_tag2_en = enable;
6939 	} else {
6940 		vport->rxvlan_cfg.strip_tag1_en = enable;
6941 		vport->rxvlan_cfg.strip_tag2_en = true;
6942 	}
6943 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
6944 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
6945 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
6946 
6947 	return hclge_set_vlan_rx_offload_cfg(vport);
6948 }
6949 
6950 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
6951 					    u16 port_base_vlan_state,
6952 					    struct hclge_vlan_info *new_info,
6953 					    struct hclge_vlan_info *old_info)
6954 {
6955 	struct hclge_dev *hdev = vport->back;
6956 	int ret;
6957 
6958 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
6959 		hclge_rm_vport_all_vlan_table(vport, false);
6960 		return hclge_set_vlan_filter_hw(hdev,
6961 						 htons(new_info->vlan_proto),
6962 						 vport->vport_id,
6963 						 new_info->vlan_tag,
6964 						 new_info->qos, false);
6965 	}
6966 
6967 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
6968 				       vport->vport_id, old_info->vlan_tag,
6969 				       old_info->qos, true);
6970 	if (ret)
6971 		return ret;
6972 
6973 	return hclge_add_vport_all_vlan_table(vport);
6974 }
6975 
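/* hclge_update_port_base_vlan_cfg() applies a new port based VLAN setting
 * to a vport. A MODIFY request swaps the old filter entry for the new
 * one; enable/disable requests also switch between the port based VLAN
 * entry and the vport's own VLAN list and update the stored state.
 */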
6976 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
6977 				    struct hclge_vlan_info *vlan_info)
6978 {
6979 	struct hnae3_handle *nic = &vport->nic;
6980 	struct hclge_vlan_info *old_vlan_info;
6981 	struct hclge_dev *hdev = vport->back;
6982 	int ret;
6983 
6984 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
6985 
6986 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
6987 	if (ret)
6988 		return ret;
6989 
6990 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
6991 		/* add new VLAN tag */
6992 		ret = hclge_set_vlan_filter_hw(hdev,
6993 					       htons(vlan_info->vlan_proto),
6994 					       vport->vport_id,
6995 					       vlan_info->vlan_tag,
6996 					       vlan_info->qos, false);
6997 		if (ret)
6998 			return ret;
6999 
7000 		/* remove old VLAN tag */
7001 		ret = hclge_set_vlan_filter_hw(hdev,
7002 					       htons(old_vlan_info->vlan_proto),
7003 					       vport->vport_id,
7004 					       old_vlan_info->vlan_tag,
7005 					       old_vlan_info->qos, true);
7006 		if (ret)
7007 			return ret;
7008 
7009 		goto update;
7010 	}
7011 
7012 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7013 					       old_vlan_info);
7014 	if (ret)
7015 		return ret;
7016 
	/* update state only when disabling/enabling port based VLAN */
7018 	vport->port_base_vlan_cfg.state = state;
7019 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7020 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7021 	else
7022 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7023 
7024 update:
7025 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7026 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7027 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7028 
7029 	return 0;
7030 }
7031 
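/* hclge_get_port_base_vlan_state() works out how a new VLAN setting
 * changes the port based VLAN state: enable when a non-zero VLAN is set
 * while disabled, disable when VLAN 0 is set while enabled, modify when
 * the VLAN tag changes, and no change otherwise.
 */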
7032 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7033 					  enum hnae3_port_base_vlan_state state,
7034 					  u16 vlan)
7035 {
7036 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7037 		if (!vlan)
7038 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7039 		else
7040 			return HNAE3_PORT_BASE_VLAN_ENABLE;
7041 	} else {
7042 		if (!vlan)
7043 			return HNAE3_PORT_BASE_VLAN_DISABLE;
7044 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7045 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7046 		else
7047 			return HNAE3_PORT_BASE_VLAN_MODIFY;
7048 	}
7049 }
7050 
7051 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7052 				    u16 vlan, u8 qos, __be16 proto)
7053 {
7054 	struct hclge_vport *vport = hclge_get_vport(handle);
7055 	struct hclge_dev *hdev = vport->back;
7056 	struct hclge_vlan_info vlan_info;
7057 	u16 state;
7058 	int ret;
7059 
7060 	if (hdev->pdev->revision == 0x20)
7061 		return -EOPNOTSUPP;
7062 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
7064 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7065 		return -EINVAL;
7066 	if (proto != htons(ETH_P_8021Q))
7067 		return -EPROTONOSUPPORT;
7068 
7069 	vport = &hdev->vport[vfid];
7070 	state = hclge_get_port_base_vlan_state(vport,
7071 					       vport->port_base_vlan_cfg.state,
7072 					       vlan);
7073 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7074 		return 0;
7075 
7076 	vlan_info.vlan_tag = vlan;
7077 	vlan_info.qos = qos;
7078 	vlan_info.vlan_proto = ntohs(proto);
7079 
7080 	/* update port based VLAN for PF */
7081 	if (!vfid) {
7082 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7083 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7084 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7085 
7086 		return ret;
7087 	}
7088 
	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);

	return hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
						 (u8)vfid, state,
						 vlan, qos,
						 ntohs(proto));
7099 }
7100 
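/* hclge_set_vlan_filter() adds or removes a VLAN filter entry requested
 * by the stack. The hardware table is only written while port based VLAN
 * is disabled; the vport VLAN list is always kept up to date so the
 * entries can be restored later.
 */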
7101 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7102 			  u16 vlan_id, bool is_kill)
7103 {
7104 	struct hclge_vport *vport = hclge_get_vport(handle);
7105 	struct hclge_dev *hdev = vport->back;
7106 	bool writen_to_tbl = false;
7107 	int ret = 0;
7108 
	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case, we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing VLAN;
	 * we just update the vport VLAN list. The VLAN ids in the VLAN list
	 * won't be written to the VLAN filter table until port based VLAN
	 * is disabled.
	 */
7115 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7116 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7117 					       vlan_id, 0, is_kill);
7118 		writen_to_tbl = true;
7119 	}
7120 
7121 	if (ret)
7122 		return ret;
7123 
7124 	if (is_kill)
7125 		hclge_rm_vport_vlan_table(vport, vlan_id, false);
7126 	else
7127 		hclge_add_vport_vlan_table(vport, vlan_id,
7128 					   writen_to_tbl);
7129 
7130 	return 0;
7131 }
7132 
7133 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7134 {
7135 	struct hclge_config_max_frm_size_cmd *req;
7136 	struct hclge_desc desc;
7137 
7138 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7139 
7140 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7141 	req->max_frm_size = cpu_to_le16(new_mps);
7142 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7143 
7144 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7145 }
7146 
7147 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7148 {
7149 	struct hclge_vport *vport = hclge_get_vport(handle);
7150 
7151 	return hclge_set_vport_mtu(vport, new_mtu);
7152 }
7153 
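/* hclge_set_vport_mtu() updates the max frame size of a vport. A VF's mps
 * may not exceed the PF's, and the PF's mps may not drop below any VF's;
 * changing the PF's mps reprograms the MAC max frame size and reallocates
 * the packet buffers while the client is brought down.
 */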
7154 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7155 {
7156 	struct hclge_dev *hdev = vport->back;
7157 	int i, max_frm_size, ret = 0;
7158 
7159 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7160 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7161 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
7162 		return -EINVAL;
7163 
7164 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7165 	mutex_lock(&hdev->vport_lock);
7166 	/* VF's mps must fit within hdev->mps */
7167 	if (vport->vport_id && max_frm_size > hdev->mps) {
7168 		mutex_unlock(&hdev->vport_lock);
7169 		return -EINVAL;
7170 	} else if (vport->vport_id) {
7171 		vport->mps = max_frm_size;
7172 		mutex_unlock(&hdev->vport_lock);
7173 		return 0;
7174 	}
7175 
7176 	/* PF's mps must not be less than any VF's mps */
7177 	for (i = 1; i < hdev->num_alloc_vport; i++)
7178 		if (max_frm_size < hdev->vport[i].mps) {
7179 			mutex_unlock(&hdev->vport_lock);
7180 			return -EINVAL;
7181 		}
7182 
7183 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7184 
7185 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
7186 	if (ret) {
7187 		dev_err(&hdev->pdev->dev,
7188 			"Change mtu fail, ret =%d\n", ret);
7189 		goto out;
7190 	}
7191 
7192 	hdev->mps = max_frm_size;
7193 	vport->mps = max_frm_size;
7194 
7195 	ret = hclge_buffer_alloc(hdev);
7196 	if (ret)
7197 		dev_err(&hdev->pdev->dev,
7198 			"Allocate buffer fail, ret =%d\n", ret);
7199 
7200 out:
7201 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7202 	mutex_unlock(&hdev->vport_lock);
7203 	return ret;
7204 }
7205 
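/* hclge_send_reset_tqp_cmd() - assert (@enable true) or deassert the reset
 * of a single TQP through the HCLGE_OPC_RESET_TQP_QUEUE firmware command.
 */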
7206 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7207 				    bool enable)
7208 {
7209 	struct hclge_reset_tqp_queue_cmd *req;
7210 	struct hclge_desc desc;
7211 	int ret;
7212 
7213 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7214 
7215 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7216 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7217 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7218 
7219 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7220 	if (ret) {
7221 		dev_err(&hdev->pdev->dev,
7222 			"Send tqp reset cmd error, status =%d\n", ret);
7223 		return ret;
7224 	}
7225 
7226 	return 0;
7227 }
7228 
7229 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7230 {
7231 	struct hclge_reset_tqp_queue_cmd *req;
7232 	struct hclge_desc desc;
7233 	int ret;
7234 
7235 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7236 
7237 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7238 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7239 
7240 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7241 	if (ret) {
7242 		dev_err(&hdev->pdev->dev,
7243 			"Get reset status error, status =%d\n", ret);
7244 		return ret;
7245 	}
7246 
7247 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7248 }
7249 
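/* hclge_covert_handle_qid_global() - convert a queue id local to the given
 * handle into the global TQP index used in firmware commands.
 */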
7250 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7251 {
7252 	struct hnae3_queue *queue;
7253 	struct hclge_tqp *tqp;
7254 
7255 	queue = handle->kinfo.tqp[queue_id];
7256 	tqp = container_of(queue, struct hclge_tqp, q);
7257 
7258 	return tqp->index;
7259 }
7260 
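/* hclge_reset_tqp() - reset one TQP of the PF.
 * The queue is disabled first, then the TQP reset is asserted and the
 * reset status is polled; once the hardware reports it, the reset is
 * deasserted again.
 */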
7261 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7262 {
7263 	struct hclge_vport *vport = hclge_get_vport(handle);
7264 	struct hclge_dev *hdev = vport->back;
7265 	int reset_try_times = 0;
7266 	int reset_status;
7267 	u16 queue_gid;
7268 	int ret = 0;
7269 
7270 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7271 
7272 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7273 	if (ret) {
7274 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7275 		return ret;
7276 	}
7277 
7278 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7279 	if (ret) {
7280 		dev_err(&hdev->pdev->dev,
7281 			"Send reset tqp cmd fail, ret = %d\n", ret);
7282 		return ret;
7283 	}
7284 
7285 	reset_try_times = 0;
7286 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7287 		/* Wait for tqp hw reset */
7288 		msleep(20);
7289 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7290 		if (reset_status)
7291 			break;
7292 	}
7293 
7294 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7295 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7296 		return -ETIME;
7297 	}
7298 
7299 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7300 	if (ret)
7301 		dev_err(&hdev->pdev->dev,
7302 			"Deassert the soft reset fail, ret = %d\n", ret);
7303 
7304 	return ret;
7305 }
7306 
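/* hclge_reset_vf_queue() - reset one TQP on behalf of a VF.
 * Same assert/poll/deassert flow as hclge_reset_tqp(), but errors are only
 * reported with dev_warn() since there is no status to return.
 */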
7307 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7308 {
7309 	struct hclge_dev *hdev = vport->back;
7310 	int reset_try_times = 0;
7311 	int reset_status;
7312 	u16 queue_gid;
7313 	int ret;
7314 
7315 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7316 
7317 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7318 	if (ret) {
7319 		dev_warn(&hdev->pdev->dev,
7320 			 "Send reset tqp cmd fail, ret = %d\n", ret);
7321 		return;
7322 	}
7323 
7324 	reset_try_times = 0;
7325 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7326 		/* Wait for tqp hw reset */
7327 		msleep(20);
7328 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7329 		if (reset_status)
7330 			break;
7331 	}
7332 
7333 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7334 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7335 		return;
7336 	}
7337 
7338 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7339 	if (ret)
7340 		dev_warn(&hdev->pdev->dev,
7341 			 "Deassert the soft reset fail, ret = %d\n", ret);
7342 }
7343 
7344 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7345 {
7346 	struct hclge_vport *vport = hclge_get_vport(handle);
7347 	struct hclge_dev *hdev = vport->back;
7348 
7349 	return hdev->fw_version;
7350 }
7351 
7352 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7353 {
7354 	struct phy_device *phydev = hdev->hw.mac.phydev;
7355 
7356 	if (!phydev)
7357 		return;
7358 
7359 	phy_set_asym_pause(phydev, rx_en, tx_en);
7360 }
7361 
7362 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7363 {
7364 	int ret;
7365 
7366 	if (rx_en && tx_en)
7367 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
7368 	else if (rx_en && !tx_en)
7369 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7370 	else if (!rx_en && tx_en)
7371 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7372 	else
7373 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
7374 
7375 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7376 		return 0;
7377 
7378 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7379 	if (ret) {
7380 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7381 			ret);
7382 		return ret;
7383 	}
7384 
7385 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7386 
7387 	return 0;
7388 }
7389 
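/* hclge_cfg_flowctrl() - re-resolve MAC pause settings after PHY autoneg.
 * Local and link partner pause advertisements are combined with
 * mii_resolve_flowctrl_fdx(), pause is forced off for half duplex links,
 * and the result is applied through hclge_cfg_pauseparam().
 */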
7390 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7391 {
7392 	struct phy_device *phydev = hdev->hw.mac.phydev;
7393 	u16 remote_advertising = 0;
7394 	u16 local_advertising = 0;
7395 	u32 rx_pause, tx_pause;
7396 	u8 flowctl;
7397 
7398 	if (!phydev->link || !phydev->autoneg)
7399 		return 0;
7400 
7401 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7402 
7403 	if (phydev->pause)
7404 		remote_advertising = LPA_PAUSE_CAP;
7405 
7406 	if (phydev->asym_pause)
7407 		remote_advertising |= LPA_PAUSE_ASYM;
7408 
7409 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7410 					   remote_advertising);
7411 	tx_pause = flowctl & FLOW_CTRL_TX;
7412 	rx_pause = flowctl & FLOW_CTRL_RX;
7413 
7414 	if (phydev->duplex == HCLGE_MAC_HALF) {
7415 		tx_pause = 0;
7416 		rx_pause = 0;
7417 	}
7418 
7419 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7420 }
7421 
7422 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7423 				 u32 *rx_en, u32 *tx_en)
7424 {
7425 	struct hclge_vport *vport = hclge_get_vport(handle);
7426 	struct hclge_dev *hdev = vport->back;
7427 
7428 	*auto_neg = hclge_get_autoneg(handle);
7429 
7430 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7431 		*rx_en = 0;
7432 		*tx_en = 0;
7433 		return;
7434 	}
7435 
7436 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7437 		*rx_en = 1;
7438 		*tx_en = 0;
7439 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7440 		*tx_en = 1;
7441 		*rx_en = 0;
7442 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7443 		*rx_en = 1;
7444 		*tx_en = 1;
7445 	} else {
7446 		*rx_en = 0;
7447 		*tx_en = 0;
7448 	}
7449 }
7450 
7451 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7452 				u32 rx_en, u32 tx_en)
7453 {
7454 	struct hclge_vport *vport = hclge_get_vport(handle);
7455 	struct hclge_dev *hdev = vport->back;
7456 	struct phy_device *phydev = hdev->hw.mac.phydev;
7457 	u32 fc_autoneg;
7458 
7459 	fc_autoneg = hclge_get_autoneg(handle);
7460 	if (auto_neg != fc_autoneg) {
7461 		dev_info(&hdev->pdev->dev,
7462 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7463 		return -EOPNOTSUPP;
7464 	}
7465 
7466 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7467 		dev_info(&hdev->pdev->dev,
7468 			 "Priority flow control enabled. Cannot set link flow control.\n");
7469 		return -EOPNOTSUPP;
7470 	}
7471 
7472 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7473 
7474 	if (!fc_autoneg)
7475 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7476 
7477 	/* Only support flow control negotiation for netdev with
7478 	 * phy attached for now.
7479 	 */
7480 	if (!phydev)
7481 		return -EOPNOTSUPP;
7482 
7483 	return phy_start_aneg(phydev);
7484 }
7485 
7486 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7487 					  u8 *auto_neg, u32 *speed, u8 *duplex)
7488 {
7489 	struct hclge_vport *vport = hclge_get_vport(handle);
7490 	struct hclge_dev *hdev = vport->back;
7491 
7492 	if (speed)
7493 		*speed = hdev->hw.mac.speed;
7494 	if (duplex)
7495 		*duplex = hdev->hw.mac.duplex;
7496 	if (auto_neg)
7497 		*auto_neg = hdev->hw.mac.autoneg;
7498 }
7499 
7500 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
7501 {
7502 	struct hclge_vport *vport = hclge_get_vport(handle);
7503 	struct hclge_dev *hdev = vport->back;
7504 
7505 	if (media_type)
7506 		*media_type = hdev->hw.mac.media_type;
7507 }
7508 
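/* hclge_get_mdix_mode() - report MDI-X control and status for ethtool.
 * The MDI-X bits live on a separate PHY register page, so the page is
 * switched to HCLGE_PHY_PAGE_MDIX for the reads and restored to the
 * copper page afterwards.
 */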
7509 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7510 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
7511 {
7512 	struct hclge_vport *vport = hclge_get_vport(handle);
7513 	struct hclge_dev *hdev = vport->back;
7514 	struct phy_device *phydev = hdev->hw.mac.phydev;
7515 	int mdix_ctrl, mdix, retval, is_resolved;
7516 
7517 	if (!phydev) {
7518 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7519 		*tp_mdix = ETH_TP_MDI_INVALID;
7520 		return;
7521 	}
7522 
7523 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7524 
7525 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7526 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7527 				    HCLGE_PHY_MDIX_CTRL_S);
7528 
7529 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7530 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7531 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7532 
7533 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7534 
7535 	switch (mdix_ctrl) {
7536 	case 0x0:
7537 		*tp_mdix_ctrl = ETH_TP_MDI;
7538 		break;
7539 	case 0x1:
7540 		*tp_mdix_ctrl = ETH_TP_MDI_X;
7541 		break;
7542 	case 0x3:
7543 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7544 		break;
7545 	default:
7546 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7547 		break;
7548 	}
7549 
7550 	if (!is_resolved)
7551 		*tp_mdix = ETH_TP_MDI_INVALID;
7552 	else if (mdix)
7553 		*tp_mdix = ETH_TP_MDI_X;
7554 	else
7555 		*tp_mdix = ETH_TP_MDI;
7556 }
7557 
7558 static void hclge_info_show(struct hclge_dev *hdev)
7559 {
7560 	struct device *dev = &hdev->pdev->dev;
7561 
7562 	dev_info(dev, "PF info begin:\n");
7563 
7564 	dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
7565 	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
7566 	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
7567 	dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
7568 	dev_info(dev, "Number of VMDq vports: %d\n", hdev->num_vmdq_vport);
7569 	dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
7570 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
7571 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
7572 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
7573 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
7574 	dev_info(dev, "This is %s PF\n",
7575 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
7576 	dev_info(dev, "DCB %s\n",
7577 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
7578 	dev_info(dev, "MQPRIO %s\n",
7579 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
7580 
7581 	dev_info(dev, "PF info end.\n");
7582 }
7583 
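/* hclge_init_client_instance() - bind a newly registered client (KNIC,
 * UNIC or RoCE) to each vport of this PF and initialize its instance.
 * A RoCE instance is only brought up once both the NIC and RoCE clients
 * are registered and the hardware supports RoCE.
 */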
7584 static int hclge_init_client_instance(struct hnae3_client *client,
7585 				      struct hnae3_ae_dev *ae_dev)
7586 {
7587 	struct hclge_dev *hdev = ae_dev->priv;
7588 	struct hclge_vport *vport;
7589 	int i, ret;
7590 
7591 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7592 		vport = &hdev->vport[i];
7593 
7594 		switch (client->type) {
7595 		case HNAE3_CLIENT_KNIC:
7596 
7597 			hdev->nic_client = client;
7598 			vport->nic.client = client;
7599 			ret = client->ops->init_instance(&vport->nic);
7600 			if (ret)
7601 				goto clear_nic;
7602 
7603 			hnae3_set_client_init_flag(client, ae_dev, 1);
7604 
7605 			if (netif_msg_drv(&hdev->vport->nic))
7606 				hclge_info_show(hdev);
7607 
7608 			if (hdev->roce_client &&
7609 			    hnae3_dev_roce_supported(hdev)) {
7610 				struct hnae3_client *rc = hdev->roce_client;
7611 
7612 				ret = hclge_init_roce_base_info(vport);
7613 				if (ret)
7614 					goto clear_roce;
7615 
7616 				ret = rc->ops->init_instance(&vport->roce);
7617 				if (ret)
7618 					goto clear_roce;
7619 
7620 				hnae3_set_client_init_flag(hdev->roce_client,
7621 							   ae_dev, 1);
7622 			}
7623 
7624 			break;
7625 		case HNAE3_CLIENT_UNIC:
7626 			hdev->nic_client = client;
7627 			vport->nic.client = client;
7628 
7629 			ret = client->ops->init_instance(&vport->nic);
7630 			if (ret)
7631 				goto clear_nic;
7632 
7633 			hnae3_set_client_init_flag(client, ae_dev, 1);
7634 
7635 			break;
7636 		case HNAE3_CLIENT_ROCE:
7637 			if (hnae3_dev_roce_supported(hdev)) {
7638 				hdev->roce_client = client;
7639 				vport->roce.client = client;
7640 			}
7641 
7642 			if (hdev->roce_client && hdev->nic_client) {
7643 				ret = hclge_init_roce_base_info(vport);
7644 				if (ret)
7645 					goto clear_roce;
7646 
7647 				ret = client->ops->init_instance(&vport->roce);
7648 				if (ret)
7649 					goto clear_roce;
7650 
7651 				hnae3_set_client_init_flag(client, ae_dev, 1);
7652 			}
7653 
7654 			break;
7655 		default:
7656 			return -EINVAL;
7657 		}
7658 	}
7659 
7660 	return 0;
7661 
7662 clear_nic:
7663 	hdev->nic_client = NULL;
7664 	vport->nic.client = NULL;
7665 	return ret;
7666 clear_roce:
7667 	hdev->roce_client = NULL;
7668 	vport->roce.client = NULL;
7669 	return ret;
7670 }
7671 
7672 static void hclge_uninit_client_instance(struct hnae3_client *client,
7673 					 struct hnae3_ae_dev *ae_dev)
7674 {
7675 	struct hclge_dev *hdev = ae_dev->priv;
7676 	struct hclge_vport *vport;
7677 	int i;
7678 
7679 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7680 		vport = &hdev->vport[i];
7681 		if (hdev->roce_client) {
7682 			hdev->roce_client->ops->uninit_instance(&vport->roce,
7683 								0);
7684 			hdev->roce_client = NULL;
7685 			vport->roce.client = NULL;
7686 		}
7687 		if (client->type == HNAE3_CLIENT_ROCE)
7688 			return;
7689 		if (hdev->nic_client && client->ops->uninit_instance) {
7690 			client->ops->uninit_instance(&vport->nic, 0);
7691 			hdev->nic_client = NULL;
7692 			vport->nic.client = NULL;
7693 		}
7694 	}
7695 }
7696 
7697 static int hclge_pci_init(struct hclge_dev *hdev)
7698 {
7699 	struct pci_dev *pdev = hdev->pdev;
7700 	struct hclge_hw *hw;
7701 	int ret;
7702 
7703 	ret = pci_enable_device(pdev);
7704 	if (ret) {
7705 		dev_err(&pdev->dev, "failed to enable PCI device\n");
7706 		return ret;
7707 	}
7708 
7709 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7710 	if (ret) {
7711 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7712 		if (ret) {
7713 			dev_err(&pdev->dev,
7714 				"can't set consistent PCI DMA\n");
7715 			goto err_disable_device;
7716 		}
7717 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
7718 	}
7719 
7720 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
7721 	if (ret) {
7722 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
7723 		goto err_disable_device;
7724 	}
7725 
7726 	pci_set_master(pdev);
7727 	hw = &hdev->hw;
7728 	hw->io_base = pcim_iomap(pdev, 2, 0);
7729 	if (!hw->io_base) {
7730 		dev_err(&pdev->dev, "Can't map configuration register space\n");
7731 		ret = -ENOMEM;
7732 		goto err_clr_master;
7733 	}
7734 
7735 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
7736 
7737 	return 0;
7738 err_clr_master:
7739 	pci_clear_master(pdev);
7740 	pci_release_regions(pdev);
7741 err_disable_device:
7742 	pci_disable_device(pdev);
7743 
7744 	return ret;
7745 }
7746 
7747 static void hclge_pci_uninit(struct hclge_dev *hdev)
7748 {
7749 	struct pci_dev *pdev = hdev->pdev;
7750 
7751 	pcim_iounmap(pdev, hdev->hw.io_base);
7752 	pci_free_irq_vectors(pdev);
7753 	pci_clear_master(pdev);
7754 	pci_release_mem_regions(pdev);
7755 	pci_disable_device(pdev);
7756 }
7757 
7758 static void hclge_state_init(struct hclge_dev *hdev)
7759 {
7760 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
7761 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7762 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
7763 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7764 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
7765 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
7766 }
7767 
7768 static void hclge_state_uninit(struct hclge_dev *hdev)
7769 {
7770 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7771 
7772 	if (hdev->service_timer.function)
7773 		del_timer_sync(&hdev->service_timer);
7774 	if (hdev->reset_timer.function)
7775 		del_timer_sync(&hdev->reset_timer);
7776 	if (hdev->service_task.func)
7777 		cancel_work_sync(&hdev->service_task);
7778 	if (hdev->rst_service_task.func)
7779 		cancel_work_sync(&hdev->rst_service_task);
7780 	if (hdev->mbx_service_task.func)
7781 		cancel_work_sync(&hdev->mbx_service_task);
7782 }
7783 
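/* hclge_flr_prepare() - quiesce the PF before a function level reset.
 * An FLR reset request is scheduled through the common reset path and the
 * function then polls, up to HCLGE_FLR_WAIT_CNT times, for the reset task
 * to signal HNAE3_FLR_DOWN.
 */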
7784 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
7785 {
7786 #define HCLGE_FLR_WAIT_MS	100
7787 #define HCLGE_FLR_WAIT_CNT	50
7788 	struct hclge_dev *hdev = ae_dev->priv;
7789 	int cnt = 0;
7790 
7791 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
7792 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7793 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
7794 	hclge_reset_event(hdev->pdev, NULL);
7795 
7796 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
7797 	       cnt++ < HCLGE_FLR_WAIT_CNT)
7798 		msleep(HCLGE_FLR_WAIT_MS);
7799 
7800 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
7801 		dev_err(&hdev->pdev->dev,
7802 			"flr wait down timeout: %d\n", cnt);
7803 }
7804 
7805 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
7806 {
7807 	struct hclge_dev *hdev = ae_dev->priv;
7808 
7809 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
7810 }
7811 
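/* hclge_init_ae_dev() - first time PF initialization.
 * Brings up PCI and the firmware command queue, allocates MSI vectors,
 * TQPs and vports, and performs the initial MAC, VLAN, TM, RSS and flow
 * director configuration before arming the service tasks and the misc
 * interrupt vector.
 */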
7812 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
7813 {
7814 	struct pci_dev *pdev = ae_dev->pdev;
7815 	struct hclge_dev *hdev;
7816 	int ret;
7817 
7818 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
7819 	if (!hdev) {
7820 		ret = -ENOMEM;
7821 		goto out;
7822 	}
7823 
7824 	hdev->pdev = pdev;
7825 	hdev->ae_dev = ae_dev;
7826 	hdev->reset_type = HNAE3_NONE_RESET;
7827 	hdev->reset_level = HNAE3_FUNC_RESET;
7828 	ae_dev->priv = hdev;
7829 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7830 
7831 	mutex_init(&hdev->vport_lock);
7832 	mutex_init(&hdev->vport_cfg_mutex);
7833 
7834 	ret = hclge_pci_init(hdev);
7835 	if (ret) {
7836 		dev_err(&pdev->dev, "PCI init failed\n");
7837 		goto out;
7838 	}
7839 
7840 	/* Firmware command queue initialization */
7841 	ret = hclge_cmd_queue_init(hdev);
7842 	if (ret) {
7843 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
7844 		goto err_pci_uninit;
7845 	}
7846 
7847 	/* Firmware command initialization */
7848 	ret = hclge_cmd_init(hdev);
7849 	if (ret)
7850 		goto err_cmd_uninit;
7851 
7852 	ret = hclge_get_cap(hdev);
7853 	if (ret) {
7854 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
7855 			ret);
7856 		goto err_cmd_uninit;
7857 	}
7858 
7859 	ret = hclge_configure(hdev);
7860 	if (ret) {
7861 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
7862 		goto err_cmd_uninit;
7863 	}
7864 
7865 	ret = hclge_init_msi(hdev);
7866 	if (ret) {
7867 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
7868 		goto err_cmd_uninit;
7869 	}
7870 
7871 	ret = hclge_misc_irq_init(hdev);
7872 	if (ret) {
7873 		dev_err(&pdev->dev,
7874 			"Misc IRQ(vector0) init error, ret = %d.\n",
7875 			ret);
7876 		goto err_msi_uninit;
7877 	}
7878 
7879 	ret = hclge_alloc_tqps(hdev);
7880 	if (ret) {
7881 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
7882 		goto err_msi_irq_uninit;
7883 	}
7884 
7885 	ret = hclge_alloc_vport(hdev);
7886 	if (ret) {
7887 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
7888 		goto err_msi_irq_uninit;
7889 	}
7890 
7891 	ret = hclge_map_tqp(hdev);
7892 	if (ret) {
7893 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
7894 		goto err_msi_irq_uninit;
7895 	}
7896 
7897 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
7898 		ret = hclge_mac_mdio_config(hdev);
7899 		if (ret) {
7900 			dev_err(&hdev->pdev->dev,
7901 				"mdio config fail ret=%d\n", ret);
7902 			goto err_msi_irq_uninit;
7903 		}
7904 	}
7905 
7906 	ret = hclge_init_umv_space(hdev);
7907 	if (ret) {
7908 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
7909 		goto err_mdiobus_unreg;
7910 	}
7911 
7912 	ret = hclge_mac_init(hdev);
7913 	if (ret) {
7914 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
7915 		goto err_mdiobus_unreg;
7916 	}
7917 
7918 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
7919 	if (ret) {
7920 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
7921 		goto err_mdiobus_unreg;
7922 	}
7923 
7924 	ret = hclge_config_gro(hdev, true);
7925 	if (ret)
7926 		goto err_mdiobus_unreg;
7927 
7928 	ret = hclge_init_vlan_config(hdev);
7929 	if (ret) {
7930 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
7931 		goto err_mdiobus_unreg;
7932 	}
7933 
7934 	ret = hclge_tm_schd_init(hdev);
7935 	if (ret) {
7936 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
7937 		goto err_mdiobus_unreg;
7938 	}
7939 
7940 	hclge_rss_init_cfg(hdev);
7941 	ret = hclge_rss_init_hw(hdev);
7942 	if (ret) {
7943 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
7944 		goto err_mdiobus_unreg;
7945 	}
7946 
7947 	ret = init_mgr_tbl(hdev);
7948 	if (ret) {
7949 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
7950 		goto err_mdiobus_unreg;
7951 	}
7952 
7953 	ret = hclge_init_fd_config(hdev);
7954 	if (ret) {
7955 		dev_err(&pdev->dev,
7956 			"fd table init fail, ret=%d\n", ret);
7957 		goto err_mdiobus_unreg;
7958 	}
7959 
7960 	ret = hclge_hw_error_set_state(hdev, true);
7961 	if (ret) {
7962 		dev_err(&pdev->dev,
7963 			"fail(%d) to enable hw error interrupts\n", ret);
7964 		goto err_mdiobus_unreg;
7965 	}
7966 
7967 	INIT_KFIFO(hdev->mac_tnl_log);
7968 
7969 	hclge_dcb_ops_set(hdev);
7970 
7971 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
7972 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
7973 	INIT_WORK(&hdev->service_task, hclge_service_task);
7974 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
7975 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
7976 
7977 	hclge_clear_all_event_cause(hdev);
7978 
7979 	/* Enable MISC vector(vector0) */
7980 	hclge_enable_vector(&hdev->misc_vector, true);
7981 
7982 	hclge_state_init(hdev);
7983 	hdev->last_reset_time = jiffies;
7984 
7985 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
7986 	return 0;
7987 
7988 err_mdiobus_unreg:
7989 	if (hdev->hw.mac.phydev)
7990 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
7991 err_msi_irq_uninit:
7992 	hclge_misc_irq_uninit(hdev);
7993 err_msi_uninit:
7994 	pci_free_irq_vectors(pdev);
7995 err_cmd_uninit:
7996 	hclge_cmd_uninit(hdev);
7997 err_pci_uninit:
7998 	pcim_iounmap(pdev, hdev->hw.io_base);
7999 	pci_clear_master(pdev);
8000 	pci_release_regions(pdev);
8001 	pci_disable_device(pdev);
8002 out:
8003 	return ret;
8004 }
8005 
8006 static void hclge_stats_clear(struct hclge_dev *hdev)
8007 {
8008 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8009 }
8010 
8011 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8012 {
8013 	struct hclge_vport *vport = hdev->vport;
8014 	int i;
8015 
8016 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8017 		hclge_vport_stop(vport);
8018 		vport++;
8019 	}
8020 }
8021 
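/* hclge_reset_ae_dev() - reinitialize the hardware after a reset.
 * Repeats the hardware configuration part of hclge_init_ae_dev() (command
 * queue, TQP mapping, MAC, VLAN, TM, RSS, flow director and error
 * interrupts) without reallocating the software resources that survive
 * the reset.
 */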
8022 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8023 {
8024 	struct hclge_dev *hdev = ae_dev->priv;
8025 	struct pci_dev *pdev = ae_dev->pdev;
8026 	int ret;
8027 
8028 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8029 
8030 	hclge_stats_clear(hdev);
8031 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8032 
8033 	ret = hclge_cmd_init(hdev);
8034 	if (ret) {
8035 		dev_err(&pdev->dev, "Cmd queue init failed\n");
8036 		return ret;
8037 	}
8038 
8039 	ret = hclge_map_tqp(hdev);
8040 	if (ret) {
8041 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8042 		return ret;
8043 	}
8044 
8045 	hclge_reset_umv_space(hdev);
8046 
8047 	ret = hclge_mac_init(hdev);
8048 	if (ret) {
8049 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8050 		return ret;
8051 	}
8052 
8053 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8054 	if (ret) {
8055 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8056 		return ret;
8057 	}
8058 
8059 	ret = hclge_config_gro(hdev, true);
8060 	if (ret)
8061 		return ret;
8062 
8063 	ret = hclge_init_vlan_config(hdev);
8064 	if (ret) {
8065 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8066 		return ret;
8067 	}
8068 
8069 	ret = hclge_tm_init_hw(hdev, true);
8070 	if (ret) {
8071 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8072 		return ret;
8073 	}
8074 
8075 	ret = hclge_rss_init_hw(hdev);
8076 	if (ret) {
8077 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8078 		return ret;
8079 	}
8080 
8081 	ret = hclge_init_fd_config(hdev);
8082 	if (ret) {
8083 		dev_err(&pdev->dev,
8084 			"fd table init fail, ret=%d\n", ret);
8085 		return ret;
8086 	}
8087 
8088 	/* Re-enable the hw error interrupts because
8089 	 * the interrupts get disabled on core/global reset.
8090 	 */
8091 	ret = hclge_hw_error_set_state(hdev, true);
8092 	if (ret) {
8093 		dev_err(&pdev->dev,
8094 			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
8095 		return ret;
8096 	}
8097 
8098 	hclge_reset_vport_state(hdev);
8099 
8100 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8101 		 HCLGE_DRIVER_NAME);
8102 
8103 	return 0;
8104 }
8105 
8106 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8107 {
8108 	struct hclge_dev *hdev = ae_dev->priv;
8109 	struct hclge_mac *mac = &hdev->hw.mac;
8110 
8111 	hclge_state_uninit(hdev);
8112 
8113 	if (mac->phydev)
8114 		mdiobus_unregister(mac->mdio_bus);
8115 
8116 	hclge_uninit_umv_space(hdev);
8117 
8118 	/* Disable MISC vector(vector0) */
8119 	hclge_enable_vector(&hdev->misc_vector, false);
8120 	synchronize_irq(hdev->misc_vector.vector_irq);
8121 
8122 	hclge_config_mac_tnl_int(hdev, false);
8123 	hclge_hw_error_set_state(hdev, false);
8124 	hclge_cmd_uninit(hdev);
8125 	hclge_misc_irq_uninit(hdev);
8126 	hclge_pci_uninit(hdev);
8127 	mutex_destroy(&hdev->vport_lock);
8128 	hclge_uninit_vport_mac_table(hdev);
8129 	hclge_uninit_vport_vlan_table(hdev);
8130 	mutex_destroy(&hdev->vport_cfg_mutex);
8131 	ae_dev->priv = NULL;
8132 }
8133 
8134 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8135 {
8136 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8137 	struct hclge_vport *vport = hclge_get_vport(handle);
8138 	struct hclge_dev *hdev = vport->back;
8139 
8140 	return min_t(u32, hdev->rss_size_max,
8141 		     vport->alloc_tqps / kinfo->num_tc);
8142 }
8143 
8144 static void hclge_get_channels(struct hnae3_handle *handle,
8145 			       struct ethtool_channels *ch)
8146 {
8147 	ch->max_combined = hclge_get_max_channels(handle);
8148 	ch->other_count = 1;
8149 	ch->max_other = 1;
8150 	ch->combined_count = handle->kinfo.rss_size;
8151 }
8152 
8153 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8154 					u16 *alloc_tqps, u16 *max_rss_size)
8155 {
8156 	struct hclge_vport *vport = hclge_get_vport(handle);
8157 	struct hclge_dev *hdev = vport->back;
8158 
8159 	*alloc_tqps = vport->alloc_tqps;
8160 	*max_rss_size = hdev->rss_size_max;
8161 }
8162 
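/* hclge_set_channels() - change the number of enabled queues (rss_size).
 * The TM vport mapping is updated for the requested rss_size, the RSS TC
 * mode is reprogrammed accordingly, and the RSS indirection table is only
 * rebuilt when the user has not configured it explicitly.
 */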
8163 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8164 			      bool rxfh_configured)
8165 {
8166 	struct hclge_vport *vport = hclge_get_vport(handle);
8167 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8168 	struct hclge_dev *hdev = vport->back;
8169 	int cur_rss_size = kinfo->rss_size;
8170 	int cur_tqps = kinfo->num_tqps;
8171 	u16 tc_offset[HCLGE_MAX_TC_NUM];
8172 	u16 tc_valid[HCLGE_MAX_TC_NUM];
8173 	u16 tc_size[HCLGE_MAX_TC_NUM];
8174 	u16 roundup_size;
8175 	u32 *rss_indir;
8176 	int ret, i;
8177 
8178 	kinfo->req_rss_size = new_tqps_num;
8179 
8180 	ret = hclge_tm_vport_map_update(hdev);
8181 	if (ret) {
8182 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8183 		return ret;
8184 	}
8185 
8186 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
8187 	roundup_size = ilog2(roundup_size);
8188 	/* Set the RSS TC mode according to the new RSS size */
8189 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8190 		tc_valid[i] = 0;
8191 
8192 		if (!(hdev->hw_tc_map & BIT(i)))
8193 			continue;
8194 
8195 		tc_valid[i] = 1;
8196 		tc_size[i] = roundup_size;
8197 		tc_offset[i] = kinfo->rss_size * i;
8198 	}
8199 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8200 	if (ret)
8201 		return ret;
8202 
8203 	/* RSS indirection table has been configured by the user */
8204 	if (rxfh_configured)
8205 		goto out;
8206 
8207 	/* Reinitialize the RSS indirection table according to the new RSS size */
8208 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8209 	if (!rss_indir)
8210 		return -ENOMEM;
8211 
8212 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8213 		rss_indir[i] = i % kinfo->rss_size;
8214 
8215 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8216 	if (ret)
8217 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8218 			ret);
8219 
8220 	kfree(rss_indir);
8221 
8222 out:
8223 	if (!ret)
8224 		dev_info(&hdev->pdev->dev,
8225 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8226 			 cur_rss_size, kinfo->rss_size,
8227 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8228 
8229 	return ret;
8230 }
8231 
8232 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8233 			      u32 *regs_num_64_bit)
8234 {
8235 	struct hclge_desc desc;
8236 	u32 total_num;
8237 	int ret;
8238 
8239 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8240 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8241 	if (ret) {
8242 		dev_err(&hdev->pdev->dev,
8243 			"Query register number cmd failed, ret = %d.\n", ret);
8244 		return ret;
8245 	}
8246 
8247 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
8248 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
8249 
8250 	total_num = *regs_num_32_bit + *regs_num_64_bit;
8251 	if (!total_num)
8252 		return -EINVAL;
8253 
8254 	return 0;
8255 }
8256 
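/* hclge_get_32_bit_regs() - read @regs_num 32 bit registers from firmware.
 * Each descriptor returns HCLGE_32_BIT_REG_RTN_DATANUM 32 bit words, except
 * the first one, which loses two words to the command header; hence the
 * descriptor count is derived from (regs_num + 2).
 */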
8257 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8258 				 void *data)
8259 {
8260 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8261 
8262 	struct hclge_desc *desc;
8263 	u32 *reg_val = data;
8264 	__le32 *desc_data;
8265 	int cmd_num;
8266 	int i, k, n;
8267 	int ret;
8268 
8269 	if (regs_num == 0)
8270 		return 0;
8271 
8272 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8273 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8274 	if (!desc)
8275 		return -ENOMEM;
8276 
8277 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8278 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8279 	if (ret) {
8280 		dev_err(&hdev->pdev->dev,
8281 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
8282 		kfree(desc);
8283 		return ret;
8284 	}
8285 
8286 	for (i = 0; i < cmd_num; i++) {
8287 		if (i == 0) {
8288 			desc_data = (__le32 *)(&desc[i].data[0]);
8289 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8290 		} else {
8291 			desc_data = (__le32 *)(&desc[i]);
8292 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
8293 		}
8294 		for (k = 0; k < n; k++) {
8295 			*reg_val++ = le32_to_cpu(*desc_data++);
8296 
8297 			regs_num--;
8298 			if (!regs_num)
8299 				break;
8300 		}
8301 	}
8302 
8303 	kfree(desc);
8304 	return 0;
8305 }
8306 
8307 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8308 				 void *data)
8309 {
8310 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8311 
8312 	struct hclge_desc *desc;
8313 	u64 *reg_val = data;
8314 	__le64 *desc_data;
8315 	int cmd_num;
8316 	int i, k, n;
8317 	int ret;
8318 
8319 	if (regs_num == 0)
8320 		return 0;
8321 
8322 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8323 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8324 	if (!desc)
8325 		return -ENOMEM;
8326 
8327 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8328 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8329 	if (ret) {
8330 		dev_err(&hdev->pdev->dev,
8331 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
8332 		kfree(desc);
8333 		return ret;
8334 	}
8335 
8336 	for (i = 0; i < cmd_num; i++) {
8337 		if (i == 0) {
8338 			desc_data = (__le64 *)(&desc[i].data[0]);
8339 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8340 		} else {
8341 			desc_data = (__le64 *)(&desc[i]);
8342 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
8343 		}
8344 		for (k = 0; k < n; k++) {
8345 			*reg_val++ = le64_to_cpu(*desc_data++);
8346 
8347 			regs_num--;
8348 			if (!regs_num)
8349 				break;
8350 		}
8351 	}
8352 
8353 	kfree(desc);
8354 	return 0;
8355 }
8356 
8357 #define MAX_SEPARATE_NUM	4
8358 #define SEPARATOR_VALUE		0xFFFFFFFF
8359 #define REG_NUM_PER_LINE	4
8360 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
8361 
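/* hclge_get_regs_len() - size of the register dump exposed via ethtool.
 * The dump holds the directly read register blocks (cmdq, common, one ring
 * block per TQP and one block per TQP interrupt vector), each padded to a
 * REG_LEN_PER_LINE boundary, followed by the 32 bit and 64 bit register
 * sets queried from firmware.
 */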
8362 static int hclge_get_regs_len(struct hnae3_handle *handle)
8363 {
8364 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8365 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8366 	struct hclge_vport *vport = hclge_get_vport(handle);
8367 	struct hclge_dev *hdev = vport->back;
8368 	u32 regs_num_32_bit, regs_num_64_bit;
8369 	int ret;
8370 
8371 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8372 	if (ret) {
8373 		dev_err(&hdev->pdev->dev,
8374 			"Get register number failed, ret = %d.\n", ret);
8375 		return -EOPNOTSUPP;
8376 	}
8377 
8378 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8379 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8380 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8381 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8382 
8383 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8384 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8385 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8386 }
8387 
8388 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8389 			   void *data)
8390 {
8391 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8392 	struct hclge_vport *vport = hclge_get_vport(handle);
8393 	struct hclge_dev *hdev = vport->back;
8394 	u32 regs_num_32_bit, regs_num_64_bit;
8395 	int i, j, reg_um, separator_num;
8396 	u32 *reg = data;
8397 	int ret;
8398 
8399 	*version = hdev->fw_version;
8400 
8401 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8402 	if (ret) {
8403 		dev_err(&hdev->pdev->dev,
8404 			"Get register number failed, ret = %d.\n", ret);
8405 		return;
8406 	}
8407 
8408 	/* fetching per-PF register values from PF PCIe register space */
8409 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8410 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8411 	for (i = 0; i < reg_um; i++)
8412 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8413 	for (i = 0; i < separator_num; i++)
8414 		*reg++ = SEPARATOR_VALUE;
8415 
8416 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8417 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8418 	for (i = 0; i < reg_um; i++)
8419 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8420 	for (i = 0; i < separator_num; i++)
8421 		*reg++ = SEPARATOR_VALUE;
8422 
8423 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8424 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8425 	for (j = 0; j < kinfo->num_tqps; j++) {
8426 		for (i = 0; i < reg_um; i++)
8427 			*reg++ = hclge_read_dev(&hdev->hw,
8428 						ring_reg_addr_list[i] +
8429 						0x200 * j);
8430 		for (i = 0; i < separator_num; i++)
8431 			*reg++ = SEPARATOR_VALUE;
8432 	}
8433 
8434 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8435 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8436 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
8437 		for (i = 0; i < reg_um; i++)
8438 			*reg++ = hclge_read_dev(&hdev->hw,
8439 						tqp_intr_reg_addr_list[i] +
8440 						4 * j);
8441 		for (i = 0; i < separator_num; i++)
8442 			*reg++ = SEPARATOR_VALUE;
8443 	}
8444 
8445 	/* fetching PF common register values from firmware */
8446 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8447 	if (ret) {
8448 		dev_err(&hdev->pdev->dev,
8449 			"Get 32 bit register failed, ret = %d.\n", ret);
8450 		return;
8451 	}
8452 
8453 	reg += regs_num_32_bit;
8454 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8455 	if (ret)
8456 		dev_err(&hdev->pdev->dev,
8457 			"Get 64 bit register failed, ret = %d.\n", ret);
8458 }
8459 
8460 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8461 {
8462 	struct hclge_set_led_state_cmd *req;
8463 	struct hclge_desc desc;
8464 	int ret;
8465 
8466 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8467 
8468 	req = (struct hclge_set_led_state_cmd *)desc.data;
8469 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8470 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8471 
8472 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8473 	if (ret)
8474 		dev_err(&hdev->pdev->dev,
8475 			"Send set led state cmd error, ret =%d\n", ret);
8476 
8477 	return ret;
8478 }
8479 
8480 enum hclge_led_status {
8481 	HCLGE_LED_OFF,
8482 	HCLGE_LED_ON,
8483 	HCLGE_LED_NO_CHANGE = 0xFF,
8484 };
8485 
8486 static int hclge_set_led_id(struct hnae3_handle *handle,
8487 			    enum ethtool_phys_id_state status)
8488 {
8489 	struct hclge_vport *vport = hclge_get_vport(handle);
8490 	struct hclge_dev *hdev = vport->back;
8491 
8492 	switch (status) {
8493 	case ETHTOOL_ID_ACTIVE:
8494 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
8495 	case ETHTOOL_ID_INACTIVE:
8496 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8497 	default:
8498 		return -EINVAL;
8499 	}
8500 }
8501 
8502 static void hclge_get_link_mode(struct hnae3_handle *handle,
8503 				unsigned long *supported,
8504 				unsigned long *advertising)
8505 {
8506 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8507 	struct hclge_vport *vport = hclge_get_vport(handle);
8508 	struct hclge_dev *hdev = vport->back;
8509 	unsigned int idx = 0;
8510 
8511 	for (; idx < size; idx++) {
8512 		supported[idx] = hdev->hw.mac.supported[idx];
8513 		advertising[idx] = hdev->hw.mac.advertising[idx];
8514 	}
8515 }
8516 
8517 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8518 {
8519 	struct hclge_vport *vport = hclge_get_vport(handle);
8520 	struct hclge_dev *hdev = vport->back;
8521 
8522 	return hclge_config_gro(hdev, enable);
8523 }
8524 
8525 static const struct hnae3_ae_ops hclge_ops = {
8526 	.init_ae_dev = hclge_init_ae_dev,
8527 	.uninit_ae_dev = hclge_uninit_ae_dev,
8528 	.flr_prepare = hclge_flr_prepare,
8529 	.flr_done = hclge_flr_done,
8530 	.init_client_instance = hclge_init_client_instance,
8531 	.uninit_client_instance = hclge_uninit_client_instance,
8532 	.map_ring_to_vector = hclge_map_ring_to_vector,
8533 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8534 	.get_vector = hclge_get_vector,
8535 	.put_vector = hclge_put_vector,
8536 	.set_promisc_mode = hclge_set_promisc_mode,
8537 	.set_loopback = hclge_set_loopback,
8538 	.start = hclge_ae_start,
8539 	.stop = hclge_ae_stop,
8540 	.client_start = hclge_client_start,
8541 	.client_stop = hclge_client_stop,
8542 	.get_status = hclge_get_status,
8543 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
8544 	.update_speed_duplex_h = hclge_update_speed_duplex_h,
8545 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8546 	.get_media_type = hclge_get_media_type,
8547 	.get_rss_key_size = hclge_get_rss_key_size,
8548 	.get_rss_indir_size = hclge_get_rss_indir_size,
8549 	.get_rss = hclge_get_rss,
8550 	.set_rss = hclge_set_rss,
8551 	.set_rss_tuple = hclge_set_rss_tuple,
8552 	.get_rss_tuple = hclge_get_rss_tuple,
8553 	.get_tc_size = hclge_get_tc_size,
8554 	.get_mac_addr = hclge_get_mac_addr,
8555 	.set_mac_addr = hclge_set_mac_addr,
8556 	.do_ioctl = hclge_do_ioctl,
8557 	.add_uc_addr = hclge_add_uc_addr,
8558 	.rm_uc_addr = hclge_rm_uc_addr,
8559 	.add_mc_addr = hclge_add_mc_addr,
8560 	.rm_mc_addr = hclge_rm_mc_addr,
8561 	.set_autoneg = hclge_set_autoneg,
8562 	.get_autoneg = hclge_get_autoneg,
8563 	.get_pauseparam = hclge_get_pauseparam,
8564 	.set_pauseparam = hclge_set_pauseparam,
8565 	.set_mtu = hclge_set_mtu,
8566 	.reset_queue = hclge_reset_tqp,
8567 	.get_stats = hclge_get_stats,
8568 	.get_mac_pause_stats = hclge_get_mac_pause_stat,
8569 	.update_stats = hclge_update_stats,
8570 	.get_strings = hclge_get_strings,
8571 	.get_sset_count = hclge_get_sset_count,
8572 	.get_fw_version = hclge_get_fw_version,
8573 	.get_mdix_mode = hclge_get_mdix_mode,
8574 	.enable_vlan_filter = hclge_enable_vlan_filter,
8575 	.set_vlan_filter = hclge_set_vlan_filter,
8576 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8577 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8578 	.reset_event = hclge_reset_event,
8579 	.set_default_reset_request = hclge_set_def_reset_request,
8580 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8581 	.set_channels = hclge_set_channels,
8582 	.get_channels = hclge_get_channels,
8583 	.get_regs_len = hclge_get_regs_len,
8584 	.get_regs = hclge_get_regs,
8585 	.set_led_id = hclge_set_led_id,
8586 	.get_link_mode = hclge_get_link_mode,
8587 	.add_fd_entry = hclge_add_fd_entry,
8588 	.del_fd_entry = hclge_del_fd_entry,
8589 	.del_all_fd_entries = hclge_del_all_fd_entries,
8590 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8591 	.get_fd_rule_info = hclge_get_fd_rule_info,
8592 	.get_fd_all_rules = hclge_get_all_rules,
8593 	.restore_fd_rules = hclge_restore_fd_entries,
8594 	.enable_fd = hclge_enable_fd,
8595 	.dbg_run_cmd = hclge_dbg_run_cmd,
8596 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
8597 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
8598 	.ae_dev_resetting = hclge_ae_dev_resetting,
8599 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8600 	.set_gro_en = hclge_gro_en,
8601 	.get_global_queue_id = hclge_covert_handle_qid_global,
8602 	.set_timer_task = hclge_set_timer_task,
8603 	.mac_connect_phy = hclge_mac_connect_phy,
8604 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
8605 };
8606 
8607 static struct hnae3_ae_algo ae_algo = {
8608 	.ops = &hclge_ops,
8609 	.pdev_id_table = ae_algo_pci_tbl,
8610 };
8611 
8612 static int hclge_init(void)
8613 {
8614 	pr_info("%s is initializing\n", HCLGE_NAME);
8615 
8616 	hnae3_register_ae_algo(&ae_algo);
8617 
8618 	return 0;
8619 }
8620 
8621 static void hclge_exit(void)
8622 {
8623 	hnae3_unregister_ae_algo(&ae_algo);
8624 }
8625 module_init(hclge_init);
8626 module_exit(hclge_exit);
8627 
8628 MODULE_LICENSE("GPL");
8629 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8630 MODULE_DESCRIPTION("HCLGE Driver");
8631 MODULE_VERSION(HCLGE_MOD_VERSION);
8632