1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256
31 
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37 			       u16 *allocated_size, bool is_alloc);
38 
39 static struct hnae3_ae_algo ae_algo;
40 
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 	/* required last entry */
50 	{0, }
51 };
52 
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
54 
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56 					 HCLGE_CMDQ_TX_ADDR_H_REG,
57 					 HCLGE_CMDQ_TX_DEPTH_REG,
58 					 HCLGE_CMDQ_TX_TAIL_REG,
59 					 HCLGE_CMDQ_TX_HEAD_REG,
60 					 HCLGE_CMDQ_RX_ADDR_L_REG,
61 					 HCLGE_CMDQ_RX_ADDR_H_REG,
62 					 HCLGE_CMDQ_RX_DEPTH_REG,
63 					 HCLGE_CMDQ_RX_TAIL_REG,
64 					 HCLGE_CMDQ_RX_HEAD_REG,
65 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
66 					 HCLGE_CMDQ_INTR_STS_REG,
67 					 HCLGE_CMDQ_INTR_EN_REG,
68 					 HCLGE_CMDQ_INTR_GEN_REG};
69 
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71 					   HCLGE_VECTOR0_OTER_EN_REG,
72 					   HCLGE_MISC_RESET_STS_REG,
73 					   HCLGE_MISC_VECTOR_INT_STS,
74 					   HCLGE_GLOBAL_RESET_REG,
75 					   HCLGE_FUN_RST_ING,
76 					   HCLGE_GRO_EN_REG};
77 
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79 					 HCLGE_RING_RX_ADDR_H_REG,
80 					 HCLGE_RING_RX_BD_NUM_REG,
81 					 HCLGE_RING_RX_BD_LENGTH_REG,
82 					 HCLGE_RING_RX_MERGE_EN_REG,
83 					 HCLGE_RING_RX_TAIL_REG,
84 					 HCLGE_RING_RX_HEAD_REG,
85 					 HCLGE_RING_RX_FBD_NUM_REG,
86 					 HCLGE_RING_RX_OFFSET_REG,
87 					 HCLGE_RING_RX_FBD_OFFSET_REG,
88 					 HCLGE_RING_RX_STASH_REG,
89 					 HCLGE_RING_RX_BD_ERR_REG,
90 					 HCLGE_RING_TX_ADDR_L_REG,
91 					 HCLGE_RING_TX_ADDR_H_REG,
92 					 HCLGE_RING_TX_BD_NUM_REG,
93 					 HCLGE_RING_TX_PRIORITY_REG,
94 					 HCLGE_RING_TX_TC_REG,
95 					 HCLGE_RING_TX_MERGE_EN_REG,
96 					 HCLGE_RING_TX_TAIL_REG,
97 					 HCLGE_RING_TX_HEAD_REG,
98 					 HCLGE_RING_TX_FBD_NUM_REG,
99 					 HCLGE_RING_TX_OFFSET_REG,
100 					 HCLGE_RING_TX_EBD_NUM_REG,
101 					 HCLGE_RING_TX_EBD_OFFSET_REG,
102 					 HCLGE_RING_TX_BD_ERR_REG,
103 					 HCLGE_RING_EN_REG};
104 
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106 					     HCLGE_TQP_INTR_GL0_REG,
107 					     HCLGE_TQP_INTR_GL1_REG,
108 					     HCLGE_TQP_INTR_GL2_REG,
109 					     HCLGE_TQP_INTR_RL_REG};
110 
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
112 	"App    Loopback test",
113 	"Serdes serial Loopback test",
114 	"Serdes parallel Loopback test",
115 	"Phy    Loopback test"
116 };
117 
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119 	{"mac_tx_mac_pause_num",
120 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121 	{"mac_rx_mac_pause_num",
122 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123 	{"mac_tx_control_pkt_num",
124 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125 	{"mac_rx_control_pkt_num",
126 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127 	{"mac_tx_pfc_pkt_num",
128 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129 	{"mac_tx_pfc_pri0_pkt_num",
130 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131 	{"mac_tx_pfc_pri1_pkt_num",
132 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133 	{"mac_tx_pfc_pri2_pkt_num",
134 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135 	{"mac_tx_pfc_pri3_pkt_num",
136 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137 	{"mac_tx_pfc_pri4_pkt_num",
138 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139 	{"mac_tx_pfc_pri5_pkt_num",
140 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141 	{"mac_tx_pfc_pri6_pkt_num",
142 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143 	{"mac_tx_pfc_pri7_pkt_num",
144 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145 	{"mac_rx_pfc_pkt_num",
146 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147 	{"mac_rx_pfc_pri0_pkt_num",
148 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149 	{"mac_rx_pfc_pri1_pkt_num",
150 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151 	{"mac_rx_pfc_pri2_pkt_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153 	{"mac_rx_pfc_pri3_pkt_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155 	{"mac_rx_pfc_pri4_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157 	{"mac_rx_pfc_pri5_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159 	{"mac_rx_pfc_pri6_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161 	{"mac_rx_pfc_pri7_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163 	{"mac_tx_total_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165 	{"mac_tx_total_oct_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167 	{"mac_tx_good_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169 	{"mac_tx_bad_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171 	{"mac_tx_good_oct_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173 	{"mac_tx_bad_oct_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175 	{"mac_tx_uni_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177 	{"mac_tx_multi_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179 	{"mac_tx_broad_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181 	{"mac_tx_undersize_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183 	{"mac_tx_oversize_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185 	{"mac_tx_64_oct_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187 	{"mac_tx_65_127_oct_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189 	{"mac_tx_128_255_oct_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191 	{"mac_tx_256_511_oct_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193 	{"mac_tx_512_1023_oct_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195 	{"mac_tx_1024_1518_oct_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197 	{"mac_tx_1519_2047_oct_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199 	{"mac_tx_2048_4095_oct_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201 	{"mac_tx_4096_8191_oct_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203 	{"mac_tx_8192_9216_oct_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205 	{"mac_tx_9217_12287_oct_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207 	{"mac_tx_12288_16383_oct_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209 	{"mac_tx_1519_max_good_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211 	{"mac_tx_1519_max_bad_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213 	{"mac_rx_total_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215 	{"mac_rx_total_oct_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217 	{"mac_rx_good_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219 	{"mac_rx_bad_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221 	{"mac_rx_good_oct_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223 	{"mac_rx_bad_oct_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225 	{"mac_rx_uni_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227 	{"mac_rx_multi_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229 	{"mac_rx_broad_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231 	{"mac_rx_undersize_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233 	{"mac_rx_oversize_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235 	{"mac_rx_64_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237 	{"mac_rx_65_127_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239 	{"mac_rx_128_255_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241 	{"mac_rx_256_511_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243 	{"mac_rx_512_1023_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245 	{"mac_rx_1024_1518_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247 	{"mac_rx_1519_2047_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249 	{"mac_rx_2048_4095_oct_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251 	{"mac_rx_4096_8191_oct_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253 	{"mac_rx_8192_9216_oct_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255 	{"mac_rx_9217_12287_oct_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257 	{"mac_rx_12288_16383_oct_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259 	{"mac_rx_1519_max_good_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261 	{"mac_rx_1519_max_bad_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
263 
264 	{"mac_tx_fragment_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266 	{"mac_tx_undermin_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268 	{"mac_tx_jabber_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270 	{"mac_tx_err_all_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272 	{"mac_tx_from_app_good_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274 	{"mac_tx_from_app_bad_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276 	{"mac_rx_fragment_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278 	{"mac_rx_undermin_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280 	{"mac_rx_jabber_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282 	{"mac_rx_fcs_err_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284 	{"mac_rx_send_app_good_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286 	{"mac_rx_send_app_bad_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
288 };
289 
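/* Static MAC manager table entry matching LLDP frames (EtherType
 * HCLGE_MAC_ETHERTYPE_LLDP, destination MAC 01:80:c2:00:00:0e); the
 * VLAN field is masked out of the match.
 */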
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
291 	{
292 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293 		.ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296 		.i_port_bitmap = 0x1,
297 	},
298 };
299 
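/* Default 40-byte RSS hash key; this is the widely used default
 * Toeplitz key shared by many other NIC drivers.
 */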
300 static const u8 hclge_hash_key[] = {
301 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
306 };
307 
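/* Read the MAC statistics through the fixed-length HCLGE_OPC_STATS_MAC
 * command (21 descriptors). Used as a fallback when the firmware does
 * not support querying the statistics register number.
 */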
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
309 {
310 #define HCLGE_MAC_CMD_NUM 21
311 
312 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
314 	__le64 *desc_data;
315 	int i, k, n;
316 	int ret;
317 
318 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
320 	if (ret) {
321 		dev_err(&hdev->pdev->dev,
322 			"Get MAC pkt stats fail, status = %d.\n", ret);
323 
324 		return ret;
325 	}
326 
327 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328 		/* for special opcode 0x0032, only the first desc has the head */
329 		if (unlikely(i == 0)) {
330 			desc_data = (__le64 *)(&desc[i].data[0]);
331 			n = HCLGE_RD_FIRST_STATS_NUM;
332 		} else {
333 			desc_data = (__le64 *)(&desc[i]);
334 			n = HCLGE_RD_OTHER_STATS_NUM;
335 		}
336 
337 		for (k = 0; k < n; k++) {
338 			*data += le64_to_cpu(*desc_data);
339 			data++;
340 			desc_data++;
341 		}
342 	}
343 
344 	return 0;
345 }
346 
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
348 {
349 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350 	struct hclge_desc *desc;
351 	__le64 *desc_data;
352 	u16 i, k, n;
353 	int ret;
354 
355 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
356 	if (!desc)
357 		return -ENOMEM;
358 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
360 	if (ret) {
361 		kfree(desc);
362 		return ret;
363 	}
364 
365 	for (i = 0; i < desc_num; i++) {
366 		/* for special opcode 0x0034, only the first desc has the head */
367 		if (i == 0) {
368 			desc_data = (__le64 *)(&desc[i].data[0]);
369 			n = HCLGE_RD_FIRST_STATS_NUM;
370 		} else {
371 			desc_data = (__le64 *)(&desc[i]);
372 			n = HCLGE_RD_OTHER_STATS_NUM;
373 		}
374 
375 		for (k = 0; k < n; k++) {
376 			*data += le64_to_cpu(*desc_data);
377 			data++;
378 			desc_data++;
379 		}
380 	}
381 
382 	kfree(desc);
383 
384 	return 0;
385 }
386 
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
388 {
389 	struct hclge_desc desc;
390 	__le32 *desc_data;
391 	u32 reg_num;
392 	int ret;
393 
394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
396 	if (ret)
397 		return ret;
398 
399 	desc_data = (__le32 *)(&desc.data[0]);
400 	reg_num = le32_to_cpu(*desc_data);
401 
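	/* The head descriptor covers the first three registers and every
	 * additional descriptor covers four more, so the required count
	 * is 1 + DIV_ROUND_UP(reg_num - 3, 4).
	 */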
402 	*desc_num = 1 + ((reg_num - 3) >> 2) +
403 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
404 
405 	return 0;
406 }
407 
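/* Query how many MAC statistics registers the firmware exposes. If the
 * query succeeds, use the variable-length read; if the firmware returns
 * -EOPNOTSUPP, fall back to the fixed 21-descriptor read.
 */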
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
409 {
410 	u32 desc_num;
411 	int ret;
412 
413 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
414 
415 	/* The firmware supports the new statistics acquisition method */
416 	if (!ret)
417 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
418 	else if (ret == -EOPNOTSUPP)
419 		ret = hclge_mac_update_stats_defective(hdev);
420 	else
421 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
422 
423 	return ret;
424 }
425 
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
427 {
428 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429 	struct hclge_vport *vport = hclge_get_vport(handle);
430 	struct hclge_dev *hdev = vport->back;
431 	struct hnae3_queue *queue;
432 	struct hclge_desc desc[1];
433 	struct hclge_tqp *tqp;
434 	int ret, i;
435 
436 	for (i = 0; i < kinfo->num_tqps; i++) {
437 		queue = handle->kinfo.tqp[i];
438 		tqp = container_of(queue, struct hclge_tqp, q);
439 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
440 		hclge_cmd_setup_basic_desc(&desc[0],
441 					   HCLGE_OPC_QUERY_RX_STATUS,
442 					   true);
443 
444 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
446 		if (ret) {
447 			dev_err(&hdev->pdev->dev,
448 				"Query tqp stat fail, status = %d, queue = %d\n",
449 				ret, i);
450 			return ret;
451 		}
452 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453 			le32_to_cpu(desc[0].data[1]);
454 	}
455 
456 	for (i = 0; i < kinfo->num_tqps; i++) {
457 		queue = handle->kinfo.tqp[i];
458 		tqp = container_of(queue, struct hclge_tqp, q);
459 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
460 		hclge_cmd_setup_basic_desc(&desc[0],
461 					   HCLGE_OPC_QUERY_TX_STATUS,
462 					   true);
463 
464 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
466 		if (ret) {
467 			dev_err(&hdev->pdev->dev,
468 				"Query tqp stat fail, status = %d, queue = %d\n",
469 				ret, i);
470 			return ret;
471 		}
472 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473 			le32_to_cpu(desc[0].data[1]);
474 	}
475 
476 	return 0;
477 }
478 
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
480 {
481 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482 	struct hclge_tqp *tqp;
483 	u64 *buff = data;
484 	int i;
485 
486 	for (i = 0; i < kinfo->num_tqps; i++) {
487 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
489 	}
490 
491 	for (i = 0; i < kinfo->num_tqps; i++) {
492 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
494 	}
495 
496 	return buff;
497 }
498 
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
500 {
501 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
502 
503 	return kinfo->num_tqps * 2;
504 }
505 
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
507 {
508 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
509 	u8 *buff = data;
510 	int i = 0;
511 
512 	for (i = 0; i < kinfo->num_tqps; i++) {
513 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514 			struct hclge_tqp, q);
515 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
516 			 tqp->index);
517 		buff = buff + ETH_GSTRING_LEN;
518 	}
519 
520 	for (i = 0; i < kinfo->num_tqps; i++) {
521 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522 			struct hclge_tqp, q);
523 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
524 			 tqp->index);
525 		buff = buff + ETH_GSTRING_LEN;
526 	}
527 
528 	return buff;
529 }
530 
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532 				 const struct hclge_comm_stats_str strs[],
533 				 int size, u64 *data)
534 {
535 	u64 *buf = data;
536 	u32 i;
537 
538 	for (i = 0; i < size; i++)
539 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
540 
541 	return buf + size;
542 }
543 
544 static u8 *hclge_comm_get_strings(u32 stringset,
545 				  const struct hclge_comm_stats_str strs[],
546 				  int size, u8 *data)
547 {
548 	char *buff = (char *)data;
549 	u32 i;
550 
551 	if (stringset != ETH_SS_STATS)
552 		return buff;
553 
554 	for (i = 0; i < size; i++) {
555 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
557 		buff = buff + ETH_GSTRING_LEN;
558 	}
559 
560 	return (u8 *)buff;
561 }
562 
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
564 {
565 	struct hnae3_handle *handle;
566 	int status;
567 
568 	handle = &hdev->vport[0].nic;
569 	if (handle->client) {
570 		status = hclge_tqps_update_stats(handle);
571 		if (status) {
572 			dev_err(&hdev->pdev->dev,
573 				"Update TQPS stats fail, status = %d.\n",
574 				status);
575 		}
576 	}
577 
578 	status = hclge_mac_update_stats(hdev);
579 	if (status)
580 		dev_err(&hdev->pdev->dev,
581 			"Update MAC stats fail, status = %d.\n", status);
582 }
583 
584 static void hclge_update_stats(struct hnae3_handle *handle,
585 			       struct net_device_stats *net_stats)
586 {
587 	struct hclge_vport *vport = hclge_get_vport(handle);
588 	struct hclge_dev *hdev = vport->back;
589 	int status;
590 
591 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
592 		return;
593 
594 	status = hclge_mac_update_stats(hdev);
595 	if (status)
596 		dev_err(&hdev->pdev->dev,
597 			"Update MAC stats fail, status = %d.\n",
598 			status);
599 
600 	status = hclge_tqps_update_stats(handle);
601 	if (status)
602 		dev_err(&hdev->pdev->dev,
603 			"Update TQPS stats fail, status = %d.\n",
604 			status);
605 
606 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
607 }
608 
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
610 {
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612 		HNAE3_SUPPORT_PHY_LOOPBACK |\
613 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
615 
616 	struct hclge_vport *vport = hclge_get_vport(handle);
617 	struct hclge_dev *hdev = vport->back;
618 	int count = 0;
619 
620 	/* Loopback test support rules:
621 	 * mac: only GE mode supports it
622 	 * serdes: all mac modes (GE/XGE/LGE/CGE) support it
623 	 * phy: only supported when a phy device exists on the board
624 	 */
625 	if (stringset == ETH_SS_TEST) {
626 		/* clear loopback bit flags at first */
627 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628 		if (hdev->pdev->revision >= 0x21 ||
629 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
632 			count += 1;
633 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
634 		}
635 
636 		count += 2;
637 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639 	} else if (stringset == ETH_SS_STATS) {
640 		count = ARRAY_SIZE(g_mac_stats_string) +
641 			hclge_tqps_get_sset_count(handle, stringset);
642 	}
643 
644 	return count;
645 }
646 
647 static void hclge_get_strings(struct hnae3_handle *handle,
648 			      u32 stringset,
649 			      u8 *data)
650 {
651 	u8 *p = data;
652 	int size;
653 
654 	if (stringset == ETH_SS_STATS) {
655 		size = ARRAY_SIZE(g_mac_stats_string);
656 		p = hclge_comm_get_strings(stringset,
657 					   g_mac_stats_string,
658 					   size,
659 					   p);
660 		p = hclge_tqps_get_strings(handle, p);
661 	} else if (stringset == ETH_SS_TEST) {
662 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
663 			memcpy(p,
664 			       hns3_nic_test_strs[HNAE3_LOOP_APP],
665 			       ETH_GSTRING_LEN);
666 			p += ETH_GSTRING_LEN;
667 		}
668 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
669 			memcpy(p,
670 			       hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
671 			       ETH_GSTRING_LEN);
672 			p += ETH_GSTRING_LEN;
673 		}
674 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
675 			memcpy(p,
676 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
677 			       ETH_GSTRING_LEN);
678 			p += ETH_GSTRING_LEN;
679 		}
680 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
681 			memcpy(p,
682 			       hns3_nic_test_strs[HNAE3_LOOP_PHY],
683 			       ETH_GSTRING_LEN);
684 			p += ETH_GSTRING_LEN;
685 		}
686 	}
687 }
688 
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
690 {
691 	struct hclge_vport *vport = hclge_get_vport(handle);
692 	struct hclge_dev *hdev = vport->back;
693 	u64 *p;
694 
695 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
696 				 g_mac_stats_string,
697 				 ARRAY_SIZE(g_mac_stats_string),
698 				 data);
699 	p = hclge_tqps_get_stats(handle, p);
700 }
701 
702 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
703 				     u64 *rx_cnt)
704 {
705 	struct hclge_vport *vport = hclge_get_vport(handle);
706 	struct hclge_dev *hdev = vport->back;
707 
708 	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709 	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
710 }
711 
712 static int hclge_parse_func_status(struct hclge_dev *hdev,
713 				   struct hclge_func_status_cmd *status)
714 {
715 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
716 		return -EINVAL;
717 
718 	/* Set or clear the main-pf flag according to the pf state */
719 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
720 		hdev->flag |= HCLGE_FLAG_MAIN;
721 	else
722 		hdev->flag &= ~HCLGE_FLAG_MAIN;
723 
724 	return 0;
725 }
726 
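/* Poll the firmware up to five times (1-2 ms apart) until it reports a
 * non-zero PF state, i.e. PF reset has completed, then parse whether
 * this PF is the main PF.
 */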
727 static int hclge_query_function_status(struct hclge_dev *hdev)
728 {
729 	struct hclge_func_status_cmd *req;
730 	struct hclge_desc desc;
731 	int timeout = 0;
732 	int ret;
733 
734 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
735 	req = (struct hclge_func_status_cmd *)desc.data;
736 
737 	do {
738 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
739 		if (ret) {
740 			dev_err(&hdev->pdev->dev,
741 				"query function status failed %d.\n",
742 				ret);
743 
744 			return ret;
745 		}
746 
747 		/* Check whether pf reset is done */
748 		if (req->pf_state)
749 			break;
750 		usleep_range(1000, 2000);
751 	} while (timeout++ < 5);
752 
753 	ret = hclge_parse_func_status(hdev, req);
754 
755 	return ret;
756 }
757 
758 static int hclge_query_pf_resource(struct hclge_dev *hdev)
759 {
760 	struct hclge_pf_res_cmd *req;
761 	struct hclge_desc desc;
762 	int ret;
763 
764 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
766 	if (ret) {
767 		dev_err(&hdev->pdev->dev,
768 			"query pf resource failed %d.\n", ret);
769 		return ret;
770 	}
771 
772 	req = (struct hclge_pf_res_cmd *)desc.data;
773 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
774 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
775 
776 	if (req->tx_buf_size)
777 		hdev->tx_buf_size =
778 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
779 	else
780 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
781 
782 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
783 
784 	if (req->dv_buf_size)
785 		hdev->dv_buf_size =
786 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
787 	else
788 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
789 
790 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
791 
792 	if (hnae3_dev_roce_supported(hdev)) {
793 		hdev->roce_base_msix_offset =
794 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
796 		hdev->num_roce_msi =
797 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
799 
800 		/* PF should have NIC vectors and RoCE vectors,
801 		 * and NIC vectors are queued before RoCE vectors.
802 		 */
803 		hdev->num_msi = hdev->num_roce_msi +
804 				hdev->roce_base_msix_offset;
805 	} else {
806 		hdev->num_msi =
807 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
809 	}
810 
811 	return 0;
812 }
813 
814 static int hclge_parse_speed(int speed_cmd, int *speed)
815 {
816 	switch (speed_cmd) {
817 	case 6:
818 		*speed = HCLGE_MAC_SPEED_10M;
819 		break;
820 	case 7:
821 		*speed = HCLGE_MAC_SPEED_100M;
822 		break;
823 	case 0:
824 		*speed = HCLGE_MAC_SPEED_1G;
825 		break;
826 	case 1:
827 		*speed = HCLGE_MAC_SPEED_10G;
828 		break;
829 	case 2:
830 		*speed = HCLGE_MAC_SPEED_25G;
831 		break;
832 	case 3:
833 		*speed = HCLGE_MAC_SPEED_40G;
834 		break;
835 	case 4:
836 		*speed = HCLGE_MAC_SPEED_50G;
837 		break;
838 	case 5:
839 		*speed = HCLGE_MAC_SPEED_100G;
840 		break;
841 	default:
842 		return -EINVAL;
843 	}
844 
845 	return 0;
846 }
847 
848 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
849 {
850 	struct hclge_vport *vport = hclge_get_vport(handle);
851 	struct hclge_dev *hdev = vport->back;
852 	u32 speed_ability = hdev->hw.mac.speed_ability;
853 	u32 speed_bit = 0;
854 
855 	switch (speed) {
856 	case HCLGE_MAC_SPEED_10M:
857 		speed_bit = HCLGE_SUPPORT_10M_BIT;
858 		break;
859 	case HCLGE_MAC_SPEED_100M:
860 		speed_bit = HCLGE_SUPPORT_100M_BIT;
861 		break;
862 	case HCLGE_MAC_SPEED_1G:
863 		speed_bit = HCLGE_SUPPORT_1G_BIT;
864 		break;
865 	case HCLGE_MAC_SPEED_10G:
866 		speed_bit = HCLGE_SUPPORT_10G_BIT;
867 		break;
868 	case HCLGE_MAC_SPEED_25G:
869 		speed_bit = HCLGE_SUPPORT_25G_BIT;
870 		break;
871 	case HCLGE_MAC_SPEED_40G:
872 		speed_bit = HCLGE_SUPPORT_40G_BIT;
873 		break;
874 	case HCLGE_MAC_SPEED_50G:
875 		speed_bit = HCLGE_SUPPORT_50G_BIT;
876 		break;
877 	case HCLGE_MAC_SPEED_100G:
878 		speed_bit = HCLGE_SUPPORT_100G_BIT;
879 		break;
880 	default:
881 		return -EINVAL;
882 	}
883 
884 	if (speed_bit & speed_ability)
885 		return 0;
886 
887 	return -EINVAL;
888 }
889 
890 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
891 {
892 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
893 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
894 				 mac->supported);
895 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
896 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
897 				 mac->supported);
898 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
899 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
900 				 mac->supported);
901 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
902 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
903 				 mac->supported);
904 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
905 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
906 				 mac->supported);
907 }
908 
909 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
910 {
911 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
912 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
913 				 mac->supported);
914 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
915 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
916 				 mac->supported);
917 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
918 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
919 				 mac->supported);
920 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
921 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
922 				 mac->supported);
923 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
924 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
925 				 mac->supported);
926 }
927 
928 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
929 {
930 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
931 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
932 				 mac->supported);
933 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
934 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
935 				 mac->supported);
936 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
937 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
938 				 mac->supported);
939 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
940 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
941 				 mac->supported);
942 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
943 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
944 				 mac->supported);
945 }
946 
947 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
948 {
949 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
950 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
951 				 mac->supported);
952 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
953 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
954 				 mac->supported);
955 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
956 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
957 				 mac->supported);
958 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
959 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
960 				 mac->supported);
961 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
962 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
963 				 mac->supported);
964 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
965 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
966 				 mac->supported);
967 }
968 
969 static void hclge_convert_setting_fec(struct hclge_mac *mac)
970 {
971 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
972 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
973 
974 	switch (mac->speed) {
975 	case HCLGE_MAC_SPEED_10G:
976 	case HCLGE_MAC_SPEED_40G:
977 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
978 				 mac->supported);
979 		mac->fec_ability =
980 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
981 		break;
982 	case HCLGE_MAC_SPEED_25G:
983 	case HCLGE_MAC_SPEED_50G:
984 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
985 				 mac->supported);
986 		mac->fec_ability =
987 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
988 			BIT(HNAE3_FEC_AUTO);
989 		break;
990 	case HCLGE_MAC_SPEED_100G:
991 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
992 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
993 		break;
994 	default:
995 		mac->fec_ability = 0;
996 		break;
997 	}
998 }
999 
1000 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1001 					u8 speed_ability)
1002 {
1003 	struct hclge_mac *mac = &hdev->hw.mac;
1004 
1005 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1006 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1007 				 mac->supported);
1008 
1009 	hclge_convert_setting_sr(mac, speed_ability);
1010 	hclge_convert_setting_lr(mac, speed_ability);
1011 	hclge_convert_setting_cr(mac, speed_ability);
1012 	if (hdev->pdev->revision >= 0x21)
1013 		hclge_convert_setting_fec(mac);
1014 
1015 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1016 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1017 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1018 }
1019 
1020 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1021 					    u8 speed_ability)
1022 {
1023 	struct hclge_mac *mac = &hdev->hw.mac;
1024 
1025 	hclge_convert_setting_kr(mac, speed_ability);
1026 	if (hdev->pdev->revision >= 0x21)
1027 		hclge_convert_setting_fec(mac);
1028 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1029 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1030 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1031 }
1032 
1033 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1034 					 u8 speed_ability)
1035 {
1036 	unsigned long *supported = hdev->hw.mac.supported;
1037 
1038 	/* default to support all speeds for a GE port */
1039 	if (!speed_ability)
1040 		speed_ability = HCLGE_SUPPORT_GE;
1041 
1042 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1044 				 supported);
1045 
1046 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1047 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1048 				 supported);
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1050 				 supported);
1051 	}
1052 
1053 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1054 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1056 	}
1057 
1058 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1059 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1060 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1061 }
1062 
1063 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1064 {
1065 	u8 media_type = hdev->hw.mac.media_type;
1066 
1067 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1068 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1069 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1070 		hclge_parse_copper_link_mode(hdev, speed_ability);
1071 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1072 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1073 }

1074 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1075 {
1076 	struct hclge_cfg_param_cmd *req;
1077 	u64 mac_addr_tmp_high;
1078 	u64 mac_addr_tmp;
1079 	int i;
1080 
1081 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1082 
1083 	/* get the configuration */
1084 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1085 					      HCLGE_CFG_VMDQ_M,
1086 					      HCLGE_CFG_VMDQ_S);
1087 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1088 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1089 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090 					    HCLGE_CFG_TQP_DESC_N_M,
1091 					    HCLGE_CFG_TQP_DESC_N_S);
1092 
1093 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1094 					HCLGE_CFG_PHY_ADDR_M,
1095 					HCLGE_CFG_PHY_ADDR_S);
1096 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1097 					  HCLGE_CFG_MEDIA_TP_M,
1098 					  HCLGE_CFG_MEDIA_TP_S);
1099 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1100 					  HCLGE_CFG_RX_BUF_LEN_M,
1101 					  HCLGE_CFG_RX_BUF_LEN_S);
1102 	/* get mac_address */
1103 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1104 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1105 					    HCLGE_CFG_MAC_ADDR_H_M,
1106 					    HCLGE_CFG_MAC_ADDR_H_S);
1107 
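	/* Combine the low 32 bits and the upper bits of the MAC address;
	 * the split shift below is equivalent to "<< 32".
	 */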
1108 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1109 
1110 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1111 					     HCLGE_CFG_DEFAULT_SPEED_M,
1112 					     HCLGE_CFG_DEFAULT_SPEED_S);
1113 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1114 					    HCLGE_CFG_RSS_SIZE_M,
1115 					    HCLGE_CFG_RSS_SIZE_S);
1116 
1117 	for (i = 0; i < ETH_ALEN; i++)
1118 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1119 
1120 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1121 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1122 
1123 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1124 					     HCLGE_CFG_SPEED_ABILITY_M,
1125 					     HCLGE_CFG_SPEED_ABILITY_S);
1126 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1127 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1128 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1129 	if (!cfg->umv_space)
1130 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1131 }
1132 
1133 /* hclge_get_cfg: query the static parameters from flash
1134  * @hdev: pointer to struct hclge_dev
1135  * @hcfg: the config structure in which the parameters are returned
1136  */
1137 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1138 {
1139 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1140 	struct hclge_cfg_param_cmd *req;
1141 	int i, ret;
1142 
1143 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1144 		u32 offset = 0;
1145 
1146 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1147 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1148 					   true);
1149 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1150 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1151 		/* The read length is in 4-byte units when sent to hardware */
1152 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1153 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1154 		req->offset = cpu_to_le32(offset);
1155 	}
1156 
1157 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1158 	if (ret) {
1159 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1160 		return ret;
1161 	}
1162 
1163 	hclge_parse_cfg(hcfg, desc);
1164 
1165 	return 0;
1166 }
1167 
1168 static int hclge_get_cap(struct hclge_dev *hdev)
1169 {
1170 	int ret;
1171 
1172 	ret = hclge_query_function_status(hdev);
1173 	if (ret) {
1174 		dev_err(&hdev->pdev->dev,
1175 			"query function status error %d.\n", ret);
1176 		return ret;
1177 	}
1178 
1179 	/* get pf resource */
1180 	ret = hclge_query_pf_resource(hdev);
1181 	if (ret)
1182 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1183 
1184 	return ret;
1185 }
1186 
1187 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1188 {
1189 #define HCLGE_MIN_TX_DESC	64
1190 #define HCLGE_MIN_RX_DESC	64
1191 
1192 	if (!is_kdump_kernel())
1193 		return;
1194 
1195 	dev_info(&hdev->pdev->dev,
1196 		 "Running kdump kernel. Using minimal resources\n");
1197 
1198 	/* the minimal number of queue pairs equals the number of vports */
1199 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1200 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1201 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1202 }
1203 
1204 static int hclge_configure(struct hclge_dev *hdev)
1205 {
1206 	struct hclge_cfg cfg;
1207 	int ret, i;
1208 
1209 	ret = hclge_get_cfg(hdev, &cfg);
1210 	if (ret) {
1211 		dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
1212 		return ret;
1213 	}
1214 
1215 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1216 	hdev->base_tqp_pid = 0;
1217 	hdev->rss_size_max = cfg.rss_size_max;
1218 	hdev->rx_buf_len = cfg.rx_buf_len;
1219 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1220 	hdev->hw.mac.media_type = cfg.media_type;
1221 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1222 	hdev->num_tx_desc = cfg.tqp_desc_num;
1223 	hdev->num_rx_desc = cfg.tqp_desc_num;
1224 	hdev->tm_info.num_pg = 1;
1225 	hdev->tc_max = cfg.tc_num;
1226 	hdev->tm_info.hw_pfc_map = 0;
1227 	hdev->wanted_umv_size = cfg.umv_space;
1228 
1229 	if (hnae3_dev_fd_supported(hdev))
1230 		hdev->fd_en = true;
1231 
1232 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1233 	if (ret) {
1234 		dev_err(&hdev->pdev->dev, "failed to parse speed, ret = %d.\n", ret);
1235 		return ret;
1236 	}
1237 
1238 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1239 
1240 	if (hdev->tc_max > HNAE3_MAX_TC || hdev->tc_max < 1) {
1242 		dev_warn(&hdev->pdev->dev, "invalid TC num = %d, using 1 TC.\n",
1243 			 hdev->tc_max);
1244 		hdev->tc_max = 1;
1245 	}
1246 
1247 	/* Dev does not support DCB */
1248 	if (!hnae3_dev_dcb_supported(hdev)) {
1249 		hdev->tc_max = 1;
1250 		hdev->pfc_max = 0;
1251 	} else {
1252 		hdev->pfc_max = hdev->tc_max;
1253 	}
1254 
1255 	hdev->tm_info.num_tc = 1;
1256 
1257 	/* Currently, non-contiguous TCs are not supported */
1258 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1259 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1260 
1261 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1262 
1263 	hclge_init_kdump_kernel_config(hdev);
1264 
1265 	return ret;
1266 }
1267 
1268 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1269 			    int tso_mss_max)
1270 {
1271 	struct hclge_cfg_tso_status_cmd *req;
1272 	struct hclge_desc desc;
1273 	u16 tso_mss;
1274 
1275 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1276 
1277 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1278 
1279 	tso_mss = 0;
1280 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1281 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1282 	req->tso_mss_min = cpu_to_le16(tso_mss);
1283 
1284 	tso_mss = 0;
1285 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1286 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1287 	req->tso_mss_max = cpu_to_le16(tso_mss);
1288 
1289 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1290 }
1291 
1292 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1293 {
1294 	struct hclge_cfg_gro_status_cmd *req;
1295 	struct hclge_desc desc;
1296 	int ret;
1297 
1298 	if (!hnae3_dev_gro_supported(hdev))
1299 		return 0;
1300 
1301 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1302 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1303 
1304 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1305 
1306 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1307 	if (ret)
1308 		dev_err(&hdev->pdev->dev,
1309 			"GRO hardware config cmd failed, ret = %d\n", ret);
1310 
1311 	return ret;
1312 }
1313 
1314 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1315 {
1316 	struct hclge_tqp *tqp;
1317 	int i;
1318 
1319 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1320 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1321 	if (!hdev->htqp)
1322 		return -ENOMEM;
1323 
1324 	tqp = hdev->htqp;
1325 
1326 	for (i = 0; i < hdev->num_tqps; i++) {
1327 		tqp->dev = &hdev->pdev->dev;
1328 		tqp->index = i;
1329 
1330 		tqp->q.ae_algo = &ae_algo;
1331 		tqp->q.buf_size = hdev->rx_buf_len;
1332 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1333 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1334 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1335 			i * HCLGE_TQP_REG_SIZE;
1336 
1337 		tqp++;
1338 	}
1339 
1340 	return 0;
1341 }
1342 
1343 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1344 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1345 {
1346 	struct hclge_tqp_map_cmd *req;
1347 	struct hclge_desc desc;
1348 	int ret;
1349 
1350 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1351 
1352 	req = (struct hclge_tqp_map_cmd *)desc.data;
1353 	req->tqp_id = cpu_to_le16(tqp_pid);
1354 	req->tqp_vf = func_id;
1355 	req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1356 			1 << HCLGE_TQP_MAP_EN_B;
1357 	req->tqp_vid = cpu_to_le16(tqp_vid);
1358 
1359 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1360 	if (ret)
1361 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1362 
1363 	return ret;
1364 }
1365 
1366 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1367 {
1368 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1369 	struct hclge_dev *hdev = vport->back;
1370 	int i, alloced;
1371 
1372 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1373 	     alloced < num_tqps; i++) {
1374 		if (!hdev->htqp[i].alloced) {
1375 			hdev->htqp[i].q.handle = &vport->nic;
1376 			hdev->htqp[i].q.tqp_index = alloced;
1377 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1378 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1379 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1380 			hdev->htqp[i].alloced = true;
1381 			alloced++;
1382 		}
1383 	}
1384 	vport->alloc_tqps = alloced;
1385 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1386 				vport->alloc_tqps / hdev->tm_info.num_tc);
1387 
1388 	return 0;
1389 }
1390 
1391 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1392 			    u16 num_tx_desc, u16 num_rx_desc)
1393 
1394 {
1395 	struct hnae3_handle *nic = &vport->nic;
1396 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1397 	struct hclge_dev *hdev = vport->back;
1398 	int ret;
1399 
1400 	kinfo->num_tx_desc = num_tx_desc;
1401 	kinfo->num_rx_desc = num_rx_desc;
1402 
1403 	kinfo->rx_buf_len = hdev->rx_buf_len;
1404 
1405 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1406 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1407 	if (!kinfo->tqp)
1408 		return -ENOMEM;
1409 
1410 	ret = hclge_assign_tqp(vport, num_tqps);
1411 	if (ret)
1412 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1413 
1414 	return ret;
1415 }
1416 
1417 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1418 				  struct hclge_vport *vport)
1419 {
1420 	struct hnae3_handle *nic = &vport->nic;
1421 	struct hnae3_knic_private_info *kinfo;
1422 	u16 i;
1423 
1424 	kinfo = &nic->kinfo;
1425 	for (i = 0; i < vport->alloc_tqps; i++) {
1426 		struct hclge_tqp *q =
1427 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1428 		bool is_pf;
1429 		int ret;
1430 
1431 		is_pf = !(vport->vport_id);
1432 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1433 					     i, is_pf);
1434 		if (ret)
1435 			return ret;
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 static int hclge_map_tqp(struct hclge_dev *hdev)
1442 {
1443 	struct hclge_vport *vport = hdev->vport;
1444 	u16 i, num_vport;
1445 
1446 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1447 	for (i = 0; i < num_vport; i++)	{
1448 		int ret;
1449 
1450 		ret = hclge_map_tqp_to_vport(hdev, vport);
1451 		if (ret)
1452 			return ret;
1453 
1454 		vport++;
1455 	}
1456 
1457 	return 0;
1458 }
1459 
1460 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1461 {
1462 	/* this will be initialized later */
1463 }
1464 
1465 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1466 {
1467 	struct hnae3_handle *nic = &vport->nic;
1468 	struct hclge_dev *hdev = vport->back;
1469 	int ret;
1470 
1471 	nic->pdev = hdev->pdev;
1472 	nic->ae_algo = &ae_algo;
1473 	nic->numa_node_mask = hdev->numa_node_mask;
1474 
1475 	if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1476 		ret = hclge_knic_setup(vport, num_tqps,
1477 				       hdev->num_tx_desc, hdev->num_rx_desc);
1478 
1479 		if (ret) {
1480 			dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1481 				ret);
1482 			return ret;
1483 		}
1484 	} else {
1485 		hclge_unic_setup(vport, num_tqps);
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 static int hclge_alloc_vport(struct hclge_dev *hdev)
1492 {
1493 	struct pci_dev *pdev = hdev->pdev;
1494 	struct hclge_vport *vport;
1495 	u32 tqp_main_vport;
1496 	u32 tqp_per_vport;
1497 	int num_vport, i;
1498 	int ret;
1499 
1500 	/* We need to alloc a vport for the main NIC of the PF */
1501 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1502 
1503 	if (hdev->num_tqps < num_vport) {
1504 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)\n",
1505 			hdev->num_tqps, num_vport);
1506 		return -EINVAL;
1507 	}
1508 
1509 	/* Alloc the same number of TQPs for every vport */
1510 	tqp_per_vport = hdev->num_tqps / num_vport;
1511 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1512 
1513 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1514 			     GFP_KERNEL);
1515 	if (!vport)
1516 		return -ENOMEM;
1517 
1518 	hdev->vport = vport;
1519 	hdev->num_alloc_vport = num_vport;
1520 
1521 	if (IS_ENABLED(CONFIG_PCI_IOV))
1522 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1523 
1524 	for (i = 0; i < num_vport; i++) {
1525 		vport->back = hdev;
1526 		vport->vport_id = i;
1527 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1528 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1529 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1530 		INIT_LIST_HEAD(&vport->vlan_list);
1531 		INIT_LIST_HEAD(&vport->uc_mac_list);
1532 		INIT_LIST_HEAD(&vport->mc_mac_list);
1533 
1534 		if (i == 0)
1535 			ret = hclge_vport_setup(vport, tqp_main_vport);
1536 		else
1537 			ret = hclge_vport_setup(vport, tqp_per_vport);
1538 		if (ret) {
1539 			dev_err(&pdev->dev,
1540 				"vport setup failed for vport %d, %d\n",
1541 				i, ret);
1542 			return ret;
1543 		}
1544 
1545 		vport++;
1546 	}
1547 
1548 	return 0;
1549 }
1550 
1551 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1552 				    struct hclge_pkt_buf_alloc *buf_alloc)
1553 {
1554 /* TX buffer size is specified in units of 128 bytes */
1555 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1556 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1557 	struct hclge_tx_buff_alloc_cmd *req;
1558 	struct hclge_desc desc;
1559 	int ret;
1560 	u8 i;
1561 
1562 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1563 
1564 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1565 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1566 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1567 
1568 		req->tx_pkt_buff[i] =
1569 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1570 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1571 	}
1572 
1573 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1574 	if (ret)
1575 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1576 			ret);
1577 
1578 	return ret;
1579 }
1580 
1581 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1582 				 struct hclge_pkt_buf_alloc *buf_alloc)
1583 {
1584 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1585 
1586 	if (ret)
1587 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1588 
1589 	return ret;
1590 }
1591 
1592 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1593 {
1594 	int i, cnt = 0;
1595 
1596 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1597 		if (hdev->hw_tc_map & BIT(i))
1598 			cnt++;
1599 	return cnt;
1600 }
1601 
1602 /* Get the number of PFC-enabled TCs that have a private buffer */
1603 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1604 				  struct hclge_pkt_buf_alloc *buf_alloc)
1605 {
1606 	struct hclge_priv_buf *priv;
1607 	int i, cnt = 0;
1608 
1609 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1610 		priv = &buf_alloc->priv_buf[i];
1611 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1612 		    priv->enable)
1613 			cnt++;
1614 	}
1615 
1616 	return cnt;
1617 }
1618 
1619 /* Get the number of PFC-disabled TCs that have a private buffer */
1620 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1621 				     struct hclge_pkt_buf_alloc *buf_alloc)
1622 {
1623 	struct hclge_priv_buf *priv;
1624 	int i, cnt = 0;
1625 
1626 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1627 		priv = &buf_alloc->priv_buf[i];
1628 		if (hdev->hw_tc_map & BIT(i) &&
1629 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1630 		    priv->enable)
1631 			cnt++;
1632 	}
1633 
1634 	return cnt;
1635 }
1636 
1637 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1638 {
1639 	struct hclge_priv_buf *priv;
1640 	u32 rx_priv = 0;
1641 	int i;
1642 
1643 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1644 		priv = &buf_alloc->priv_buf[i];
1645 		if (priv->enable)
1646 			rx_priv += priv->buf_size;
1647 	}
1648 	return rx_priv;
1649 }
1650 
1651 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1652 {
1653 	u32 i, total_tx_size = 0;
1654 
1655 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1656 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1657 
1658 	return total_tx_size;
1659 }
1660 
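/* Check whether rx_all (the packet buffer left for rx) can hold all
 * enabled private rx buffers plus the shared buffer. If so, record the
 * shared buffer size and its waterlines/per-TC thresholds in buf_alloc
 * and return true; otherwise return false.
 */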
1661 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1662 				struct hclge_pkt_buf_alloc *buf_alloc,
1663 				u32 rx_all)
1664 {
1665 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1666 	u32 tc_num = hclge_get_tc_num(hdev);
1667 	u32 shared_buf, aligned_mps;
1668 	u32 rx_priv;
1669 	int i;
1670 
1671 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1672 
1673 	if (hnae3_dev_dcb_supported(hdev))
1674 		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1675 	else
1676 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1677 					+ hdev->dv_buf_size;
1678 
1679 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1680 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1681 			     HCLGE_BUF_SIZE_UNIT);
1682 
1683 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1684 	if (rx_all < rx_priv + shared_std)
1685 		return false;
1686 
1687 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1688 	buf_alloc->s_buf.buf_size = shared_buf;
1689 	if (hnae3_dev_dcb_supported(hdev)) {
1690 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1691 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1692 			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1693 	} else {
1694 		buf_alloc->s_buf.self.high = aligned_mps +
1695 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1696 		buf_alloc->s_buf.self.low = aligned_mps;
1697 	}
1698 
1699 	if (hnae3_dev_dcb_supported(hdev)) {
1700 		if (tc_num)
1701 			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1702 		else
1703 			hi_thrd = shared_buf - hdev->dv_buf_size;
1704 
1705 		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1706 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1707 		lo_thrd = hi_thrd - aligned_mps / 2;
1708 	} else {
1709 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1710 		lo_thrd = aligned_mps;
1711 	}
1712 
1713 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1714 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1715 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1716 	}
1717 
1718 	return true;
1719 }
1720 
1721 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1722 				struct hclge_pkt_buf_alloc *buf_alloc)
1723 {
1724 	u32 i, total_size;
1725 
1726 	total_size = hdev->pkt_buf_size;
1727 
1728 	/* alloc tx buffer for all enabled tc */
1729 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1730 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1731 
1732 		if (hdev->hw_tc_map & BIT(i)) {
1733 			if (total_size < hdev->tx_buf_size)
1734 				return -ENOMEM;
1735 
1736 			priv->tx_buf_size = hdev->tx_buf_size;
1737 		} else {
1738 			priv->tx_buf_size = 0;
1739 		}
1740 
1741 		total_size -= priv->tx_buf_size;
1742 	}
1743 
1744 	return 0;
1745 }
1746 
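/* Assign a private rx buffer and waterlines to each enabled TC; @max
 * selects between the larger and the smaller waterline scheme. Returns
 * true if the resulting layout fits in the packet buffer, as checked by
 * hclge_is_rx_buf_ok().
 */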
1747 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1748 				  struct hclge_pkt_buf_alloc *buf_alloc)
1749 {
1750 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1751 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1752 	int i;
1753 
1754 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1755 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1756 
1757 		priv->enable = 0;
1758 		priv->wl.low = 0;
1759 		priv->wl.high = 0;
1760 		priv->buf_size = 0;
1761 
1762 		if (!(hdev->hw_tc_map & BIT(i)))
1763 			continue;
1764 
1765 		priv->enable = 1;
1766 
1767 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1768 			priv->wl.low = max ? aligned_mps : 256;
1769 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1770 						HCLGE_BUF_SIZE_UNIT);
1771 		} else {
1772 			priv->wl.low = 0;
1773 			priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1774 		}
1775 
1776 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1777 	}
1778 
1779 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1780 }
1781 
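/* hclge_drop_nopfc_buf_till_fit: release the private buffers of TCs that
 * do not have PFC enabled, starting from the last TC, until the shared
 * buffer requirement is satisfied or no such TC is left.
 */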
1782 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1783 					  struct hclge_pkt_buf_alloc *buf_alloc)
1784 {
1785 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1786 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1787 	int i;
1788 
	/* clear from the last TC first */
1790 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1791 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1792 
1793 		if (hdev->hw_tc_map & BIT(i) &&
1794 		    !(hdev->tm_info.hw_pfc_map & BIT(i))) {
			/* Clear the private buffer of a TC without PFC */
1796 			priv->wl.low = 0;
1797 			priv->wl.high = 0;
1798 			priv->buf_size = 0;
1799 			priv->enable = 0;
1800 			no_pfc_priv_num--;
1801 		}
1802 
1803 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1804 		    no_pfc_priv_num == 0)
1805 			break;
1806 	}
1807 
1808 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1809 }
1810 
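/* hclge_drop_pfc_buf_till_fit: release the private buffers of PFC-enabled
 * TCs, starting from the last TC, until the shared buffer requirement is
 * satisfied or no such TC is left.
 */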
1811 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1812 					struct hclge_pkt_buf_alloc *buf_alloc)
1813 {
1814 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1815 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1816 	int i;
1817 
	/* clear from the last TC first */
1819 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1820 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1821 
1822 		if (hdev->hw_tc_map & BIT(i) &&
1823 		    hdev->tm_info.hw_pfc_map & BIT(i)) {
			/* Reduce the number of PFC-enabled TCs with private buffers */
1825 			priv->wl.low = 0;
1826 			priv->enable = 0;
1827 			priv->wl.high = 0;
1828 			priv->buf_size = 0;
1829 			pfc_priv_num--;
1830 		}
1831 
1832 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1833 		    pfc_priv_num == 0)
1834 			break;
1835 	}
1836 
1837 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1838 }
1839 
1840 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1841  * @hdev: pointer to struct hclge_dev
1842  * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
1844  */
1845 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1846 				struct hclge_pkt_buf_alloc *buf_alloc)
1847 {
1848 	/* When DCB is not supported, rx private buffer is not allocated. */
1849 	if (!hnae3_dev_dcb_supported(hdev)) {
1850 		u32 rx_all = hdev->pkt_buf_size;
1851 
1852 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1853 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1854 			return -ENOMEM;
1855 
1856 		return 0;
1857 	}
1858 
1859 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1860 		return 0;
1861 
1862 	/* try to decrease the buffer size */
1863 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1864 		return 0;
1865 
1866 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1867 		return 0;
1868 
1869 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1870 		return 0;
1871 
1872 	return -ENOMEM;
1873 }
1874 
1875 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1876 				   struct hclge_pkt_buf_alloc *buf_alloc)
1877 {
1878 	struct hclge_rx_priv_buff_cmd *req;
1879 	struct hclge_desc desc;
1880 	int ret;
1881 	int i;
1882 
1883 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1884 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1885 
	/* Allocate the private buffer for each TC */
1887 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1888 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1889 
1890 		req->buf_num[i] =
1891 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1892 		req->buf_num[i] |=
1893 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1894 	}
1895 
1896 	req->shared_buf =
1897 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1898 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
1899 
1900 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1901 	if (ret)
1902 		dev_err(&hdev->pdev->dev,
1903 			"rx private buffer alloc cmd failed %d\n", ret);
1904 
1905 	return ret;
1906 }
1907 
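/* hclge_rx_priv_wl_config: write the rx private waterline of each TC to
 * hardware. The waterlines are carried in two chained descriptors, each
 * covering HCLGE_TC_NUM_ONE_DESC TCs.
 */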
1908 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1909 				   struct hclge_pkt_buf_alloc *buf_alloc)
1910 {
1911 	struct hclge_rx_priv_wl_buf *req;
1912 	struct hclge_priv_buf *priv;
1913 	struct hclge_desc desc[2];
1914 	int i, j;
1915 	int ret;
1916 
1917 	for (i = 0; i < 2; i++) {
1918 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1919 					   false);
1920 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1921 
		/* The first descriptor sets the NEXT bit to 1 */
1923 		if (i == 0)
1924 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1925 		else
1926 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1927 
1928 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1929 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1930 
1931 			priv = &buf_alloc->priv_buf[idx];
1932 			req->tc_wl[j].high =
1933 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1934 			req->tc_wl[j].high |=
1935 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1936 			req->tc_wl[j].low =
1937 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1938 			req->tc_wl[j].low |=
1939 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1940 		}
1941 	}
1942 
	/* Send 2 descriptors at one time */
1944 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1945 	if (ret)
1946 		dev_err(&hdev->pdev->dev,
1947 			"rx private waterline config cmd failed %d\n",
1948 			ret);
1949 	return ret;
1950 }
1951 
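/* hclge_common_thrd_config: write the per-TC thresholds of the shared rx
 * buffer to hardware, again using two chained descriptors.
 */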
1952 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1953 				    struct hclge_pkt_buf_alloc *buf_alloc)
1954 {
1955 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1956 	struct hclge_rx_com_thrd *req;
1957 	struct hclge_desc desc[2];
1958 	struct hclge_tc_thrd *tc;
1959 	int i, j;
1960 	int ret;
1961 
1962 	for (i = 0; i < 2; i++) {
1963 		hclge_cmd_setup_basic_desc(&desc[i],
1964 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1965 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
1966 
		/* The first descriptor sets the NEXT bit to 1 */
1968 		if (i == 0)
1969 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1970 		else
1971 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1972 
1973 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1974 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1975 
1976 			req->com_thrd[j].high =
1977 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1978 			req->com_thrd[j].high |=
1979 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1980 			req->com_thrd[j].low =
1981 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1982 			req->com_thrd[j].low |=
1983 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1984 		}
1985 	}
1986 
1987 	/* Send 2 descriptors at one time */
1988 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
1989 	if (ret)
1990 		dev_err(&hdev->pdev->dev,
1991 			"common threshold config cmd failed %d\n", ret);
1992 	return ret;
1993 }
1994 
1995 static int hclge_common_wl_config(struct hclge_dev *hdev,
1996 				  struct hclge_pkt_buf_alloc *buf_alloc)
1997 {
1998 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1999 	struct hclge_rx_com_wl *req;
2000 	struct hclge_desc desc;
2001 	int ret;
2002 
2003 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2004 
2005 	req = (struct hclge_rx_com_wl *)desc.data;
2006 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2008 
2009 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2011 
2012 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2013 	if (ret)
2014 		dev_err(&hdev->pdev->dev,
2015 			"common waterline config cmd failed %d\n", ret);
2016 
2017 	return ret;
2018 }
2019 
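/* hclge_buffer_alloc: calculate and configure the tx/rx packet buffers
 * @hdev: pointer to struct hclge_dev
 * @return: 0: configure successful, negative: fail
 *
 * Tx buffers are calculated and allocated first, then the rx private
 * buffers, and finally the waterlines and thresholds (the per-TC parts
 * only when DCB is supported).
 */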
2020 int hclge_buffer_alloc(struct hclge_dev *hdev)
2021 {
2022 	struct hclge_pkt_buf_alloc *pkt_buf;
2023 	int ret;
2024 
2025 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2026 	if (!pkt_buf)
2027 		return -ENOMEM;
2028 
2029 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2030 	if (ret) {
2031 		dev_err(&hdev->pdev->dev,
2032 			"could not calc tx buffer size for all TCs %d\n", ret);
2033 		goto out;
2034 	}
2035 
2036 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2037 	if (ret) {
2038 		dev_err(&hdev->pdev->dev,
2039 			"could not alloc tx buffers %d\n", ret);
2040 		goto out;
2041 	}
2042 
2043 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2044 	if (ret) {
2045 		dev_err(&hdev->pdev->dev,
2046 			"could not calc rx priv buffer size for all TCs %d\n",
2047 			ret);
2048 		goto out;
2049 	}
2050 
2051 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2052 	if (ret) {
2053 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2054 			ret);
2055 		goto out;
2056 	}
2057 
2058 	if (hnae3_dev_dcb_supported(hdev)) {
2059 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2060 		if (ret) {
2061 			dev_err(&hdev->pdev->dev,
2062 				"could not configure rx private waterline %d\n",
2063 				ret);
2064 			goto out;
2065 		}
2066 
2067 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2068 		if (ret) {
2069 			dev_err(&hdev->pdev->dev,
2070 				"could not configure common threshold %d\n",
2071 				ret);
2072 			goto out;
2073 		}
2074 	}
2075 
2076 	ret = hclge_common_wl_config(hdev, pkt_buf);
2077 	if (ret)
2078 		dev_err(&hdev->pdev->dev,
2079 			"could not configure common waterline %d\n", ret);
2080 
2081 out:
2082 	kfree(pkt_buf);
2083 	return ret;
2084 }
2085 
2086 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2087 {
2088 	struct hnae3_handle *roce = &vport->roce;
2089 	struct hnae3_handle *nic = &vport->nic;
2090 
2091 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2092 
2093 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2094 	    vport->back->num_msi_left == 0)
2095 		return -EINVAL;
2096 
2097 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2098 
2099 	roce->rinfo.netdev = nic->kinfo.netdev;
2100 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2101 
2102 	roce->pdev = nic->pdev;
2103 	roce->ae_algo = nic->ae_algo;
2104 	roce->numa_node_mask = nic->numa_node_mask;
2105 
2106 	return 0;
2107 }
2108 
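/* hclge_init_msi: allocate MSI/MSI-X interrupt vectors for the PF and set
 * up the vector_status/vector_irq bookkeeping arrays.
 */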
2109 static int hclge_init_msi(struct hclge_dev *hdev)
2110 {
2111 	struct pci_dev *pdev = hdev->pdev;
2112 	int vectors;
2113 	int i;
2114 
2115 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2116 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2117 	if (vectors < 0) {
2118 		dev_err(&pdev->dev,
2119 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2120 			vectors);
2121 		return vectors;
2122 	}
2123 	if (vectors < hdev->num_msi)
2124 		dev_warn(&hdev->pdev->dev,
2125 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2126 			 hdev->num_msi, vectors);
2127 
2128 	hdev->num_msi = vectors;
2129 	hdev->num_msi_left = vectors;
2130 	hdev->base_msi_vector = pdev->irq;
2131 	hdev->roce_base_vector = hdev->base_msi_vector +
2132 				hdev->roce_base_msix_offset;
2133 
2134 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2135 					   sizeof(u16), GFP_KERNEL);
2136 	if (!hdev->vector_status) {
2137 		pci_free_irq_vectors(pdev);
2138 		return -ENOMEM;
2139 	}
2140 
2141 	for (i = 0; i < hdev->num_msi; i++)
2142 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2143 
2144 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2145 					sizeof(int), GFP_KERNEL);
2146 	if (!hdev->vector_irq) {
2147 		pci_free_irq_vectors(pdev);
2148 		return -ENOMEM;
2149 	}
2150 
2151 	return 0;
2152 }
2153 
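/* Half duplex is only valid for 10M and 100M; force full duplex for any
 * other speed.
 */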
2154 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2155 {
2157 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2158 		duplex = HCLGE_MAC_FULL;
2159 
2160 	return duplex;
2161 }
2162 
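/* hclge_cfg_mac_speed_dup_hw: convert the speed to the firmware encoding
 * (1G=0, 10G=1, 25G=2, 40G=3, 50G=4, 100G=5, 10M=6, 100M=7) and issue the
 * speed/duplex config command.
 */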
2163 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2164 				      u8 duplex)
2165 {
2166 	struct hclge_config_mac_speed_dup_cmd *req;
2167 	struct hclge_desc desc;
2168 	int ret;
2169 
2170 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2171 
2172 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2173 
2174 	hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2175 
2176 	switch (speed) {
2177 	case HCLGE_MAC_SPEED_10M:
2178 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2179 				HCLGE_CFG_SPEED_S, 6);
2180 		break;
2181 	case HCLGE_MAC_SPEED_100M:
2182 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183 				HCLGE_CFG_SPEED_S, 7);
2184 		break;
2185 	case HCLGE_MAC_SPEED_1G:
2186 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187 				HCLGE_CFG_SPEED_S, 0);
2188 		break;
2189 	case HCLGE_MAC_SPEED_10G:
2190 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191 				HCLGE_CFG_SPEED_S, 1);
2192 		break;
2193 	case HCLGE_MAC_SPEED_25G:
2194 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195 				HCLGE_CFG_SPEED_S, 2);
2196 		break;
2197 	case HCLGE_MAC_SPEED_40G:
2198 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199 				HCLGE_CFG_SPEED_S, 3);
2200 		break;
2201 	case HCLGE_MAC_SPEED_50G:
2202 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203 				HCLGE_CFG_SPEED_S, 4);
2204 		break;
2205 	case HCLGE_MAC_SPEED_100G:
2206 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207 				HCLGE_CFG_SPEED_S, 5);
2208 		break;
2209 	default:
2210 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2211 		return -EINVAL;
2212 	}
2213 
2214 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2215 		      1);
2216 
2217 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2218 	if (ret) {
2219 		dev_err(&hdev->pdev->dev,
2220 			"mac speed/duplex config cmd failed %d.\n", ret);
2221 		return ret;
2222 	}
2223 
2224 	return 0;
2225 }
2226 
2227 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2228 {
2229 	int ret;
2230 
2231 	duplex = hclge_check_speed_dup(duplex, speed);
2232 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2233 		return 0;
2234 
2235 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2236 	if (ret)
2237 		return ret;
2238 
2239 	hdev->hw.mac.speed = speed;
2240 	hdev->hw.mac.duplex = duplex;
2241 
2242 	return 0;
2243 }
2244 
2245 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2246 				     u8 duplex)
2247 {
2248 	struct hclge_vport *vport = hclge_get_vport(handle);
2249 	struct hclge_dev *hdev = vport->back;
2250 
2251 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2252 }
2253 
2254 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2255 {
2256 	struct hclge_config_auto_neg_cmd *req;
2257 	struct hclge_desc desc;
2258 	u32 flag = 0;
2259 	int ret;
2260 
2261 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2262 
2263 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2264 	hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2265 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2266 
2267 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2268 	if (ret)
2269 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2270 			ret);
2271 
2272 	return ret;
2273 }
2274 
2275 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2276 {
2277 	struct hclge_vport *vport = hclge_get_vport(handle);
2278 	struct hclge_dev *hdev = vport->back;
2279 
2280 	if (!hdev->hw.mac.support_autoneg) {
2281 		if (enable) {
2282 			dev_err(&hdev->pdev->dev,
2283 				"autoneg is not supported by current port\n");
2284 			return -EOPNOTSUPP;
2285 		} else {
2286 			return 0;
2287 		}
2288 	}
2289 
2290 	return hclge_set_autoneg_en(hdev, enable);
2291 }
2292 
2293 static int hclge_get_autoneg(struct hnae3_handle *handle)
2294 {
2295 	struct hclge_vport *vport = hclge_get_vport(handle);
2296 	struct hclge_dev *hdev = vport->back;
2297 	struct phy_device *phydev = hdev->hw.mac.phydev;
2298 
2299 	if (phydev)
2300 		return phydev->autoneg;
2301 
2302 	return hdev->hw.mac.autoneg;
2303 }
2304 
2305 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2306 {
2307 	struct hclge_vport *vport = hclge_get_vport(handle);
2308 	struct hclge_dev *hdev = vport->back;
2309 	int ret;
2310 
2311 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2312 
2313 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2314 	if (ret)
2315 		return ret;
2316 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2317 }
2318 
2319 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2320 {
2321 	struct hclge_config_fec_cmd *req;
2322 	struct hclge_desc desc;
2323 	int ret;
2324 
2325 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2326 
2327 	req = (struct hclge_config_fec_cmd *)desc.data;
2328 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2329 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2330 	if (fec_mode & BIT(HNAE3_FEC_RS))
2331 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2332 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2333 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2334 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2335 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2336 
2337 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2338 	if (ret)
2339 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2340 
2341 	return ret;
2342 }
2343 
2344 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2345 {
2346 	struct hclge_vport *vport = hclge_get_vport(handle);
2347 	struct hclge_dev *hdev = vport->back;
2348 	struct hclge_mac *mac = &hdev->hw.mac;
2349 	int ret;
2350 
2351 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2352 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2353 		return -EINVAL;
2354 	}
2355 
2356 	ret = hclge_set_fec_hw(hdev, fec_mode);
2357 	if (ret)
2358 		return ret;
2359 
2360 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2361 	return 0;
2362 }
2363 
2364 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2365 			  u8 *fec_mode)
2366 {
2367 	struct hclge_vport *vport = hclge_get_vport(handle);
2368 	struct hclge_dev *hdev = vport->back;
2369 	struct hclge_mac *mac = &hdev->hw.mac;
2370 
2371 	if (fec_ability)
2372 		*fec_ability = mac->fec_ability;
2373 	if (fec_mode)
2374 		*fec_mode = mac->fec_mode;
2375 }
2376 
2377 static int hclge_mac_init(struct hclge_dev *hdev)
2378 {
2379 	struct hclge_mac *mac = &hdev->hw.mac;
2380 	int ret;
2381 
2382 	hdev->support_sfp_query = true;
2383 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2384 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2385 					 hdev->hw.mac.duplex);
2386 	if (ret) {
2387 		dev_err(&hdev->pdev->dev,
2388 			"Config mac speed dup fail ret=%d\n", ret);
2389 		return ret;
2390 	}
2391 
2392 	mac->link = 0;
2393 
2394 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2395 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2396 		if (ret) {
2397 			dev_err(&hdev->pdev->dev,
2398 				"Fec mode init fail, ret = %d\n", ret);
2399 			return ret;
2400 		}
2401 	}
2402 
2403 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2404 	if (ret) {
2405 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2406 		return ret;
2407 	}
2408 
2409 	ret = hclge_buffer_alloc(hdev);
2410 	if (ret)
2411 		dev_err(&hdev->pdev->dev,
2412 			"allocate buffer fail, ret=%d\n", ret);
2413 
2414 	return ret;
2415 }
2416 
2417 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2418 {
2419 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2420 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2421 		schedule_work(&hdev->mbx_service_task);
2422 }
2423 
2424 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2425 {
2426 	if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2427 		schedule_work(&hdev->rst_service_task);
2428 }
2429 
2430 static void hclge_task_schedule(struct hclge_dev *hdev)
2431 {
2432 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2433 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2434 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2435 		(void)schedule_work(&hdev->service_task);
2436 }
2437 
2438 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2439 {
2440 	struct hclge_link_status_cmd *req;
2441 	struct hclge_desc desc;
2442 	int link_status;
2443 	int ret;
2444 
2445 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2446 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2447 	if (ret) {
2448 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2449 			ret);
2450 		return ret;
2451 	}
2452 
2453 	req = (struct hclge_link_status_cmd *)desc.data;
2454 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2455 
2456 	return !!link_status;
2457 }
2458 
2459 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2460 {
2461 	int mac_state;
2462 	int link_stat;
2463 
2464 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2465 		return 0;
2466 
2467 	mac_state = hclge_get_mac_link_status(hdev);
2468 
2469 	if (hdev->hw.mac.phydev) {
2470 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2471 			link_stat = mac_state &
2472 				hdev->hw.mac.phydev->link;
2473 		else
2474 			link_stat = 0;
2475 
2476 	} else {
2477 		link_stat = mac_state;
2478 	}
2479 
2480 	return !!link_stat;
2481 }
2482 
2483 static void hclge_update_link_status(struct hclge_dev *hdev)
2484 {
2485 	struct hnae3_client *rclient = hdev->roce_client;
2486 	struct hnae3_client *client = hdev->nic_client;
2487 	struct hnae3_handle *rhandle;
2488 	struct hnae3_handle *handle;
2489 	int state;
2490 	int i;
2491 
2492 	if (!client)
2493 		return;
2494 	state = hclge_get_mac_phy_link(hdev);
2495 	if (state != hdev->hw.mac.link) {
2496 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2497 			handle = &hdev->vport[i].nic;
2498 			client->ops->link_status_change(handle, state);
2499 			hclge_config_mac_tnl_int(hdev, state);
2500 			rhandle = &hdev->vport[i].roce;
2501 			if (rclient && rclient->ops->link_status_change)
2502 				rclient->ops->link_status_change(rhandle,
2503 								 state);
2504 		}
2505 		hdev->hw.mac.link = state;
2506 	}
2507 }
2508 
2509 static void hclge_update_port_capability(struct hclge_mac *mac)
2510 {
	/* firmware cannot identify the backplane type; the media type
	 * read from the configuration helps to determine it
	 */
2514 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2515 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2516 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2517 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2518 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2519 
	if (mac->support_autoneg) {
2521 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2522 		linkmode_copy(mac->advertising, mac->supported);
2523 	} else {
2524 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2525 				   mac->supported);
2526 		linkmode_zero(mac->advertising);
2527 	}
2528 }
2529 
2530 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2531 {
2532 	struct hclge_sfp_info_cmd *resp = NULL;
2533 	struct hclge_desc desc;
2534 	int ret;
2535 
2536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2537 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2538 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2539 	if (ret == -EOPNOTSUPP) {
2540 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
2542 		return ret;
2543 	} else if (ret) {
2544 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2545 		return ret;
2546 	}
2547 
2548 	*speed = le32_to_cpu(resp->speed);
2549 
2550 	return 0;
2551 }
2552 
2553 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2554 {
2555 	struct hclge_sfp_info_cmd *resp;
2556 	struct hclge_desc desc;
2557 	int ret;
2558 
2559 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2560 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2561 
2562 	resp->query_type = QUERY_ACTIVE_SPEED;
2563 
2564 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2565 	if (ret == -EOPNOTSUPP) {
2566 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
2568 		return ret;
2569 	} else if (ret) {
2570 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2571 		return ret;
2572 	}
2573 
2574 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it's an old version of
	 * firmware, so do not update these params
	 */
2578 	if (resp->speed_ability) {
2579 		mac->module_type = le32_to_cpu(resp->module_type);
2580 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2581 		mac->autoneg = resp->autoneg;
2582 		mac->support_autoneg = resp->autoneg_ability;
2583 	} else {
2584 		mac->speed_type = QUERY_SFP_SPEED;
2585 	}
2586 
2587 	return 0;
2588 }
2589 
2590 static int hclge_update_port_info(struct hclge_dev *hdev)
2591 {
2592 	struct hclge_mac *mac = &hdev->hw.mac;
2593 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2594 	int ret;
2595 
2596 	/* get the port info from SFP cmd if not copper port */
2597 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2598 		return 0;
2599 
2600 	/* if IMP does not support get SFP/qSFP info, return directly */
2601 	if (!hdev->support_sfp_query)
2602 		return 0;
2603 
2604 	if (hdev->pdev->revision >= 0x21)
2605 		ret = hclge_get_sfp_info(hdev, mac);
2606 	else
2607 		ret = hclge_get_sfp_speed(hdev, &speed);
2608 
2609 	if (ret == -EOPNOTSUPP) {
2610 		hdev->support_sfp_query = false;
2611 		return ret;
2612 	} else if (ret) {
2613 		return ret;
2614 	}
2615 
2616 	if (hdev->pdev->revision >= 0x21) {
2617 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2618 			hclge_update_port_capability(mac);
2619 			return 0;
2620 		}
2621 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2622 					       HCLGE_MAC_FULL);
2623 	} else {
2624 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2625 			return 0; /* do nothing if no SFP */
2626 
2627 		/* must config full duplex for SFP */
2628 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2629 	}
2630 }
2631 
2632 static int hclge_get_status(struct hnae3_handle *handle)
2633 {
2634 	struct hclge_vport *vport = hclge_get_vport(handle);
2635 	struct hclge_dev *hdev = vport->back;
2636 
2637 	hclge_update_link_status(hdev);
2638 
2639 	return hdev->hw.mac.link;
2640 }
2641 
2642 static void hclge_service_timer(struct timer_list *t)
2643 {
2644 	struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2645 
2646 	mod_timer(&hdev->service_timer, jiffies + HZ);
2647 	hdev->hw_stats.stats_timer++;
2648 	hclge_task_schedule(hdev);
2649 }
2650 
2651 static void hclge_service_complete(struct hclge_dev *hdev)
2652 {
2653 	WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2654 
2655 	/* Flush memory before next watchdog */
2656 	smp_mb__before_atomic();
2657 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
2658 }
2659 
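/* hclge_check_event_cause: read the vector 0 interrupt sources and
 * classify the event as reset, MSI-X error, mailbox or other. The value
 * used later to clear the interrupt source is returned through clearval.
 */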
2660 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2661 {
2662 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2663 
2664 	/* fetch the events from their corresponding regs */
2665 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2666 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2667 	msix_src_reg = hclge_read_dev(&hdev->hw,
2668 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2669 
	/* Assumption: if reset and mailbox events are reported together,
	 * we only process the reset event in this pass and defer the
	 * processing of the mailbox events. Since we have not cleared the
	 * RX CMDQ event this time, we will receive another interrupt from
	 * the hardware just for the mailbox.
	 */
2676 
2677 	/* check for vector0 reset event sources */
2678 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2679 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2680 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2681 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2682 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2683 		hdev->rst_stats.imp_rst_cnt++;
2684 		return HCLGE_VECTOR0_EVENT_RST;
2685 	}
2686 
2687 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2688 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2689 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2690 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2691 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2692 		hdev->rst_stats.global_rst_cnt++;
2693 		return HCLGE_VECTOR0_EVENT_RST;
2694 	}
2695 
2696 	if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2697 		dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2698 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2699 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2700 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2701 		hdev->rst_stats.core_rst_cnt++;
2702 		return HCLGE_VECTOR0_EVENT_RST;
2703 	}
2704 
2705 	/* check for vector0 msix event source */
2706 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2707 		dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2708 			msix_src_reg);
2709 		return HCLGE_VECTOR0_EVENT_ERR;
2710 	}
2711 
2712 	/* check for vector0 mailbox(=CMDQ RX) event source */
2713 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2714 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2715 		*clearval = cmdq_src_reg;
2716 		return HCLGE_VECTOR0_EVENT_MBX;
2717 	}
2718 
2719 	/* print other vector0 event source */
2720 	dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2721 		cmdq_src_reg, msix_src_reg);
2722 	return HCLGE_VECTOR0_EVENT_OTHER;
2723 }
2724 
2725 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2726 				    u32 regclr)
2727 {
2728 	switch (event_type) {
2729 	case HCLGE_VECTOR0_EVENT_RST:
2730 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2731 		break;
2732 	case HCLGE_VECTOR0_EVENT_MBX:
2733 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2734 		break;
2735 	default:
2736 		break;
2737 	}
2738 }
2739 
2740 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2741 {
2742 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2743 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2744 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2745 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2746 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2747 }
2748 
2749 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2750 {
2751 	writel(enable ? 1 : 0, vector->addr);
2752 }
2753 
2754 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2755 {
2756 	struct hclge_dev *hdev = data;
2757 	u32 event_cause;
2758 	u32 clearval;
2759 
2760 	hclge_enable_vector(&hdev->misc_vector, false);
2761 	event_cause = hclge_check_event_cause(hdev, &clearval);
2762 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
2764 	switch (event_cause) {
2765 	case HCLGE_VECTOR0_EVENT_ERR:
		/* We do not know what type of reset is required now. This can
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, do the following for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET, i.e. defer the choice of the
		 *    actual reset type.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
		 *    will fetch the correct type of reset by first decoding
		 *    the types of errors.
		 */
2776 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2777 		/* fall through */
2778 	case HCLGE_VECTOR0_EVENT_RST:
2779 		hclge_reset_task_schedule(hdev);
2780 		break;
2781 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. we are not handling any mailbox task and none is
		 *    scheduled, or
		 * 2. we are handling a mailbox task but nothing more is
		 *    scheduled.
		 * In both cases, schedule the mailbox task, as this interrupt
		 * reports more mailbox messages.
		 */
2791 		hclge_mbx_task_schedule(hdev);
2792 		break;
2793 	default:
2794 		dev_warn(&hdev->pdev->dev,
2795 			 "received unknown or unhandled event of vector0\n");
2796 		break;
2797 	}
2798 
	/* clear the source of interrupt if it is not caused by reset */
2800 	if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2801 		hclge_clear_event_cause(hdev, event_cause, clearval);
2802 		hclge_enable_vector(&hdev->misc_vector, true);
2803 	}
2804 
2805 	return IRQ_HANDLED;
2806 }
2807 
2808 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2809 {
2810 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2811 		dev_warn(&hdev->pdev->dev,
2812 			 "vector(vector_id %d) has been freed.\n", vector_id);
2813 		return;
2814 	}
2815 
2816 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2817 	hdev->num_msi_left += 1;
2818 	hdev->num_msi_used -= 1;
2819 }
2820 
2821 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2822 {
2823 	struct hclge_misc_vector *vector = &hdev->misc_vector;
2824 
2825 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2826 
2827 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2828 	hdev->vector_status[0] = 0;
2829 
2830 	hdev->num_msi_left -= 1;
2831 	hdev->num_msi_used += 1;
2832 }
2833 
2834 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2835 {
2836 	int ret;
2837 
2838 	hclge_get_misc_vector(hdev);
2839 
	/* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
2841 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2842 			  0, "hclge_misc", hdev);
2843 	if (ret) {
2844 		hclge_free_vector(hdev, 0);
2845 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2846 			hdev->misc_vector.vector_irq);
2847 	}
2848 
2849 	return ret;
2850 }
2851 
2852 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2853 {
2854 	free_irq(hdev->misc_vector.vector_irq, hdev);
2855 	hclge_free_vector(hdev, 0);
2856 }
2857 
2858 int hclge_notify_client(struct hclge_dev *hdev,
2859 			enum hnae3_reset_notify_type type)
2860 {
2861 	struct hnae3_client *client = hdev->nic_client;
2862 	u16 i;
2863 
2864 	if (!client->ops->reset_notify)
2865 		return -EOPNOTSUPP;
2866 
2867 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2868 		struct hnae3_handle *handle = &hdev->vport[i].nic;
2869 		int ret;
2870 
2871 		ret = client->ops->reset_notify(handle, type);
2872 		if (ret) {
2873 			dev_err(&hdev->pdev->dev,
2874 				"notify nic client failed %d(%d)\n", type, ret);
2875 			return ret;
2876 		}
2877 	}
2878 
2879 	return 0;
2880 }
2881 
2882 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2883 				    enum hnae3_reset_notify_type type)
2884 {
2885 	struct hnae3_client *client = hdev->roce_client;
2886 	int ret = 0;
2887 	u16 i;
2888 
2889 	if (!client)
2890 		return 0;
2891 
2892 	if (!client->ops->reset_notify)
2893 		return -EOPNOTSUPP;
2894 
2895 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2896 		struct hnae3_handle *handle = &hdev->vport[i].roce;
2897 
2898 		ret = client->ops->reset_notify(handle, type);
2899 		if (ret) {
2900 			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)\n",
2902 				type, ret);
2903 			return ret;
2904 		}
2905 	}
2906 
2907 	return ret;
2908 }
2909 
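/* hclge_reset_wait: poll the reset status register that matches the
 * current reset type until the reset-in-progress bit clears, giving up
 * after HCLGE_RESET_WAIT_CNT polls. FLR completion is tracked through the
 * HNAE3_FLR_DONE bit instead of a register.
 */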
2910 static int hclge_reset_wait(struct hclge_dev *hdev)
2911 {
#define HCLGE_RESET_WAIT_MS	100
2913 #define HCLGE_RESET_WAIT_CNT	200
2914 	u32 val, reg, reg_bit;
2915 	u32 cnt = 0;
2916 
2917 	switch (hdev->reset_type) {
2918 	case HNAE3_IMP_RESET:
2919 		reg = HCLGE_GLOBAL_RESET_REG;
2920 		reg_bit = HCLGE_IMP_RESET_BIT;
2921 		break;
2922 	case HNAE3_GLOBAL_RESET:
2923 		reg = HCLGE_GLOBAL_RESET_REG;
2924 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
2925 		break;
2926 	case HNAE3_CORE_RESET:
2927 		reg = HCLGE_GLOBAL_RESET_REG;
2928 		reg_bit = HCLGE_CORE_RESET_BIT;
2929 		break;
2930 	case HNAE3_FUNC_RESET:
2931 		reg = HCLGE_FUN_RST_ING;
2932 		reg_bit = HCLGE_FUN_RST_ING_B;
2933 		break;
2934 	case HNAE3_FLR_RESET:
2935 		break;
2936 	default:
2937 		dev_err(&hdev->pdev->dev,
2938 			"Wait for unsupported reset type: %d\n",
2939 			hdev->reset_type);
2940 		return -EINVAL;
2941 	}
2942 
2943 	if (hdev->reset_type == HNAE3_FLR_RESET) {
2944 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2945 		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);
2947 
2948 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2949 			dev_err(&hdev->pdev->dev,
2950 				"flr wait timeout: %d\n", cnt);
2951 			return -EBUSY;
2952 		}
2953 
2954 		return 0;
2955 	}
2956 
2957 	val = hclge_read_dev(&hdev->hw, reg);
2958 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
2960 		val = hclge_read_dev(&hdev->hw, reg);
2961 		cnt++;
2962 	}
2963 
2964 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
2965 		dev_warn(&hdev->pdev->dev,
2966 			 "Wait for reset timeout: %d\n", hdev->reset_type);
2967 		return -EBUSY;
2968 	}
2969 
2970 	return 0;
2971 }
2972 
2973 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2974 {
2975 	struct hclge_vf_rst_cmd *req;
2976 	struct hclge_desc desc;
2977 
2978 	req = (struct hclge_vf_rst_cmd *)desc.data;
2979 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2980 	req->dest_vfid = func_id;
2981 
2982 	if (reset)
2983 		req->vf_rst = 0x1;
2984 
2985 	return hclge_cmd_send(&hdev->hw, &desc, 1);
2986 }
2987 
2988 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2989 {
2990 	int i;
2991 
2992 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2993 		struct hclge_vport *vport = &hdev->vport[i];
2994 		int ret;
2995 
2996 		/* Send cmd to set/clear VF's FUNC_RST_ING */
2997 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2998 		if (ret) {
2999 			dev_err(&hdev->pdev->dev,
3000 				"set vf(%d) rst failed %d!\n",
3001 				vport->vport_id, ret);
3002 			return ret;
3003 		}
3004 
3005 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3006 			continue;
3007 
3008 		/* Inform VF to process the reset.
3009 		 * hclge_inform_reset_assert_to_vf may fail if VF
3010 		 * driver is not loaded.
3011 		 */
3012 		ret = hclge_inform_reset_assert_to_vf(vport);
3013 		if (ret)
3014 			dev_warn(&hdev->pdev->dev,
3015 				 "inform reset to vf(%d) failed %d!\n",
3016 				 vport->vport_id, ret);
3017 	}
3018 
3019 	return 0;
3020 }
3021 
3022 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3023 {
3024 	struct hclge_desc desc;
3025 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3026 	int ret;
3027 
3028 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3029 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3030 	req->fun_reset_vfid = func_id;
3031 
3032 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3033 	if (ret)
3034 		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);
3036 
3037 	return ret;
3038 }
3039 
3040 static void hclge_do_reset(struct hclge_dev *hdev)
3041 {
3042 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3043 	struct pci_dev *pdev = hdev->pdev;
3044 	u32 val;
3045 
3046 	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset is not finished\n");
3048 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3049 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3050 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3051 		return;
3052 	}
3053 
3054 	switch (hdev->reset_type) {
3055 	case HNAE3_GLOBAL_RESET:
3056 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3057 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3058 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3059 		dev_info(&pdev->dev, "Global Reset requested\n");
3060 		break;
3061 	case HNAE3_CORE_RESET:
3062 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3063 		hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3064 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3065 		dev_info(&pdev->dev, "Core Reset requested\n");
3066 		break;
3067 	case HNAE3_FUNC_RESET:
3068 		dev_info(&pdev->dev, "PF Reset requested\n");
3069 		/* schedule again to check later */
3070 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3071 		hclge_reset_task_schedule(hdev);
3072 		break;
3073 	case HNAE3_FLR_RESET:
3074 		dev_info(&pdev->dev, "FLR requested\n");
3075 		/* schedule again to check later */
3076 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3077 		hclge_reset_task_schedule(hdev);
3078 		break;
3079 	default:
3080 		dev_warn(&pdev->dev,
3081 			 "Unsupported reset type: %d\n", hdev->reset_type);
3082 		break;
3083 	}
3084 }
3085 
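/* hclge_get_reset_level: resolve any unknown reset request first, then
 * return the highest-priority pending reset level (IMP > global > core >
 * func > FLR). Returning a higher level also clears the lower reset
 * requests (down to function reset) that it covers.
 */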
3086 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3087 						   unsigned long *addr)
3088 {
3089 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3090 
3091 	/* first, resolve any unknown reset type to the known type(s) */
3092 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3093 		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
3095 		 */
3096 		hclge_handle_hw_msix_error(hdev, addr);
3097 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred clearing the error event which caused the
		 * interrupt, since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * the new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
3105 		hclge_enable_vector(&hdev->misc_vector, true);
3106 	}
3107 
3108 	/* return the highest priority reset level amongst all */
3109 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3110 		rst_level = HNAE3_IMP_RESET;
3111 		clear_bit(HNAE3_IMP_RESET, addr);
3112 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3113 		clear_bit(HNAE3_CORE_RESET, addr);
3114 		clear_bit(HNAE3_FUNC_RESET, addr);
3115 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3116 		rst_level = HNAE3_GLOBAL_RESET;
3117 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3118 		clear_bit(HNAE3_CORE_RESET, addr);
3119 		clear_bit(HNAE3_FUNC_RESET, addr);
3120 	} else if (test_bit(HNAE3_CORE_RESET, addr)) {
3121 		rst_level = HNAE3_CORE_RESET;
3122 		clear_bit(HNAE3_CORE_RESET, addr);
3123 		clear_bit(HNAE3_FUNC_RESET, addr);
3124 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3125 		rst_level = HNAE3_FUNC_RESET;
3126 		clear_bit(HNAE3_FUNC_RESET, addr);
3127 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3128 		rst_level = HNAE3_FLR_RESET;
3129 		clear_bit(HNAE3_FLR_RESET, addr);
3130 	}
3131 
3132 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3133 	    rst_level < hdev->reset_type)
3134 		return HNAE3_NONE_RESET;
3135 
3136 	return rst_level;
3137 }
3138 
3139 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3140 {
3141 	u32 clearval = 0;
3142 
3143 	switch (hdev->reset_type) {
3144 	case HNAE3_IMP_RESET:
3145 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3146 		break;
3147 	case HNAE3_GLOBAL_RESET:
3148 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3149 		break;
3150 	case HNAE3_CORE_RESET:
3151 		clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3152 		break;
3153 	default:
3154 		break;
3155 	}
3156 
3157 	if (!clearval)
3158 		return;
3159 
3160 	hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3161 	hclge_enable_vector(&hdev->misc_vector, true);
3162 }
3163 
3164 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3165 {
3166 	int ret = 0;
3167 
3168 	switch (hdev->reset_type) {
3169 	case HNAE3_FUNC_RESET:
3170 		/* fall through */
3171 	case HNAE3_FLR_RESET:
3172 		ret = hclge_set_all_vf_rst(hdev, true);
3173 		break;
3174 	default:
3175 		break;
3176 	}
3177 
3178 	return ret;
3179 }
3180 
3181 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3182 {
3183 	u32 reg_val;
3184 	int ret = 0;
3185 
3186 	switch (hdev->reset_type) {
3187 	case HNAE3_FUNC_RESET:
		/* There is no mechanism for the PF to know if the VF has
		 * stopped IO; for now, just wait 100 ms for the VF to stop
		 */
3191 		msleep(100);
3192 		ret = hclge_func_reset_cmd(hdev, 0);
3193 		if (ret) {
3194 			dev_err(&hdev->pdev->dev,
3195 				"asserting function reset fail %d!\n", ret);
3196 			return ret;
3197 		}
3198 
		/* After performing PF reset, it is not necessary to do the
3200 		 * mailbox handling or send any command to firmware, because
3201 		 * any mailbox handling or command to firmware is only valid
3202 		 * after hclge_cmd_init is called.
3203 		 */
3204 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3205 		hdev->rst_stats.pf_rst_cnt++;
3206 		break;
3207 	case HNAE3_FLR_RESET:
		/* There is no mechanism for the PF to know if the VF has
		 * stopped IO; for now, just wait 100 ms for the VF to stop
		 */
3211 		msleep(100);
3212 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3213 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3214 		hdev->rst_stats.flr_rst_cnt++;
3215 		break;
3216 	case HNAE3_IMP_RESET:
3217 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3218 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3219 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3220 		break;
3221 	default:
3222 		break;
3223 	}
3224 
3225 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3226 
3227 	return ret;
3228 }
3229 
3230 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3231 {
3232 #define MAX_RESET_FAIL_CNT 5
3233 #define RESET_UPGRADE_DELAY_SEC 10
3234 
3235 	if (hdev->reset_pending) {
3236 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3237 			 hdev->reset_pending);
3238 		return true;
3239 	} else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3240 		   (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3241 		    BIT(HCLGE_IMP_RESET_BIT))) {
3242 		dev_info(&hdev->pdev->dev,
3243 			 "reset failed because IMP Reset is pending\n");
3244 		hclge_clear_reset_cause(hdev);
3245 		return false;
3246 	} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3247 		hdev->reset_fail_cnt++;
3248 		if (is_timeout) {
3249 			set_bit(hdev->reset_type, &hdev->reset_pending);
3250 			dev_info(&hdev->pdev->dev,
3251 				 "re-schedule to wait for hw reset done\n");
3252 			return true;
3253 		}
3254 
3255 		dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3256 		hclge_clear_reset_cause(hdev);
3257 		mod_timer(&hdev->reset_timer,
3258 			  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3259 
3260 		return false;
3261 	}
3262 
3263 	hclge_clear_reset_cause(hdev);
3264 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3265 	return false;
3266 }
3267 
3268 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3269 {
3270 	int ret = 0;
3271 
3272 	switch (hdev->reset_type) {
3273 	case HNAE3_FUNC_RESET:
3274 		/* fall through */
3275 	case HNAE3_FLR_RESET:
3276 		ret = hclge_set_all_vf_rst(hdev, false);
3277 		break;
3278 	default:
3279 		break;
3280 	}
3281 
3282 	return ret;
3283 }
3284 
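/* hclge_reset: perform the full reset sequence: notify the clients down,
 * prepare and assert the reset, wait for the hardware to finish, then
 * re-initialize the ae device and bring the clients back up. On failure,
 * the reset error handler decides whether to re-schedule the reset task.
 */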
3285 static void hclge_reset(struct hclge_dev *hdev)
3286 {
3287 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3288 	bool is_timeout = false;
3289 	int ret;
3290 
3291 	/* Initialize ae_dev reset status as well, in case enet layer wants to
3292 	 * know if device is undergoing reset
3293 	 */
3294 	ae_dev->reset_type = hdev->reset_type;
3295 	hdev->rst_stats.reset_cnt++;
3296 	/* perform reset of the stack & ae device for a client */
3297 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3298 	if (ret)
3299 		goto err_reset;
3300 
3301 	ret = hclge_reset_prepare_down(hdev);
3302 	if (ret)
3303 		goto err_reset;
3304 
3305 	rtnl_lock();
3306 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3307 	if (ret)
3308 		goto err_reset_lock;
3309 
3310 	rtnl_unlock();
3311 
3312 	ret = hclge_reset_prepare_wait(hdev);
3313 	if (ret)
3314 		goto err_reset;
3315 
3316 	if (hclge_reset_wait(hdev)) {
3317 		is_timeout = true;
3318 		goto err_reset;
3319 	}
3320 
3321 	hdev->rst_stats.hw_reset_done_cnt++;
3322 
3323 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3324 	if (ret)
3325 		goto err_reset;
3326 
3327 	rtnl_lock();
3328 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3329 	if (ret)
3330 		goto err_reset_lock;
3331 
3332 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3333 	if (ret)
3334 		goto err_reset_lock;
3335 
3336 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3337 	if (ret)
3338 		goto err_reset_lock;
3339 
3340 	ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3341 	if (ret)
3342 		goto err_reset_lock;
3343 
3344 	hclge_clear_reset_cause(hdev);
3345 
3346 	ret = hclge_reset_prepare_up(hdev);
3347 	if (ret)
3348 		goto err_reset_lock;
3349 
3350 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3351 	if (ret)
3352 		goto err_reset_lock;
3353 
3354 	rtnl_unlock();
3355 
3356 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3357 	if (ret)
3358 		goto err_reset;
3359 
3360 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3361 	if (ret)
3362 		goto err_reset;
3363 
3364 	hdev->last_reset_time = jiffies;
3365 	hdev->reset_fail_cnt = 0;
3366 	hdev->rst_stats.reset_done_cnt++;
3367 	ae_dev->reset_type = HNAE3_NONE_RESET;
3368 	del_timer(&hdev->reset_timer);
3369 
3370 	return;
3371 
3372 err_reset_lock:
3373 	rtnl_unlock();
3374 err_reset:
3375 	if (hclge_reset_err_handle(hdev, is_timeout))
3376 		hclge_reset_task_schedule(hdev);
3377 }
3378 
3379 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3380 {
3381 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3382 	struct hclge_dev *hdev = ae_dev->priv;
3383 
	/* We might end up getting called broadly because of 2 cases below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the
	 * watchdog hit us again. We will know this if the last reset request
	 * did not occur very recently (watchdog timer = 5*HZ, so check after
	 * a sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat of the
	 * most recent reset request, we want to make sure we throttle it, so
	 * we will not allow it again before 3*HZ has passed.
	 */
3399 	if (!handle)
3400 		handle = &hdev->vport[0].nic;
3401 
3402 	if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3403 		return;
3404 	else if (hdev->default_reset_request)
3405 		hdev->reset_level =
3406 			hclge_get_reset_level(hdev,
3407 					      &hdev->default_reset_request);
3408 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3409 		hdev->reset_level = HNAE3_FUNC_RESET;
3410 
	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3412 		 hdev->reset_level);
3413 
3414 	/* request reset & schedule reset task */
3415 	set_bit(hdev->reset_level, &hdev->reset_request);
3416 	hclge_reset_task_schedule(hdev);
3417 
3418 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3419 		hdev->reset_level++;
3420 }
3421 
3422 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3423 					enum hnae3_reset_type rst_type)
3424 {
3425 	struct hclge_dev *hdev = ae_dev->priv;
3426 
3427 	set_bit(rst_type, &hdev->default_reset_request);
3428 }
3429 
3430 static void hclge_reset_timer(struct timer_list *t)
3431 {
3432 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3433 
3434 	dev_info(&hdev->pdev->dev,
3435 		 "triggering global reset in reset timer\n");
3436 	set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3437 	hclge_reset_event(hdev->pdev, NULL);
3438 }
3439 
3440 static void hclge_reset_subtask(struct hclge_dev *hdev)
3441 {
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, we need to wait for the
	 * hardware to complete the reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. else, we can come back later to check this status, so
	 *       re-schedule now.
	 */
3451 	hdev->last_reset_time = jiffies;
3452 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3453 	if (hdev->reset_type != HNAE3_NONE_RESET)
3454 		hclge_reset(hdev);
3455 
3456 	/* check if we got any *new* reset requests to be honored */
3457 	hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3458 	if (hdev->reset_type != HNAE3_NONE_RESET)
3459 		hclge_do_reset(hdev);
3460 
3461 	hdev->reset_type = HNAE3_NONE_RESET;
3462 }
3463 
3464 static void hclge_reset_service_task(struct work_struct *work)
3465 {
3466 	struct hclge_dev *hdev =
3467 		container_of(work, struct hclge_dev, rst_service_task);
3468 
3469 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3470 		return;
3471 
3472 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3473 
3474 	hclge_reset_subtask(hdev);
3475 
3476 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3477 }
3478 
3479 static void hclge_mailbox_service_task(struct work_struct *work)
3480 {
3481 	struct hclge_dev *hdev =
3482 		container_of(work, struct hclge_dev, mbx_service_task);
3483 
3484 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3485 		return;
3486 
3487 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3488 
3489 	hclge_mbx_handler(hdev);
3490 
3491 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3492 }
3493 
3494 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3495 {
3496 	int i;
3497 
	/* start from vport 1, because the PF (vport 0) is always alive */
3499 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3500 		struct hclge_vport *vport = &hdev->vport[i];
3501 
3502 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3503 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3504 
		/* If the VF is not alive, reset its MPS to the default value */
3506 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3507 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3508 	}
3509 }
3510 
3511 static void hclge_service_task(struct work_struct *work)
3512 {
3513 	struct hclge_dev *hdev =
3514 		container_of(work, struct hclge_dev, service_task);
3515 
3516 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3517 		hclge_update_stats_for_all(hdev);
3518 		hdev->hw_stats.stats_timer = 0;
3519 	}
3520 
3521 	hclge_update_port_info(hdev);
3522 	hclge_update_link_status(hdev);
3523 	hclge_update_vport_alive(hdev);
3524 	hclge_service_complete(hdev);
3525 }
3526 
3527 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3528 {
3529 	/* VF handle has no client */
3530 	if (!handle->client)
3531 		return container_of(handle, struct hclge_vport, nic);
3532 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3533 		return container_of(handle, struct hclge_vport, roce);
3534 	else
3535 		return container_of(handle, struct hclge_vport, nic);
3536 }
3537 
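/* hclge_get_vector: allocate up to vector_num interrupt vectors for the
 * given vport. Vector 0 is reserved for the misc interrupt, so allocation
 * starts from index 1. Returns the number of vectors actually allocated.
 */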
3538 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3539 			    struct hnae3_vector_info *vector_info)
3540 {
3541 	struct hclge_vport *vport = hclge_get_vport(handle);
3542 	struct hnae3_vector_info *vector = vector_info;
3543 	struct hclge_dev *hdev = vport->back;
3544 	int alloc = 0;
3545 	int i, j;
3546 
3547 	vector_num = min(hdev->num_msi_left, vector_num);
3548 
3549 	for (j = 0; j < vector_num; j++) {
3550 		for (i = 1; i < hdev->num_msi; i++) {
3551 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3552 				vector->vector = pci_irq_vector(hdev->pdev, i);
3553 				vector->io_addr = hdev->hw.io_base +
3554 					HCLGE_VECTOR_REG_BASE +
3555 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3556 					vport->vport_id *
3557 					HCLGE_VECTOR_VF_OFFSET;
3558 				hdev->vector_status[i] = vport->vport_id;
3559 				hdev->vector_irq[i] = vector->vector;
3560 
3561 				vector++;
3562 				alloc++;
3563 
3564 				break;
3565 			}
3566 		}
3567 	}
3568 	hdev->num_msi_left -= alloc;
3569 	hdev->num_msi_used += alloc;
3570 
3571 	return alloc;
3572 }
3573 
3574 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3575 {
3576 	int i;
3577 
3578 	for (i = 0; i < hdev->num_msi; i++)
3579 		if (vector == hdev->vector_irq[i])
3580 			return i;
3581 
3582 	return -EINVAL;
3583 }
3584 
3585 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3586 {
3587 	struct hclge_vport *vport = hclge_get_vport(handle);
3588 	struct hclge_dev *hdev = vport->back;
3589 	int vector_id;
3590 
3591 	vector_id = hclge_get_vector_index(hdev, vector);
3592 	if (vector_id < 0) {
3593 		dev_err(&hdev->pdev->dev,
3594 			"Get vector index fail. vector_id =%d\n", vector_id);
3595 		return vector_id;
3596 	}
3597 
3598 	hclge_free_vector(hdev, vector_id);
3599 
3600 	return 0;
3601 }
3602 
3603 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3604 {
3605 	return HCLGE_RSS_KEY_SIZE;
3606 }
3607 
3608 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3609 {
3610 	return HCLGE_RSS_IND_TBL_SIZE;
3611 }
3612 
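/* Write the RSS hash algorithm and hash key to hardware. The key is
 * sent in chunks of HCLGE_RSS_HASH_KEY_NUM bytes over three command
 * descriptors, with the chunk index carried in the hash_config field.
 */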
3613 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3614 				  const u8 hfunc, const u8 *key)
3615 {
3616 	struct hclge_rss_config_cmd *req;
3617 	struct hclge_desc desc;
3618 	int key_offset;
3619 	int key_size;
3620 	int ret;
3621 
3622 	req = (struct hclge_rss_config_cmd *)desc.data;
3623 
3624 	for (key_offset = 0; key_offset < 3; key_offset++) {
3625 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3626 					   false);
3627 
3628 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3629 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3630 
3631 		if (key_offset == 2)
3632 			key_size =
3633 			HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3634 		else
3635 			key_size = HCLGE_RSS_HASH_KEY_NUM;
3636 
3637 		memcpy(req->hash_key,
3638 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3639 
3640 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3641 		if (ret) {
3642 			dev_err(&hdev->pdev->dev,
3643 				"Configure RSS config fail, status = %d\n",
3644 				ret);
3645 			return ret;
3646 		}
3647 	}
3648 	return 0;
3649 }
3650 
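/* Program the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries
 * per command descriptor, mapping each table index to a queue taken
 * from @indir.
 */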
3651 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3652 {
3653 	struct hclge_rss_indirection_table_cmd *req;
3654 	struct hclge_desc desc;
3655 	int i, j;
3656 	int ret;
3657 
3658 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3659 
3660 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3661 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
3662 					   false);
3663 
3664 		req->start_table_index =
3665 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3666 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3667 
3668 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3669 			req->rss_result[j] =
3670 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3671 
3672 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3673 		if (ret) {
3674 			dev_err(&hdev->pdev->dev,
3675 				"Configure rss indir table fail, status = %d\n",
3676 				ret);
3677 			return ret;
3678 		}
3679 	}
3680 	return 0;
3681 }
3682 
3683 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3684 				 u16 *tc_size, u16 *tc_offset)
3685 {
3686 	struct hclge_rss_tc_mode_cmd *req;
3687 	struct hclge_desc desc;
3688 	int ret;
3689 	int i;
3690 
3691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3692 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3693 
3694 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3695 		u16 mode = 0;
3696 
3697 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3698 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3699 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3700 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3701 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3702 
3703 		req->rss_tc_mode[i] = cpu_to_le16(mode);
3704 	}
3705 
3706 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3707 	if (ret)
3708 		dev_err(&hdev->pdev->dev,
3709 			"Configure rss tc mode fail, status = %d\n", ret);
3710 
3711 	return ret;
3712 }
3713 
3714 static void hclge_get_rss_type(struct hclge_vport *vport)
3715 {
3716 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
3717 	    vport->rss_tuple_sets.ipv4_udp_en ||
3718 	    vport->rss_tuple_sets.ipv4_sctp_en ||
3719 	    vport->rss_tuple_sets.ipv6_tcp_en ||
3720 	    vport->rss_tuple_sets.ipv6_udp_en ||
3721 	    vport->rss_tuple_sets.ipv6_sctp_en)
3722 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3723 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3724 		 vport->rss_tuple_sets.ipv6_fragment_en)
3725 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3726 	else
3727 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3728 }
3729 
3730 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3731 {
3732 	struct hclge_rss_input_tuple_cmd *req;
3733 	struct hclge_desc desc;
3734 	int ret;
3735 
3736 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3737 
3738 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3739 
3740 	/* Get the tuple config from the PF (vport 0) */
3741 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3742 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3743 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3744 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3745 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3746 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3747 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3748 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3749 	hclge_get_rss_type(&hdev->vport[0]);
3750 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3751 	if (ret)
3752 		dev_err(&hdev->pdev->dev,
3753 			"Configure rss input fail, status = %d\n", ret);
3754 	return ret;
3755 }
3756 
3757 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3758 			 u8 *key, u8 *hfunc)
3759 {
3760 	struct hclge_vport *vport = hclge_get_vport(handle);
3761 	int i;
3762 
3763 	/* Get hash algorithm */
3764 	if (hfunc) {
3765 		switch (vport->rss_algo) {
3766 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3767 			*hfunc = ETH_RSS_HASH_TOP;
3768 			break;
3769 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
3770 			*hfunc = ETH_RSS_HASH_XOR;
3771 			break;
3772 		default:
3773 			*hfunc = ETH_RSS_HASH_UNKNOWN;
3774 			break;
3775 		}
3776 	}
3777 
3778 	/* Get the RSS key requested by the user */
3779 	if (key)
3780 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3781 
3782 	/* Get the indirection table */
3783 	if (indir)
3784 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3785 			indir[i] = vport->rss_indirection_tbl[i];
3786 
3787 	return 0;
3788 }
3789 
3790 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3791 			 const u8 *key, const u8 hfunc)
3792 {
3793 	struct hclge_vport *vport = hclge_get_vport(handle);
3794 	struct hclge_dev *hdev = vport->back;
3795 	u8 hash_algo;
3796 	int ret, i;
3797 
3798 	/* Set the RSS hash key if specified by the user */
3799 	if (key) {
3800 		switch (hfunc) {
3801 		case ETH_RSS_HASH_TOP:
3802 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3803 			break;
3804 		case ETH_RSS_HASH_XOR:
3805 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3806 			break;
3807 		case ETH_RSS_HASH_NO_CHANGE:
3808 			hash_algo = vport->rss_algo;
3809 			break;
3810 		default:
3811 			return -EINVAL;
3812 		}
3813 
3814 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3815 		if (ret)
3816 			return ret;
3817 
3818 		/* Update the shadow RSS key with the user specified key */
3819 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3820 		vport->rss_algo = hash_algo;
3821 	}
3822 
3823 	/* Update the shadow RSS table with user specified qids */
3824 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3825 		vport->rss_indirection_tbl[i] = indir[i];
3826 
3827 	/* Update the hardware */
3828 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3829 }
3830 
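/* Convert the ethtool RXH_* flags in @nfc to the driver's tuple bits
 * (source/destination IP and L4 port). For SCTP flows the verification
 * tag bit is enabled as well.
 */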
3831 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3832 {
3833 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3834 
3835 	if (nfc->data & RXH_L4_B_2_3)
3836 		hash_sets |= HCLGE_D_PORT_BIT;
3837 	else
3838 		hash_sets &= ~HCLGE_D_PORT_BIT;
3839 
3840 	if (nfc->data & RXH_IP_SRC)
3841 		hash_sets |= HCLGE_S_IP_BIT;
3842 	else
3843 		hash_sets &= ~HCLGE_S_IP_BIT;
3844 
3845 	if (nfc->data & RXH_IP_DST)
3846 		hash_sets |= HCLGE_D_IP_BIT;
3847 	else
3848 		hash_sets &= ~HCLGE_D_IP_BIT;
3849 
3850 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3851 		hash_sets |= HCLGE_V_TAG_BIT;
3852 
3853 	return hash_sets;
3854 }
3855 
3856 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3857 			       struct ethtool_rxnfc *nfc)
3858 {
3859 	struct hclge_vport *vport = hclge_get_vport(handle);
3860 	struct hclge_dev *hdev = vport->back;
3861 	struct hclge_rss_input_tuple_cmd *req;
3862 	struct hclge_desc desc;
3863 	u8 tuple_sets;
3864 	int ret;
3865 
3866 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3867 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
3868 		return -EINVAL;
3869 
3870 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3871 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3872 
3873 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3874 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3875 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3876 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3877 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3878 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3879 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3880 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3881 
3882 	tuple_sets = hclge_get_rss_hash_bits(nfc);
3883 	switch (nfc->flow_type) {
3884 	case TCP_V4_FLOW:
3885 		req->ipv4_tcp_en = tuple_sets;
3886 		break;
3887 	case TCP_V6_FLOW:
3888 		req->ipv6_tcp_en = tuple_sets;
3889 		break;
3890 	case UDP_V4_FLOW:
3891 		req->ipv4_udp_en = tuple_sets;
3892 		break;
3893 	case UDP_V6_FLOW:
3894 		req->ipv6_udp_en = tuple_sets;
3895 		break;
3896 	case SCTP_V4_FLOW:
3897 		req->ipv4_sctp_en = tuple_sets;
3898 		break;
3899 	case SCTP_V6_FLOW:
3900 		if ((nfc->data & RXH_L4_B_0_1) ||
3901 		    (nfc->data & RXH_L4_B_2_3))
3902 			return -EINVAL;
3903 
3904 		req->ipv6_sctp_en = tuple_sets;
3905 		break;
3906 	case IPV4_FLOW:
3907 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3908 		break;
3909 	case IPV6_FLOW:
3910 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3911 		break;
3912 	default:
3913 		return -EINVAL;
3914 	}
3915 
3916 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3917 	if (ret) {
3918 		dev_err(&hdev->pdev->dev,
3919 			"Set rss tuple fail, status = %d\n", ret);
3920 		return ret;
3921 	}
3922 
3923 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3924 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3925 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3926 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3927 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3928 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3929 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3930 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3931 	hclge_get_rss_type(vport);
3932 	return 0;
3933 }
3934 
3935 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3936 			       struct ethtool_rxnfc *nfc)
3937 {
3938 	struct hclge_vport *vport = hclge_get_vport(handle);
3939 	u8 tuple_sets;
3940 
3941 	nfc->data = 0;
3942 
3943 	switch (nfc->flow_type) {
3944 	case TCP_V4_FLOW:
3945 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3946 		break;
3947 	case UDP_V4_FLOW:
3948 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3949 		break;
3950 	case TCP_V6_FLOW:
3951 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3952 		break;
3953 	case UDP_V6_FLOW:
3954 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3955 		break;
3956 	case SCTP_V4_FLOW:
3957 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3958 		break;
3959 	case SCTP_V6_FLOW:
3960 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3961 		break;
3962 	case IPV4_FLOW:
3963 	case IPV6_FLOW:
3964 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3965 		break;
3966 	default:
3967 		return -EINVAL;
3968 	}
3969 
3970 	if (!tuple_sets)
3971 		return 0;
3972 
3973 	if (tuple_sets & HCLGE_D_PORT_BIT)
3974 		nfc->data |= RXH_L4_B_2_3;
3975 	if (tuple_sets & HCLGE_S_PORT_BIT)
3976 		nfc->data |= RXH_L4_B_0_1;
3977 	if (tuple_sets & HCLGE_D_IP_BIT)
3978 		nfc->data |= RXH_IP_DST;
3979 	if (tuple_sets & HCLGE_S_IP_BIT)
3980 		nfc->data |= RXH_IP_SRC;
3981 
3982 	return 0;
3983 }
3984 
3985 static int hclge_get_tc_size(struct hnae3_handle *handle)
3986 {
3987 	struct hclge_vport *vport = hclge_get_vport(handle);
3988 	struct hclge_dev *hdev = vport->back;
3989 
3990 	return hdev->rss_size_max;
3991 }
3992 
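/* Initialize the RSS hardware configuration from the shadow settings of
 * vport 0: indirection table, hash key and algorithm, input tuples and
 * the per-TC RSS mode.
 */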
3993 int hclge_rss_init_hw(struct hclge_dev *hdev)
3994 {
3995 	struct hclge_vport *vport = hdev->vport;
3996 	u8 *rss_indir = vport[0].rss_indirection_tbl;
3997 	u16 rss_size = vport[0].alloc_rss_size;
3998 	u8 *key = vport[0].rss_hash_key;
3999 	u8 hfunc = vport[0].rss_algo;
4000 	u16 tc_offset[HCLGE_MAX_TC_NUM];
4001 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4002 	u16 tc_size[HCLGE_MAX_TC_NUM];
4003 	u16 roundup_size;
4004 	int i, ret;
4005 
4006 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4007 	if (ret)
4008 		return ret;
4009 
4010 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4011 	if (ret)
4012 		return ret;
4013 
4014 	ret = hclge_set_rss_input_tuple(hdev);
4015 	if (ret)
4016 		return ret;
4017 
4018 	/* Each TC has the same queue size, and the tc_size set to hardware is
4019 	 * the log2 of the roundup power of two of rss_size; the actual queue
4020 	 * size is limited by the indirection table.
4021 	 */
4022 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4023 		dev_err(&hdev->pdev->dev,
4024 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
4025 			rss_size);
4026 		return -EINVAL;
4027 	}
4028 
4029 	roundup_size = roundup_pow_of_two(rss_size);
4030 	roundup_size = ilog2(roundup_size);
4031 
4032 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4033 		tc_valid[i] = 0;
4034 
4035 		if (!(hdev->hw_tc_map & BIT(i)))
4036 			continue;
4037 
4038 		tc_valid[i] = 1;
4039 		tc_size[i] = roundup_size;
4040 		tc_offset[i] = rss_size * i;
4041 	}
4042 
4043 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4044 }
4045 
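/* Fill each vport's shadow indirection table with a round-robin mapping
 * over its allocated RSS queues.
 */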
4046 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4047 {
4048 	struct hclge_vport *vport = hdev->vport;
4049 	int i, j;
4050 
4051 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4052 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4053 			vport[j].rss_indirection_tbl[i] =
4054 				i % vport[j].alloc_rss_size;
4055 	}
4056 }
4057 
4058 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4059 {
4060 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4061 	struct hclge_vport *vport = hdev->vport;
4062 
4063 	if (hdev->pdev->revision >= 0x21)
4064 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4065 
4066 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4067 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4068 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4069 		vport[i].rss_tuple_sets.ipv4_udp_en =
4070 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4071 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4072 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4073 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4074 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4075 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4076 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4077 		vport[i].rss_tuple_sets.ipv6_udp_en =
4078 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4079 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4080 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4081 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4082 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4083 
4084 		vport[i].rss_algo = rss_algo;
4085 
4086 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4087 		       HCLGE_RSS_KEY_SIZE);
4088 	}
4089 
4090 	hclge_rss_indir_init_cfg(hdev);
4091 }
4092 
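/* Map (@en == true) or unmap (@en == false) the rings in @ring_chain to
 * the given vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are packed
 * into one command descriptor; longer chains are split over several
 * commands.
 */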
4093 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4094 				int vector_id, bool en,
4095 				struct hnae3_ring_chain_node *ring_chain)
4096 {
4097 	struct hclge_dev *hdev = vport->back;
4098 	struct hnae3_ring_chain_node *node;
4099 	struct hclge_desc desc;
4100 	struct hclge_ctrl_vector_chain_cmd *req
4101 		= (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4102 	enum hclge_cmd_status status;
4103 	enum hclge_opcode_type op;
4104 	u16 tqp_type_and_id;
4105 	int i;
4106 
4107 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4108 	hclge_cmd_setup_basic_desc(&desc, op, false);
4109 	req->int_vector_id = vector_id;
4110 
4111 	i = 0;
4112 	for (node = ring_chain; node; node = node->next) {
4113 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4114 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4115 				HCLGE_INT_TYPE_S,
4116 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4117 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4118 				HCLGE_TQP_ID_S, node->tqp_index);
4119 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4120 				HCLGE_INT_GL_IDX_S,
4121 				hnae3_get_field(node->int_gl_idx,
4122 						HNAE3_RING_GL_IDX_M,
4123 						HNAE3_RING_GL_IDX_S));
4124 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4125 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4126 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4127 			req->vfid = vport->vport_id;
4128 
4129 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4130 			if (status) {
4131 				dev_err(&hdev->pdev->dev,
4132 					"Map TQP fail, status is %d.\n",
4133 					status);
4134 				return -EIO;
4135 			}
4136 			i = 0;
4137 
4138 			hclge_cmd_setup_basic_desc(&desc,
4139 						   op,
4140 						   false);
4141 			req->int_vector_id = vector_id;
4142 		}
4143 	}
4144 
4145 	if (i > 0) {
4146 		req->int_cause_num = i;
4147 		req->vfid = vport->vport_id;
4148 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4149 		if (status) {
4150 			dev_err(&hdev->pdev->dev,
4151 				"Map TQP fail, status is %d.\n", status);
4152 			return -EIO;
4153 		}
4154 	}
4155 
4156 	return 0;
4157 }
4158 
4159 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4160 				    int vector,
4161 				    struct hnae3_ring_chain_node *ring_chain)
4162 {
4163 	struct hclge_vport *vport = hclge_get_vport(handle);
4164 	struct hclge_dev *hdev = vport->back;
4165 	int vector_id;
4166 
4167 	vector_id = hclge_get_vector_index(hdev, vector);
4168 	if (vector_id < 0) {
4169 		dev_err(&hdev->pdev->dev,
4170 			"Get vector index fail. vector_id =%d\n", vector_id);
4171 		return vector_id;
4172 	}
4173 
4174 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4175 }
4176 
4177 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4178 				       int vector,
4179 				       struct hnae3_ring_chain_node *ring_chain)
4180 {
4181 	struct hclge_vport *vport = hclge_get_vport(handle);
4182 	struct hclge_dev *hdev = vport->back;
4183 	int vector_id, ret;
4184 
4185 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4186 		return 0;
4187 
4188 	vector_id = hclge_get_vector_index(hdev, vector);
4189 	if (vector_id < 0) {
4190 		dev_err(&handle->pdev->dev,
4191 			"Get vector index fail. ret =%d\n", vector_id);
4192 		return vector_id;
4193 	}
4194 
4195 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4196 	if (ret)
4197 		dev_err(&handle->pdev->dev,
4198 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4199 			vector_id,
4200 			ret);
4201 
4202 	return ret;
4203 }
4204 
4205 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4206 			       struct hclge_promisc_param *param)
4207 {
4208 	struct hclge_promisc_cfg_cmd *req;
4209 	struct hclge_desc desc;
4210 	int ret;
4211 
4212 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4213 
4214 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4215 	req->vf_id = param->vf_id;
4216 
4217 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4218 	 * pdev revision 0x20; newer revisions support them. Setting these two
4219 	 * fields does not cause an error when the driver sends the command to
4220 	 * the firmware on revision 0x20.
4221 	 */
4222 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4223 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4224 
4225 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4226 	if (ret)
4227 		dev_err(&hdev->pdev->dev,
4228 			"Set promisc mode fail, status is %d.\n", ret);
4229 
4230 	return ret;
4231 }
4232 
4233 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4234 			      bool en_mc, bool en_bc, int vport_id)
4235 {
4236 	if (!param)
4237 		return;
4238 
4239 	memset(param, 0, sizeof(struct hclge_promisc_param));
4240 	if (en_uc)
4241 		param->enable = HCLGE_PROMISC_EN_UC;
4242 	if (en_mc)
4243 		param->enable |= HCLGE_PROMISC_EN_MC;
4244 	if (en_bc)
4245 		param->enable |= HCLGE_PROMISC_EN_BC;
4246 	param->vf_id = vport_id;
4247 }
4248 
4249 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4250 				  bool en_mc_pmc)
4251 {
4252 	struct hclge_vport *vport = hclge_get_vport(handle);
4253 	struct hclge_dev *hdev = vport->back;
4254 	struct hclge_promisc_param param;
4255 	bool en_bc_pmc = true;
4256 
4257 	/* For revision 0x20, vlan filter is always bypassed when broadcast
4258 	 * promisc is enabled, so broadcast promisc should stay disabled until
4259 	 * the user enables promisc mode
4260 	 */
4261 	if (handle->pdev->revision == 0x20)
4262 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4263 
4264 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4265 				 vport->vport_id);
4266 	return hclge_cmd_set_promisc_mode(hdev, &param);
4267 }
4268 
4269 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4270 {
4271 	struct hclge_get_fd_mode_cmd *req;
4272 	struct hclge_desc desc;
4273 	int ret;
4274 
4275 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4276 
4277 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4278 
4279 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4280 	if (ret) {
4281 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4282 		return ret;
4283 	}
4284 
4285 	*fd_mode = req->mode;
4286 
4287 	return ret;
4288 }
4289 
4290 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4291 				   u32 *stage1_entry_num,
4292 				   u32 *stage2_entry_num,
4293 				   u16 *stage1_counter_num,
4294 				   u16 *stage2_counter_num)
4295 {
4296 	struct hclge_get_fd_allocation_cmd *req;
4297 	struct hclge_desc desc;
4298 	int ret;
4299 
4300 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4301 
4302 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4303 
4304 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4305 	if (ret) {
4306 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4307 			ret);
4308 		return ret;
4309 	}
4310 
4311 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4312 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4313 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4314 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4315 
4316 	return ret;
4317 }
4318 
4319 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4320 {
4321 	struct hclge_set_fd_key_config_cmd *req;
4322 	struct hclge_fd_key_cfg *stage;
4323 	struct hclge_desc desc;
4324 	int ret;
4325 
4326 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4327 
4328 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4329 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4330 	req->stage = stage_num;
4331 	req->key_select = stage->key_sel;
4332 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4333 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4334 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4335 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4336 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4337 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4338 
4339 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4340 	if (ret)
4341 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4342 
4343 	return ret;
4344 }
4345 
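/* Query the flow director mode and resource allocation from firmware,
 * then configure the stage 1 key: which tuples and meta data fields
 * take part in the TCAM key. Returns 0 if flow director is not
 * supported by the device.
 */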
4346 static int hclge_init_fd_config(struct hclge_dev *hdev)
4347 {
4348 #define LOW_2_WORDS		0x03
4349 	struct hclge_fd_key_cfg *key_cfg;
4350 	int ret;
4351 
4352 	if (!hnae3_dev_fd_supported(hdev))
4353 		return 0;
4354 
4355 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4356 	if (ret)
4357 		return ret;
4358 
4359 	switch (hdev->fd_cfg.fd_mode) {
4360 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4361 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4362 		break;
4363 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4364 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4365 		break;
4366 	default:
4367 		dev_err(&hdev->pdev->dev,
4368 			"Unsupported flow director mode %d\n",
4369 			hdev->fd_cfg.fd_mode);
4370 		return -EOPNOTSUPP;
4371 	}
4372 
4373 	hdev->fd_cfg.proto_support =
4374 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4375 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4376 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4377 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4378 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4379 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4380 	key_cfg->outer_sipv6_word_en = 0;
4381 	key_cfg->outer_dipv6_word_en = 0;
4382 
4383 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4384 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4385 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4386 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4387 
4388 	/* The max 400-bit key also supports tuples for ether type */
4389 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4390 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4391 		key_cfg->tuple_active |=
4392 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4393 	}
4394 
4395 	/* roce_type is used to filter out roce frames
4396 	 * dst_vport is used to specify the rule's destination vport
4397 	 */
4398 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4399 
4400 	ret = hclge_get_fd_allocation(hdev,
4401 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4402 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4403 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4404 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4405 	if (ret)
4406 		return ret;
4407 
4408 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4409 }
4410 
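/* Write one flow director TCAM entry. The key is spread over three
 * command descriptors; @sel_x selects whether the x or the y part of
 * the key is written, and the entry is only marked valid on the x pass.
 */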
4411 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4412 				int loc, u8 *key, bool is_add)
4413 {
4414 	struct hclge_fd_tcam_config_1_cmd *req1;
4415 	struct hclge_fd_tcam_config_2_cmd *req2;
4416 	struct hclge_fd_tcam_config_3_cmd *req3;
4417 	struct hclge_desc desc[3];
4418 	int ret;
4419 
4420 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4421 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4422 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4423 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4424 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4425 
4426 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4427 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4428 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4429 
4430 	req1->stage = stage;
4431 	req1->xy_sel = sel_x ? 1 : 0;
4432 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4433 	req1->index = cpu_to_le32(loc);
4434 	req1->entry_vld = sel_x ? is_add : 0;
4435 
4436 	if (key) {
4437 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4438 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4439 		       sizeof(req2->tcam_data));
4440 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4441 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4442 	}
4443 
4444 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4445 	if (ret)
4446 		dev_err(&hdev->pdev->dev,
4447 			"config tcam key fail, ret=%d\n",
4448 			ret);
4449 
4450 	return ret;
4451 }
4452 
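/* Configure the action data of a flow director rule: drop the packet or
 * forward it to a queue, optionally count it, and write the rule id back
 * to the buffer descriptor if requested.
 */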
4453 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4454 			      struct hclge_fd_ad_data *action)
4455 {
4456 	struct hclge_fd_ad_config_cmd *req;
4457 	struct hclge_desc desc;
4458 	u64 ad_data = 0;
4459 	int ret;
4460 
4461 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4462 
4463 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4464 	req->index = cpu_to_le32(loc);
4465 	req->stage = stage;
4466 
4467 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4468 		      action->write_rule_id_to_bd);
4469 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4470 			action->rule_id);
4471 	ad_data <<= 32;
4472 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4473 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4474 		      action->forward_to_direct_queue);
4475 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4476 			action->queue_id);
4477 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4478 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4479 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4480 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4481 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4482 			action->counter_id);
4483 
4484 	req->ad_data = cpu_to_le64(ad_data);
4485 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4486 	if (ret)
4487 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4488 
4489 	return ret;
4490 }
4491 
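/* Convert one tuple of @rule into the TCAM x/y key format using calc_x()
 * and calc_y(). Returns true if the tuple occupies space in the key
 * (even when it is unused and therefore left as zero), false otherwise.
 */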
4492 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4493 				   struct hclge_fd_rule *rule)
4494 {
4495 	u16 tmp_x_s, tmp_y_s;
4496 	u32 tmp_x_l, tmp_y_l;
4497 	int i;
4498 
4499 	if (rule->unused_tuple & tuple_bit)
4500 		return true;
4501 
4502 	switch (tuple_bit) {
4503 	case 0:
4504 		return false;
4505 	case BIT(INNER_DST_MAC):
4506 		for (i = 0; i < 6; i++) {
4507 			calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4508 			       rule->tuples_mask.dst_mac[i]);
4509 			calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4510 			       rule->tuples_mask.dst_mac[i]);
4511 		}
4512 
4513 		return true;
4514 	case BIT(INNER_SRC_MAC):
4515 		for (i = 0; i < 6; i++) {
4516 			calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4517 			       rule->tuples_mask.src_mac[i]);
4518 			calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4519 			       rule->tuples_mask.src_mac[i]);
4520 		}
4521 
4522 		return true;
4523 	case BIT(INNER_VLAN_TAG_FST):
4524 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4525 		       rule->tuples_mask.vlan_tag1);
4526 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4527 		       rule->tuples_mask.vlan_tag1);
4528 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4529 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4530 
4531 		return true;
4532 	case BIT(INNER_ETH_TYPE):
4533 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4534 		       rule->tuples_mask.ether_proto);
4535 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4536 		       rule->tuples_mask.ether_proto);
4537 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4538 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4539 
4540 		return true;
4541 	case BIT(INNER_IP_TOS):
4542 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4543 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4544 
4545 		return true;
4546 	case BIT(INNER_IP_PROTO):
4547 		calc_x(*key_x, rule->tuples.ip_proto,
4548 		       rule->tuples_mask.ip_proto);
4549 		calc_y(*key_y, rule->tuples.ip_proto,
4550 		       rule->tuples_mask.ip_proto);
4551 
4552 		return true;
4553 	case BIT(INNER_SRC_IP):
4554 		calc_x(tmp_x_l, rule->tuples.src_ip[3],
4555 		       rule->tuples_mask.src_ip[3]);
4556 		calc_y(tmp_y_l, rule->tuples.src_ip[3],
4557 		       rule->tuples_mask.src_ip[3]);
4558 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4559 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4560 
4561 		return true;
4562 	case BIT(INNER_DST_IP):
4563 		calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4564 		       rule->tuples_mask.dst_ip[3]);
4565 		calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4566 		       rule->tuples_mask.dst_ip[3]);
4567 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4568 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4569 
4570 		return true;
4571 	case BIT(INNER_SRC_PORT):
4572 		calc_x(tmp_x_s, rule->tuples.src_port,
4573 		       rule->tuples_mask.src_port);
4574 		calc_y(tmp_y_s, rule->tuples.src_port,
4575 		       rule->tuples_mask.src_port);
4576 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4577 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4578 
4579 		return true;
4580 	case BIT(INNER_DST_PORT):
4581 		calc_x(tmp_x_s, rule->tuples.dst_port,
4582 		       rule->tuples_mask.dst_port);
4583 		calc_y(tmp_y_s, rule->tuples.dst_port,
4584 		       rule->tuples_mask.dst_port);
4585 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4586 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4587 
4588 		return true;
4589 	default:
4590 		return false;
4591 	}
4592 }
4593 
4594 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4595 				 u8 vf_id, u8 network_port_id)
4596 {
4597 	u32 port_number = 0;
4598 
4599 	if (port_type == HOST_PORT) {
4600 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4601 				pf_id);
4602 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4603 				vf_id);
4604 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4605 	} else {
4606 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4607 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4608 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4609 	}
4610 
4611 	return port_number;
4612 }
4613 
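/* Build the meta data part of the TCAM key (packet type and destination
 * vport), convert it to x/y format and shift it so that the used bits
 * occupy the MSB region of the meta data word.
 */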
4614 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4615 				       __le32 *key_x, __le32 *key_y,
4616 				       struct hclge_fd_rule *rule)
4617 {
4618 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4619 	u8 cur_pos = 0, tuple_size, shift_bits;
4620 	int i;
4621 
4622 	for (i = 0; i < MAX_META_DATA; i++) {
4623 		tuple_size = meta_data_key_info[i].key_length;
4624 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4625 
4626 		switch (tuple_bit) {
4627 		case BIT(ROCE_TYPE):
4628 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4629 			cur_pos += tuple_size;
4630 			break;
4631 		case BIT(DST_VPORT):
4632 			port_number = hclge_get_port_number(HOST_PORT, 0,
4633 							    rule->vf_id, 0);
4634 			hnae3_set_field(meta_data,
4635 					GENMASK(cur_pos + tuple_size, cur_pos),
4636 					cur_pos, port_number);
4637 			cur_pos += tuple_size;
4638 			break;
4639 		default:
4640 			break;
4641 		}
4642 	}
4643 
4644 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4645 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4646 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
4647 
4648 	*key_x = cpu_to_le32(tmp_x << shift_bits);
4649 	*key_y = cpu_to_le32(tmp_y << shift_bits);
4650 }
4651 
4652 /* A complete key consists of a meta data key and a tuple key.
4653  * The meta data key is stored in the MSB region and the tuple key in
4654  * the LSB region; unused bits are filled with 0.
4655  */
4656 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4657 			    struct hclge_fd_rule *rule)
4658 {
4659 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4660 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4661 	u8 *cur_key_x, *cur_key_y;
4662 	int i, ret, tuple_size;
4663 	u8 meta_data_region;
4664 
4665 	memset(key_x, 0, sizeof(key_x));
4666 	memset(key_y, 0, sizeof(key_y));
4667 	cur_key_x = key_x;
4668 	cur_key_y = key_y;
4669 
4670 	for (i = 0; i < MAX_TUPLE; i++) {
4671 		bool tuple_valid;
4672 		u32 check_tuple;
4673 
4674 		tuple_size = tuple_key_info[i].key_length / 8;
4675 		check_tuple = key_cfg->tuple_active & BIT(i);
4676 
4677 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4678 						     cur_key_y, rule);
4679 		if (tuple_valid) {
4680 			cur_key_x += tuple_size;
4681 			cur_key_y += tuple_size;
4682 		}
4683 	}
4684 
4685 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4686 			MAX_META_DATA_LENGTH / 8;
4687 
4688 	hclge_fd_convert_meta_data(key_cfg,
4689 				   (__le32 *)(key_x + meta_data_region),
4690 				   (__le32 *)(key_y + meta_data_region),
4691 				   rule);
4692 
4693 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4694 				   true);
4695 	if (ret) {
4696 		dev_err(&hdev->pdev->dev,
4697 			"fd key_y config fail, loc=%d, ret=%d\n",
4698 			rule->location, ret);
4699 		return ret;
4700 	}
4701 
4702 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4703 				   true);
4704 	if (ret)
4705 		dev_err(&hdev->pdev->dev,
4706 			"fd key_x config fail, loc=%d, ret=%d\n",
4707 			rule->location, ret);
4708 	return ret;
4709 }
4710 
4711 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4712 			       struct hclge_fd_rule *rule)
4713 {
4714 	struct hclge_fd_ad_data ad_data;
4715 
4716 	ad_data.ad_id = rule->location;
4717 
4718 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4719 		ad_data.drop_packet = true;
4720 		ad_data.forward_to_direct_queue = false;
4721 		ad_data.queue_id = 0;
4722 	} else {
4723 		ad_data.drop_packet = false;
4724 		ad_data.forward_to_direct_queue = true;
4725 		ad_data.queue_id = rule->queue_id;
4726 	}
4727 
4728 	ad_data.use_counter = false;
4729 	ad_data.counter_id = 0;
4730 
4731 	ad_data.use_next_stage = false;
4732 	ad_data.next_input_key = 0;
4733 
4734 	ad_data.write_rule_id_to_bd = true;
4735 	ad_data.rule_id = rule->location;
4736 
4737 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4738 }
4739 
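/* Validate an ethtool flow spec against the flow director capabilities
 * and record in @unused which tuples were not specified by the user.
 */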
4740 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4741 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
4742 {
4743 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
4744 	struct ethtool_usrip4_spec *usr_ip4_spec;
4745 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
4746 	struct ethtool_usrip6_spec *usr_ip6_spec;
4747 	struct ethhdr *ether_spec;
4748 
4749 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4750 		return -EINVAL;
4751 
4752 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4753 		return -EOPNOTSUPP;
4754 
4755 	if ((fs->flow_type & FLOW_EXT) &&
4756 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4757 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4758 		return -EOPNOTSUPP;
4759 	}
4760 
4761 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4762 	case SCTP_V4_FLOW:
4763 	case TCP_V4_FLOW:
4764 	case UDP_V4_FLOW:
4765 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4766 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4767 
4768 		if (!tcp_ip4_spec->ip4src)
4769 			*unused |= BIT(INNER_SRC_IP);
4770 
4771 		if (!tcp_ip4_spec->ip4dst)
4772 			*unused |= BIT(INNER_DST_IP);
4773 
4774 		if (!tcp_ip4_spec->psrc)
4775 			*unused |= BIT(INNER_SRC_PORT);
4776 
4777 		if (!tcp_ip4_spec->pdst)
4778 			*unused |= BIT(INNER_DST_PORT);
4779 
4780 		if (!tcp_ip4_spec->tos)
4781 			*unused |= BIT(INNER_IP_TOS);
4782 
4783 		break;
4784 	case IP_USER_FLOW:
4785 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4786 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4787 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4788 
4789 		if (!usr_ip4_spec->ip4src)
4790 			*unused |= BIT(INNER_SRC_IP);
4791 
4792 		if (!usr_ip4_spec->ip4dst)
4793 			*unused |= BIT(INNER_DST_IP);
4794 
4795 		if (!usr_ip4_spec->tos)
4796 			*unused |= BIT(INNER_IP_TOS);
4797 
4798 		if (!usr_ip4_spec->proto)
4799 			*unused |= BIT(INNER_IP_PROTO);
4800 
4801 		if (usr_ip4_spec->l4_4_bytes)
4802 			return -EOPNOTSUPP;
4803 
4804 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4805 			return -EOPNOTSUPP;
4806 
4807 		break;
4808 	case SCTP_V6_FLOW:
4809 	case TCP_V6_FLOW:
4810 	case UDP_V6_FLOW:
4811 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4812 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4813 			BIT(INNER_IP_TOS);
4814 
4815 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4816 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4817 			*unused |= BIT(INNER_SRC_IP);
4818 
4819 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4820 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4821 			*unused |= BIT(INNER_DST_IP);
4822 
4823 		if (!tcp_ip6_spec->psrc)
4824 			*unused |= BIT(INNER_SRC_PORT);
4825 
4826 		if (!tcp_ip6_spec->pdst)
4827 			*unused |= BIT(INNER_DST_PORT);
4828 
4829 		if (tcp_ip6_spec->tclass)
4830 			return -EOPNOTSUPP;
4831 
4832 		break;
4833 	case IPV6_USER_FLOW:
4834 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4835 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4836 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4837 			BIT(INNER_DST_PORT);
4838 
4839 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4840 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4841 			*unused |= BIT(INNER_SRC_IP);
4842 
4843 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4844 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4845 			*unused |= BIT(INNER_DST_IP);
4846 
4847 		if (!usr_ip6_spec->l4_proto)
4848 			*unused |= BIT(INNER_IP_PROTO);
4849 
4850 		if (usr_ip6_spec->tclass)
4851 			return -EOPNOTSUPP;
4852 
4853 		if (usr_ip6_spec->l4_4_bytes)
4854 			return -EOPNOTSUPP;
4855 
4856 		break;
4857 	case ETHER_FLOW:
4858 		ether_spec = &fs->h_u.ether_spec;
4859 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4860 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4861 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4862 
4863 		if (is_zero_ether_addr(ether_spec->h_source))
4864 			*unused |= BIT(INNER_SRC_MAC);
4865 
4866 		if (is_zero_ether_addr(ether_spec->h_dest))
4867 			*unused |= BIT(INNER_DST_MAC);
4868 
4869 		if (!ether_spec->h_proto)
4870 			*unused |= BIT(INNER_ETH_TYPE);
4871 
4872 		break;
4873 	default:
4874 		return -EOPNOTSUPP;
4875 	}
4876 
4877 	if ((fs->flow_type & FLOW_EXT)) {
4878 		if (fs->h_ext.vlan_etype)
4879 			return -EOPNOTSUPP;
4880 		if (!fs->h_ext.vlan_tci)
4881 			*unused |= BIT(INNER_VLAN_TAG_FST);
4882 
4883 		if (fs->m_ext.vlan_tci) {
4884 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4885 				return -EINVAL;
4886 		}
4887 	} else {
4888 		*unused |= BIT(INNER_VLAN_TAG_FST);
4889 	}
4890 
4891 	if (fs->flow_type & FLOW_MAC_EXT) {
4892 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4893 			return -EOPNOTSUPP;
4894 
4895 		if (is_zero_ether_addr(fs->h_ext.h_dest))
4896 			*unused |= BIT(INNER_DST_MAC);
4897 		else
4898 			*unused &= ~(BIT(INNER_DST_MAC));
4899 	}
4900 
4901 	return 0;
4902 }
4903 
4904 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4905 {
4906 	struct hclge_fd_rule *rule = NULL;
4907 	struct hlist_node *node2;
4908 
4909 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4910 		if (rule->location >= location)
4911 			break;
4912 	}
4913 
4914 	return rule && rule->location == location;
4915 }
4916 
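/* Insert or remove a rule in the location-sorted rule list. An existing
 * rule at @location is removed first; when @is_add is true, @new_rule is
 * then linked in at that position.
 */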
4917 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4918 				     struct hclge_fd_rule *new_rule,
4919 				     u16 location,
4920 				     bool is_add)
4921 {
4922 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
4923 	struct hlist_node *node2;
4924 
4925 	if (is_add && !new_rule)
4926 		return -EINVAL;
4927 
4928 	hlist_for_each_entry_safe(rule, node2,
4929 				  &hdev->fd_rule_list, rule_node) {
4930 		if (rule->location >= location)
4931 			break;
4932 		parent = rule;
4933 	}
4934 
4935 	if (rule && rule->location == location) {
4936 		hlist_del(&rule->rule_node);
4937 		kfree(rule);
4938 		hdev->hclge_fd_rule_num--;
4939 
4940 		if (!is_add)
4941 			return 0;
4942 
4943 	} else if (!is_add) {
4944 		dev_err(&hdev->pdev->dev,
4945 			"delete fail, rule %d does not exist\n",
4946 			location);
4947 		return -EINVAL;
4948 	}
4949 
4950 	INIT_HLIST_NODE(&new_rule->rule_node);
4951 
4952 	if (parent)
4953 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4954 	else
4955 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4956 
4957 	hdev->hclge_fd_rule_num++;
4958 
4959 	return 0;
4960 }
4961 
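/* Translate an ethtool flow spec into the driver's tuple and tuple-mask
 * representation, including the implicit ether type and IP protocol for
 * the TCP/UDP/SCTP flow types.
 */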
4962 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4963 			      struct ethtool_rx_flow_spec *fs,
4964 			      struct hclge_fd_rule *rule)
4965 {
4966 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4967 
4968 	switch (flow_type) {
4969 	case SCTP_V4_FLOW:
4970 	case TCP_V4_FLOW:
4971 	case UDP_V4_FLOW:
4972 		rule->tuples.src_ip[3] =
4973 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4974 		rule->tuples_mask.src_ip[3] =
4975 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4976 
4977 		rule->tuples.dst_ip[3] =
4978 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4979 		rule->tuples_mask.dst_ip[3] =
4980 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4981 
4982 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4983 		rule->tuples_mask.src_port =
4984 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4985 
4986 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4987 		rule->tuples_mask.dst_port =
4988 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4989 
4990 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4991 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4992 
4993 		rule->tuples.ether_proto = ETH_P_IP;
4994 		rule->tuples_mask.ether_proto = 0xFFFF;
4995 
4996 		break;
4997 	case IP_USER_FLOW:
4998 		rule->tuples.src_ip[3] =
4999 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5000 		rule->tuples_mask.src_ip[3] =
5001 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5002 
5003 		rule->tuples.dst_ip[3] =
5004 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5005 		rule->tuples_mask.dst_ip[3] =
5006 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5007 
5008 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5009 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5010 
5011 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5012 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5013 
5014 		rule->tuples.ether_proto = ETH_P_IP;
5015 		rule->tuples_mask.ether_proto = 0xFFFF;
5016 
5017 		break;
5018 	case SCTP_V6_FLOW:
5019 	case TCP_V6_FLOW:
5020 	case UDP_V6_FLOW:
5021 		be32_to_cpu_array(rule->tuples.src_ip,
5022 				  fs->h_u.tcp_ip6_spec.ip6src, 4);
5023 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5024 				  fs->m_u.tcp_ip6_spec.ip6src, 4);
5025 
5026 		be32_to_cpu_array(rule->tuples.dst_ip,
5027 				  fs->h_u.tcp_ip6_spec.ip6dst, 4);
5028 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5029 				  fs->m_u.tcp_ip6_spec.ip6dst, 4);
5030 
5031 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5032 		rule->tuples_mask.src_port =
5033 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5034 
5035 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5036 		rule->tuples_mask.dst_port =
5037 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5038 
5039 		rule->tuples.ether_proto = ETH_P_IPV6;
5040 		rule->tuples_mask.ether_proto = 0xFFFF;
5041 
5042 		break;
5043 	case IPV6_USER_FLOW:
5044 		be32_to_cpu_array(rule->tuples.src_ip,
5045 				  fs->h_u.usr_ip6_spec.ip6src, 4);
5046 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5047 				  fs->m_u.usr_ip6_spec.ip6src, 4);
5048 
5049 		be32_to_cpu_array(rule->tuples.dst_ip,
5050 				  fs->h_u.usr_ip6_spec.ip6dst, 4);
5051 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5052 				  fs->m_u.usr_ip6_spec.ip6dst, 4);
5053 
5054 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5055 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5056 
5057 		rule->tuples.ether_proto = ETH_P_IPV6;
5058 		rule->tuples_mask.ether_proto = 0xFFFF;
5059 
5060 		break;
5061 	case ETHER_FLOW:
5062 		ether_addr_copy(rule->tuples.src_mac,
5063 				fs->h_u.ether_spec.h_source);
5064 		ether_addr_copy(rule->tuples_mask.src_mac,
5065 				fs->m_u.ether_spec.h_source);
5066 
5067 		ether_addr_copy(rule->tuples.dst_mac,
5068 				fs->h_u.ether_spec.h_dest);
5069 		ether_addr_copy(rule->tuples_mask.dst_mac,
5070 				fs->m_u.ether_spec.h_dest);
5071 
5072 		rule->tuples.ether_proto =
5073 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5074 		rule->tuples_mask.ether_proto =
5075 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5076 
5077 		break;
5078 	default:
5079 		return -EOPNOTSUPP;
5080 	}
5081 
5082 	switch (flow_type) {
5083 	case SCTP_V4_FLOW:
5084 	case SCTP_V6_FLOW:
5085 		rule->tuples.ip_proto = IPPROTO_SCTP;
5086 		rule->tuples_mask.ip_proto = 0xFF;
5087 		break;
5088 	case TCP_V4_FLOW:
5089 	case TCP_V6_FLOW:
5090 		rule->tuples.ip_proto = IPPROTO_TCP;
5091 		rule->tuples_mask.ip_proto = 0xFF;
5092 		break;
5093 	case UDP_V4_FLOW:
5094 	case UDP_V6_FLOW:
5095 		rule->tuples.ip_proto = IPPROTO_UDP;
5096 		rule->tuples_mask.ip_proto = 0xFF;
5097 		break;
5098 	default:
5099 		break;
5100 	}
5101 
5102 	if ((fs->flow_type & FLOW_EXT)) {
5103 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5104 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5105 	}
5106 
5107 	if (fs->flow_type & FLOW_MAC_EXT) {
5108 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5109 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5110 	}
5111 
5112 	return 0;
5113 }
5114 
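/* Add a flow director rule from an ethtool flow spec: validate the spec,
 * resolve the destination vport and queue, program the action and TCAM
 * key, and record the rule in the software rule list.
 */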
5115 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5116 			      struct ethtool_rxnfc *cmd)
5117 {
5118 	struct hclge_vport *vport = hclge_get_vport(handle);
5119 	struct hclge_dev *hdev = vport->back;
5120 	u16 dst_vport_id = 0, q_index = 0;
5121 	struct ethtool_rx_flow_spec *fs;
5122 	struct hclge_fd_rule *rule;
5123 	u32 unused = 0;
5124 	u8 action;
5125 	int ret;
5126 
5127 	if (!hnae3_dev_fd_supported(hdev))
5128 		return -EOPNOTSUPP;
5129 
5130 	if (!hdev->fd_en) {
5131 		dev_warn(&hdev->pdev->dev,
5132 			 "Please enable flow director first\n");
5133 		return -EOPNOTSUPP;
5134 	}
5135 
5136 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5137 
5138 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5139 	if (ret) {
5140 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5141 		return ret;
5142 	}
5143 
5144 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5145 		action = HCLGE_FD_ACTION_DROP_PACKET;
5146 	} else {
5147 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5148 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5149 		u16 tqps;
5150 
5151 		if (vf > hdev->num_req_vfs) {
5152 			dev_err(&hdev->pdev->dev,
5153 				"Error: vf id (%d) > max vf num (%d)\n",
5154 				vf, hdev->num_req_vfs);
5155 			return -EINVAL;
5156 		}
5157 
5158 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5159 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5160 
5161 		if (ring >= tqps) {
5162 			dev_err(&hdev->pdev->dev,
5163 				"Error: queue id (%d) > max tqp num (%d)\n",
5164 				ring, tqps - 1);
5165 			return -EINVAL;
5166 		}
5167 
5168 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5169 		q_index = ring;
5170 	}
5171 
5172 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5173 	if (!rule)
5174 		return -ENOMEM;
5175 
5176 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5177 	if (ret)
5178 		goto free_rule;
5179 
5180 	rule->flow_type = fs->flow_type;
5181 
5182 	rule->location = fs->location;
5183 	rule->unused_tuple = unused;
5184 	rule->vf_id = dst_vport_id;
5185 	rule->queue_id = q_index;
5186 	rule->action = action;
5187 
5188 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5189 	if (ret)
5190 		goto free_rule;
5191 
5192 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5193 	if (ret)
5194 		goto free_rule;
5195 
5196 	ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
5197 	if (ret)
5198 		goto free_rule;
5199 
5200 	return ret;
5201 
5202 free_rule:
5203 	kfree(rule);
5204 	return ret;
5205 }
5206 
5207 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5208 			      struct ethtool_rxnfc *cmd)
5209 {
5210 	struct hclge_vport *vport = hclge_get_vport(handle);
5211 	struct hclge_dev *hdev = vport->back;
5212 	struct ethtool_rx_flow_spec *fs;
5213 	int ret;
5214 
5215 	if (!hnae3_dev_fd_supported(hdev))
5216 		return -EOPNOTSUPP;
5217 
5218 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5219 
5220 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5221 		return -EINVAL;
5222 
5223 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5224 		dev_err(&hdev->pdev->dev,
5225 			"Delete fail, rule %d does not exist\n",
5226 			fs->location);
5227 		return -ENOENT;
5228 	}
5229 
5230 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5231 				   fs->location, NULL, false);
5232 	if (ret)
5233 		return ret;
5234 
5235 	return hclge_fd_update_rule_list(hdev, NULL, fs->location,
5236 					 false);
5237 }
5238 
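/* Remove all flow director rules from hardware. When @clear_list is
 * true, the software rule list is freed as well.
 */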
5239 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5240 				     bool clear_list)
5241 {
5242 	struct hclge_vport *vport = hclge_get_vport(handle);
5243 	struct hclge_dev *hdev = vport->back;
5244 	struct hclge_fd_rule *rule;
5245 	struct hlist_node *node;
5246 
5247 	if (!hnae3_dev_fd_supported(hdev))
5248 		return;
5249 
5250 	if (clear_list) {
5251 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5252 					  rule_node) {
5253 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5254 					     rule->location, NULL, false);
5255 			hlist_del(&rule->rule_node);
5256 			kfree(rule);
5257 			hdev->hclge_fd_rule_num--;
5258 		}
5259 	} else {
5260 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5261 					  rule_node)
5262 			hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5263 					     rule->location, NULL, false);
5264 	}
5265 }
5266 
5267 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5268 {
5269 	struct hclge_vport *vport = hclge_get_vport(handle);
5270 	struct hclge_dev *hdev = vport->back;
5271 	struct hclge_fd_rule *rule;
5272 	struct hlist_node *node;
5273 	int ret;
5274 
5275 	/* Return ok here, because reset error handling will check this
5276 	 * return value. If error is returned here, the reset process will
5277 	 * fail.
5278 	 */
5279 	if (!hnae3_dev_fd_supported(hdev))
5280 		return 0;
5281 
5282 	/* if fd is disabled, it should not be restored during reset */
5283 	if (!hdev->fd_en)
5284 		return 0;
5285 
5286 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5287 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5288 		if (!ret)
5289 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5290 
5291 		if (ret) {
5292 			dev_warn(&hdev->pdev->dev,
5293 				 "Restore rule %d failed, remove it\n",
5294 				 rule->location);
5295 			hlist_del(&rule->rule_node);
5296 			kfree(rule);
5297 			hdev->hclge_fd_rule_num--;
5298 		}
5299 	}
5300 	return 0;
5301 }
5302 
5303 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5304 				 struct ethtool_rxnfc *cmd)
5305 {
5306 	struct hclge_vport *vport = hclge_get_vport(handle);
5307 	struct hclge_dev *hdev = vport->back;
5308 
5309 	if (!hnae3_dev_fd_supported(hdev))
5310 		return -EOPNOTSUPP;
5311 
5312 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5313 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5314 
5315 	return 0;
5316 }
5317 
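/* Fill an ethtool flow spec from the stored rule at @fs->location,
 * zeroing the mask fields of tuples recorded as unused.
 */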
5318 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5319 				  struct ethtool_rxnfc *cmd)
5320 {
5321 	struct hclge_vport *vport = hclge_get_vport(handle);
5322 	struct hclge_fd_rule *rule = NULL;
5323 	struct hclge_dev *hdev = vport->back;
5324 	struct ethtool_rx_flow_spec *fs;
5325 	struct hlist_node *node2;
5326 
5327 	if (!hnae3_dev_fd_supported(hdev))
5328 		return -EOPNOTSUPP;
5329 
5330 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5331 
5332 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5333 		if (rule->location >= fs->location)
5334 			break;
5335 	}
5336 
5337 	if (!rule || fs->location != rule->location)
5338 		return -ENOENT;
5339 
5340 	fs->flow_type = rule->flow_type;
5341 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5342 	case SCTP_V4_FLOW:
5343 	case TCP_V4_FLOW:
5344 	case UDP_V4_FLOW:
5345 		fs->h_u.tcp_ip4_spec.ip4src =
5346 				cpu_to_be32(rule->tuples.src_ip[3]);
5347 		fs->m_u.tcp_ip4_spec.ip4src =
5348 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5349 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5350 
5351 		fs->h_u.tcp_ip4_spec.ip4dst =
5352 				cpu_to_be32(rule->tuples.dst_ip[3]);
5353 		fs->m_u.tcp_ip4_spec.ip4dst =
5354 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5355 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5356 
5357 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5358 		fs->m_u.tcp_ip4_spec.psrc =
5359 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5360 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5361 
5362 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5363 		fs->m_u.tcp_ip4_spec.pdst =
5364 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5365 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5366 
5367 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5368 		fs->m_u.tcp_ip4_spec.tos =
5369 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5370 				0 : rule->tuples_mask.ip_tos;
5371 
5372 		break;
5373 	case IP_USER_FLOW:
5374 		fs->h_u.usr_ip4_spec.ip4src =
5375 				cpu_to_be32(rule->tuples.src_ip[3]);
		fs->m_u.usr_ip4_spec.ip4src =
5377 				rule->unused_tuple & BIT(INNER_SRC_IP) ?
5378 				0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5379 
5380 		fs->h_u.usr_ip4_spec.ip4dst =
5381 				cpu_to_be32(rule->tuples.dst_ip[3]);
5382 		fs->m_u.usr_ip4_spec.ip4dst =
5383 				rule->unused_tuple & BIT(INNER_DST_IP) ?
5384 				0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5385 
5386 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5387 		fs->m_u.usr_ip4_spec.tos =
5388 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5389 				0 : rule->tuples_mask.ip_tos;
5390 
5391 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5392 		fs->m_u.usr_ip4_spec.proto =
5393 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5394 				0 : rule->tuples_mask.ip_proto;
5395 
5396 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5397 
5398 		break;
5399 	case SCTP_V6_FLOW:
5400 	case TCP_V6_FLOW:
5401 	case UDP_V6_FLOW:
5402 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5403 				  rule->tuples.src_ip, 4);
5404 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5405 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5406 		else
5407 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5408 					  rule->tuples_mask.src_ip, 4);
5409 
5410 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5411 				  rule->tuples.dst_ip, 4);
5412 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5413 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5414 		else
5415 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5416 					  rule->tuples_mask.dst_ip, 4);
5417 
5418 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5419 		fs->m_u.tcp_ip6_spec.psrc =
5420 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5421 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5422 
5423 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5424 		fs->m_u.tcp_ip6_spec.pdst =
5425 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5426 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5427 
5428 		break;
5429 	case IPV6_USER_FLOW:
5430 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5431 				  rule->tuples.src_ip, 4);
5432 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5433 			memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5434 		else
5435 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5436 					  rule->tuples_mask.src_ip, 4);
5437 
5438 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5439 				  rule->tuples.dst_ip, 4);
5440 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5441 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5442 		else
5443 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5444 					  rule->tuples_mask.dst_ip, 4);
5445 
5446 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5447 		fs->m_u.usr_ip6_spec.l4_proto =
5448 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5449 				0 : rule->tuples_mask.ip_proto;
5450 
5451 		break;
5452 	case ETHER_FLOW:
5453 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5454 				rule->tuples.src_mac);
5455 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5456 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5457 		else
5458 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5459 					rule->tuples_mask.src_mac);
5460 
5461 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5462 				rule->tuples.dst_mac);
5463 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5464 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5465 		else
5466 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5467 					rule->tuples_mask.dst_mac);
5468 
5469 		fs->h_u.ether_spec.h_proto =
5470 				cpu_to_be16(rule->tuples.ether_proto);
5471 		fs->m_u.ether_spec.h_proto =
5472 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5473 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5474 
5475 		break;
5476 	default:
5477 		return -EOPNOTSUPP;
5478 	}
5479 
5480 	if (fs->flow_type & FLOW_EXT) {
5481 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5482 		fs->m_ext.vlan_tci =
5483 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5484 				cpu_to_be16(VLAN_VID_MASK) :
5485 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5486 	}
5487 
5488 	if (fs->flow_type & FLOW_MAC_EXT) {
5489 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5490 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5491 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5492 		else
5493 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5494 					rule->tuples_mask.dst_mac);
5495 	}
5496 
5497 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5498 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5499 	} else {
5500 		u64 vf_id;
5501 
5502 		fs->ring_cookie = rule->queue_id;
5503 		vf_id = rule->vf_id;
5504 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5505 		fs->ring_cookie |= vf_id;
5506 	}
5507 
5508 	return 0;
5509 }
5510 
5511 static int hclge_get_all_rules(struct hnae3_handle *handle,
5512 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5513 {
5514 	struct hclge_vport *vport = hclge_get_vport(handle);
5515 	struct hclge_dev *hdev = vport->back;
5516 	struct hclge_fd_rule *rule;
5517 	struct hlist_node *node2;
5518 	int cnt = 0;
5519 
5520 	if (!hnae3_dev_fd_supported(hdev))
5521 		return -EOPNOTSUPP;
5522 
5523 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5524 
5525 	hlist_for_each_entry_safe(rule, node2,
5526 				  &hdev->fd_rule_list, rule_node) {
5527 		if (cnt == cmd->rule_cnt)
5528 			return -EMSGSIZE;
5529 
5530 		rule_locs[cnt] = rule->location;
5531 		cnt++;
5532 	}
5533 
5534 	cmd->rule_cnt = cnt;
5535 
5536 	return 0;
5537 }
5538 
5539 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5540 {
5541 	struct hclge_vport *vport = hclge_get_vport(handle);
5542 	struct hclge_dev *hdev = vport->back;
5543 
5544 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5545 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5546 }
5547 
5548 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5549 {
5550 	struct hclge_vport *vport = hclge_get_vport(handle);
5551 	struct hclge_dev *hdev = vport->back;
5552 
5553 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5554 }
5555 
5556 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5557 {
5558 	struct hclge_vport *vport = hclge_get_vport(handle);
5559 	struct hclge_dev *hdev = vport->back;
5560 
5561 	return hdev->rst_stats.hw_reset_done_cnt;
5562 }
5563 
5564 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5565 {
5566 	struct hclge_vport *vport = hclge_get_vport(handle);
5567 	struct hclge_dev *hdev = vport->back;
5568 
5569 	hdev->fd_en = enable;
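	/* when disabling, remove the rules from hardware but keep the
	 * software list so they can be restored when fd is enabled again
	 */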
5570 	if (!enable)
5571 		hclge_del_all_fd_entries(handle, false);
5572 	else
5573 		hclge_restore_fd_entries(handle);
5574 }
5575 
5576 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5577 {
5578 	struct hclge_desc desc;
5579 	struct hclge_config_mac_mode_cmd *req =
5580 		(struct hclge_config_mac_mode_cmd *)desc.data;
5581 	u32 loop_en = 0;
5582 	int ret;
5583 
5584 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5585 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5586 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5587 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5588 	hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5589 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5590 	hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5591 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5592 	hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5593 	hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5594 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5595 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5596 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5597 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5598 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5599 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5600 
5601 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5602 	if (ret)
5603 		dev_err(&hdev->pdev->dev,
5604 			"mac enable fail, ret =%d.\n", ret);
5605 }
5606 
5607 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5608 {
5609 	struct hclge_config_mac_mode_cmd *req;
5610 	struct hclge_desc desc;
5611 	u32 loop_en;
5612 	int ret;
5613 
5614 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5615 	/* 1 Read out the MAC mode config at first */
5616 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5617 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5618 	if (ret) {
5619 		dev_err(&hdev->pdev->dev,
5620 			"mac loopback get fail, ret =%d.\n", ret);
5621 		return ret;
5622 	}
5623 
5624 	/* 2 Then setup the loopback flag */
5625 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5626 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5627 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5628 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5629 
5630 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5631 
5632 	/* 3 Config mac work mode with loopback flag
5633 	 * and its original configure parameters
5634 	 */
5635 	hclge_cmd_reuse_desc(&desc, false);
5636 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5637 	if (ret)
5638 		dev_err(&hdev->pdev->dev,
5639 			"mac loopback set fail, ret =%d.\n", ret);
5640 	return ret;
5641 }
5642 
5643 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5644 				     enum hnae3_loop loop_mode)
5645 {
5646 #define HCLGE_SERDES_RETRY_MS	10
5647 #define HCLGE_SERDES_RETRY_NUM	100
5648 
5649 #define HCLGE_MAC_LINK_STATUS_MS   10
5650 #define HCLGE_MAC_LINK_STATUS_NUM  100
5651 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5652 #define HCLGE_MAC_LINK_STATUS_UP   1
5653 
5654 	struct hclge_serdes_lb_cmd *req;
5655 	struct hclge_desc desc;
5656 	int mac_link_ret = 0;
5657 	int ret, i = 0;
5658 	u8 loop_mode_b;
5659 
5660 	req = (struct hclge_serdes_lb_cmd *)desc.data;
5661 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5662 
5663 	switch (loop_mode) {
5664 	case HNAE3_LOOP_SERIAL_SERDES:
5665 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5666 		break;
5667 	case HNAE3_LOOP_PARALLEL_SERDES:
5668 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5669 		break;
5670 	default:
5671 		dev_err(&hdev->pdev->dev,
5672 			"unsupported serdes loopback mode %d\n", loop_mode);
		return -EOPNOTSUPP;
5674 	}
5675 
5676 	if (en) {
5677 		req->enable = loop_mode_b;
5678 		req->mask = loop_mode_b;
5679 		mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5680 	} else {
5681 		req->mask = loop_mode_b;
5682 		mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5683 	}
5684 
5685 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5686 	if (ret) {
5687 		dev_err(&hdev->pdev->dev,
5688 			"serdes loopback set fail, ret = %d\n", ret);
5689 		return ret;
5690 	}
5691 
5692 	do {
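		/* poll the result until the firmware reports the serdes
		 * loopback configuration as done
		 */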
5693 		msleep(HCLGE_SERDES_RETRY_MS);
5694 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5695 					   true);
5696 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5697 		if (ret) {
5698 			dev_err(&hdev->pdev->dev,
5699 				"serdes loopback get, ret = %d\n", ret);
5700 			return ret;
5701 		}
5702 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
5703 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5704 
5705 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5706 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5707 		return -EBUSY;
5708 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5709 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5710 		return -EIO;
5711 	}
5712 
5713 	hclge_cfg_mac_mode(hdev, en);
5714 
5715 	i = 0;
5716 	do {
		/* serdes internal loopback, independent of the network
		 * cable
		 */
5718 		msleep(HCLGE_MAC_LINK_STATUS_MS);
5719 		ret = hclge_get_mac_link_status(hdev);
5720 		if (ret == mac_link_ret)
5721 			return 0;
5722 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5723 
5724 	dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5725 
5726 	return -EBUSY;
5727 }
5728 
5729 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5730 			    int stream_id, bool enable)
5731 {
5732 	struct hclge_desc desc;
5733 	struct hclge_cfg_com_tqp_queue_cmd *req =
5734 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5735 	int ret;
5736 
5737 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5738 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5739 	req->stream_id = cpu_to_le16(stream_id);
5740 	req->enable |= enable << HCLGE_TQP_ENABLE_B;
5741 
5742 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5743 	if (ret)
5744 		dev_err(&hdev->pdev->dev,
5745 			"Tqp enable fail, status =%d.\n", ret);
5746 	return ret;
5747 }
5748 
5749 static int hclge_set_loopback(struct hnae3_handle *handle,
5750 			      enum hnae3_loop loop_mode, bool en)
5751 {
5752 	struct hclge_vport *vport = hclge_get_vport(handle);
5753 	struct hnae3_knic_private_info *kinfo;
5754 	struct hclge_dev *hdev = vport->back;
5755 	int i, ret;
5756 
5757 	switch (loop_mode) {
5758 	case HNAE3_LOOP_APP:
5759 		ret = hclge_set_app_loopback(hdev, en);
5760 		break;
5761 	case HNAE3_LOOP_SERIAL_SERDES:
5762 	case HNAE3_LOOP_PARALLEL_SERDES:
5763 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5764 		break;
5765 	default:
		ret = -EOPNOTSUPP;
5767 		dev_err(&hdev->pdev->dev,
5768 			"loop_mode %d is not supported\n", loop_mode);
5769 		break;
5770 	}
5771 
5772 	if (ret)
5773 		return ret;
5774 
5775 	kinfo = &vport->nic.kinfo;
5776 	for (i = 0; i < kinfo->num_tqps; i++) {
5777 		ret = hclge_tqp_enable(hdev, i, 0, en);
5778 		if (ret)
5779 			return ret;
5780 	}
5781 
5782 	return 0;
5783 }
5784 
5785 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5786 {
5787 	struct hclge_vport *vport = hclge_get_vport(handle);
5788 	struct hnae3_knic_private_info *kinfo;
5789 	struct hnae3_queue *queue;
5790 	struct hclge_tqp *tqp;
5791 	int i;
5792 
5793 	kinfo = &vport->nic.kinfo;
5794 	for (i = 0; i < kinfo->num_tqps; i++) {
5795 		queue = handle->kinfo.tqp[i];
5796 		tqp = container_of(queue, struct hclge_tqp, q);
5797 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5798 	}
5799 }
5800 
5801 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5802 {
5803 	struct hclge_vport *vport = hclge_get_vport(handle);
5804 	struct hclge_dev *hdev = vport->back;
5805 
5806 	if (enable) {
5807 		mod_timer(&hdev->service_timer, jiffies + HZ);
5808 	} else {
5809 		del_timer_sync(&hdev->service_timer);
5810 		cancel_work_sync(&hdev->service_task);
5811 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5812 	}
5813 }
5814 
5815 static int hclge_ae_start(struct hnae3_handle *handle)
5816 {
5817 	struct hclge_vport *vport = hclge_get_vport(handle);
5818 	struct hclge_dev *hdev = vport->back;
5819 
5820 	/* mac enable */
5821 	hclge_cfg_mac_mode(hdev, true);
5822 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5823 	hdev->hw.mac.link = 0;
5824 
5825 	/* reset tqp stats */
5826 	hclge_reset_tqp_stats(handle);
5827 
5828 	hclge_mac_start_phy(hdev);
5829 
5830 	return 0;
5831 }
5832 
5833 static void hclge_ae_stop(struct hnae3_handle *handle)
5834 {
5835 	struct hclge_vport *vport = hclge_get_vport(handle);
5836 	struct hclge_dev *hdev = vport->back;
5837 	int i;
5838 
5839 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
5840 
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the PHY here.
	 */
5844 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5845 	    hdev->reset_type != HNAE3_FUNC_RESET) {
5846 		hclge_mac_stop_phy(hdev);
5847 		return;
5848 	}
5849 
5850 	for (i = 0; i < handle->kinfo.num_tqps; i++)
5851 		hclge_reset_tqp(handle, i);
5852 
5853 	/* Mac disable */
5854 	hclge_cfg_mac_mode(hdev, false);
5855 
5856 	hclge_mac_stop_phy(hdev);
5857 
5858 	/* reset tqp stats */
5859 	hclge_reset_tqp_stats(handle);
5860 	hclge_update_link_status(hdev);
5861 }
5862 
5863 int hclge_vport_start(struct hclge_vport *vport)
5864 {
5865 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5866 	vport->last_active_jiffies = jiffies;
5867 	return 0;
5868 }
5869 
5870 void hclge_vport_stop(struct hclge_vport *vport)
5871 {
5872 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5873 }
5874 
5875 static int hclge_client_start(struct hnae3_handle *handle)
5876 {
5877 	struct hclge_vport *vport = hclge_get_vport(handle);
5878 
5879 	return hclge_vport_start(vport);
5880 }
5881 
5882 static void hclge_client_stop(struct hnae3_handle *handle)
5883 {
5884 	struct hclge_vport *vport = hclge_get_vport(handle);
5885 
5886 	hclge_vport_stop(vport);
5887 }
5888 
5889 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5890 					 u16 cmdq_resp, u8  resp_code,
5891 					 enum hclge_mac_vlan_tbl_opcode op)
5892 {
5893 	struct hclge_dev *hdev = vport->back;
5894 	int return_status = -EIO;
5895 
5896 	if (cmdq_resp) {
5897 		dev_err(&hdev->pdev->dev,
5898 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5899 			cmdq_resp);
5900 		return -EIO;
5901 	}
5902 
5903 	if (op == HCLGE_MAC_VLAN_ADD) {
		if (!resp_code || resp_code == 1) {
5905 			return_status = 0;
5906 		} else if (resp_code == 2) {
5907 			return_status = -ENOSPC;
5908 			dev_err(&hdev->pdev->dev,
5909 				"add mac addr failed for uc_overflow.\n");
5910 		} else if (resp_code == 3) {
5911 			return_status = -ENOSPC;
5912 			dev_err(&hdev->pdev->dev,
5913 				"add mac addr failed for mc_overflow.\n");
5914 		} else {
5915 			dev_err(&hdev->pdev->dev,
5916 				"add mac addr failed for undefined, code=%d.\n",
5917 				resp_code);
5918 		}
5919 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
5920 		if (!resp_code) {
5921 			return_status = 0;
5922 		} else if (resp_code == 1) {
5923 			return_status = -ENOENT;
5924 			dev_dbg(&hdev->pdev->dev,
5925 				"remove mac addr failed for miss.\n");
5926 		} else {
5927 			dev_err(&hdev->pdev->dev,
5928 				"remove mac addr failed for undefined, code=%d.\n",
5929 				resp_code);
5930 		}
5931 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
5932 		if (!resp_code) {
5933 			return_status = 0;
5934 		} else if (resp_code == 1) {
5935 			return_status = -ENOENT;
5936 			dev_dbg(&hdev->pdev->dev,
5937 				"lookup mac addr failed for miss.\n");
5938 		} else {
5939 			dev_err(&hdev->pdev->dev,
5940 				"lookup mac addr failed for undefined, code=%d.\n",
5941 				resp_code);
5942 		}
5943 	} else {
5944 		return_status = -EINVAL;
5945 		dev_err(&hdev->pdev->dev,
5946 			"unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5947 			op);
5948 	}
5949 
5950 	return return_status;
5951 }
5952 
5953 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5954 {
5955 	int word_num;
5956 	int bit_num;
5957 
5958 	if (vfid > 255 || vfid < 0)
5959 		return -EIO;
5960 
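	/* the 256 bit VF bitmap is split across the command descriptors:
	 * vfid 0-191 lives in desc[1] and vfid 192-255 in desc[2]
	 */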
	if (vfid <= 191) {
5962 		word_num = vfid / 32;
5963 		bit_num  = vfid % 32;
5964 		if (clr)
5965 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5966 		else
5967 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5968 	} else {
5969 		word_num = (vfid - 192) / 32;
5970 		bit_num  = vfid % 32;
5971 		if (clr)
5972 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5973 		else
5974 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5975 	}
5976 
5977 	return 0;
5978 }
5979 
5980 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5981 {
5982 #define HCLGE_DESC_NUMBER 3
5983 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5984 	int i, j;
5985 
5986 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5987 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5988 			if (desc[i].data[j])
5989 				return false;
5990 
5991 	return true;
5992 }
5993 
5994 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5995 				   const u8 *addr, bool is_mc)
5996 {
5997 	const unsigned char *mac_addr = addr;
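	/* pack bytes 0-3 of the MAC into the 32 bit high field and bytes
	 * 4-5 into the 16 bit low field of the table entry
	 */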
	u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
		       (mac_addr[2] << 16) | (mac_addr[3] << 24);
	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6001 
6002 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6003 	if (is_mc) {
6004 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6005 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6006 	}
6007 
6008 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6009 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6010 }
6011 
6012 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6013 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6014 {
6015 	struct hclge_dev *hdev = vport->back;
6016 	struct hclge_desc desc;
6017 	u8 resp_code;
6018 	u16 retval;
6019 	int ret;
6020 
6021 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6022 
6023 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6024 
6025 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6026 	if (ret) {
6027 		dev_err(&hdev->pdev->dev,
6028 			"del mac addr failed for cmd_send, ret =%d.\n",
6029 			ret);
6030 		return ret;
6031 	}
6032 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6033 	retval = le16_to_cpu(desc.retval);
6034 
6035 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6036 					     HCLGE_MAC_VLAN_REMOVE);
6037 }
6038 
6039 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6040 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
6041 				     struct hclge_desc *desc,
6042 				     bool is_mc)
6043 {
6044 	struct hclge_dev *hdev = vport->back;
6045 	u8 resp_code;
6046 	u16 retval;
6047 	int ret;
6048 
6049 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6050 	if (is_mc) {
6051 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6052 		memcpy(desc[0].data,
6053 		       req,
6054 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6055 		hclge_cmd_setup_basic_desc(&desc[1],
6056 					   HCLGE_OPC_MAC_VLAN_ADD,
6057 					   true);
6058 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6059 		hclge_cmd_setup_basic_desc(&desc[2],
6060 					   HCLGE_OPC_MAC_VLAN_ADD,
6061 					   true);
6062 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
6063 	} else {
6064 		memcpy(desc[0].data,
6065 		       req,
6066 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6067 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
6068 	}
6069 	if (ret) {
6070 		dev_err(&hdev->pdev->dev,
6071 			"lookup mac addr failed for cmd_send, ret =%d.\n",
6072 			ret);
6073 		return ret;
6074 	}
6075 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6076 	retval = le16_to_cpu(desc[0].retval);
6077 
6078 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6079 					     HCLGE_MAC_VLAN_LKUP);
6080 }
6081 
6082 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6083 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
6084 				  struct hclge_desc *mc_desc)
6085 {
6086 	struct hclge_dev *hdev = vport->back;
6087 	int cfg_status;
6088 	u8 resp_code;
6089 	u16 retval;
6090 	int ret;
6091 
6092 	if (!mc_desc) {
6093 		struct hclge_desc desc;
6094 
6095 		hclge_cmd_setup_basic_desc(&desc,
6096 					   HCLGE_OPC_MAC_VLAN_ADD,
6097 					   false);
6098 		memcpy(desc.data, req,
6099 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6100 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6101 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6102 		retval = le16_to_cpu(desc.retval);
6103 
6104 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6105 							   resp_code,
6106 							   HCLGE_MAC_VLAN_ADD);
6107 	} else {
6108 		hclge_cmd_reuse_desc(&mc_desc[0], false);
6109 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6110 		hclge_cmd_reuse_desc(&mc_desc[1], false);
6111 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6112 		hclge_cmd_reuse_desc(&mc_desc[2], false);
6113 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6114 		memcpy(mc_desc[0].data, req,
6115 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6116 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6117 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6118 		retval = le16_to_cpu(mc_desc[0].retval);
6119 
6120 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6121 							   resp_code,
6122 							   HCLGE_MAC_VLAN_ADD);
6123 	}
6124 
6125 	if (ret) {
6126 		dev_err(&hdev->pdev->dev,
6127 			"add mac addr failed for cmd_send, ret =%d.\n",
6128 			ret);
6129 		return ret;
6130 	}
6131 
6132 	return cfg_status;
6133 }
6134 
6135 static int hclge_init_umv_space(struct hclge_dev *hdev)
6136 {
6137 	u16 allocated_size = 0;
6138 	int ret;
6139 
6140 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6141 				  true);
6142 	if (ret)
6143 		return ret;
6144 
6145 	if (allocated_size < hdev->wanted_umv_size)
6146 		dev_warn(&hdev->pdev->dev,
6147 			 "Alloc umv space failed, want %d, get %d\n",
6148 			 hdev->wanted_umv_size, allocated_size);
6149 
6150 	mutex_init(&hdev->umv_mutex);
6151 	hdev->max_umv_size = allocated_size;
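	/* divide the space into (num_req_vfs + 2) equal shares: each
	 * function gets one share as its private quota, and the remaining
	 * share plus the division remainder forms the shared pool
	 */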
6152 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6153 	hdev->share_umv_size = hdev->priv_umv_size +
6154 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6155 
6156 	return 0;
6157 }
6158 
6159 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6160 {
6161 	int ret;
6162 
6163 	if (hdev->max_umv_size > 0) {
6164 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6165 					  false);
6166 		if (ret)
6167 			return ret;
6168 		hdev->max_umv_size = 0;
6169 	}
6170 	mutex_destroy(&hdev->umv_mutex);
6171 
6172 	return 0;
6173 }
6174 
6175 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6176 			       u16 *allocated_size, bool is_alloc)
6177 {
6178 	struct hclge_umv_spc_alc_cmd *req;
6179 	struct hclge_desc desc;
6180 	int ret;
6181 
6182 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6183 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6184 	hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6185 	req->space_size = cpu_to_le32(space_size);
6186 
6187 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6188 	if (ret) {
6189 		dev_err(&hdev->pdev->dev,
6190 			"%s umv space failed for cmd_send, ret =%d\n",
6191 			is_alloc ? "allocate" : "free", ret);
6192 		return ret;
6193 	}
6194 
6195 	if (is_alloc && allocated_size)
6196 		*allocated_size = le32_to_cpu(desc.data[1]);
6197 
6198 	return 0;
6199 }
6200 
6201 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6202 {
6203 	struct hclge_vport *vport;
6204 	int i;
6205 
6206 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6207 		vport = &hdev->vport[i];
6208 		vport->used_umv_num = 0;
6209 	}
6210 
6211 	mutex_lock(&hdev->umv_mutex);
6212 	hdev->share_umv_size = hdev->priv_umv_size +
6213 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6214 	mutex_unlock(&hdev->umv_mutex);
6215 }
6216 
6217 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6218 {
6219 	struct hclge_dev *hdev = vport->back;
6220 	bool is_full;
6221 
6222 	mutex_lock(&hdev->umv_mutex);
6223 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6224 		   hdev->share_umv_size == 0);
6225 	mutex_unlock(&hdev->umv_mutex);
6226 
6227 	return is_full;
6228 }
6229 
6230 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6231 {
6232 	struct hclge_dev *hdev = vport->back;
6233 
6234 	mutex_lock(&hdev->umv_mutex);
6235 	if (is_free) {
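		/* entries above the private quota came from the shared pool,
		 * so return one there before decrementing the vport count
		 */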
6236 		if (vport->used_umv_num > hdev->priv_umv_size)
6237 			hdev->share_umv_size++;
6238 
6239 		if (vport->used_umv_num > 0)
6240 			vport->used_umv_num--;
6241 	} else {
6242 		if (vport->used_umv_num >= hdev->priv_umv_size &&
6243 		    hdev->share_umv_size > 0)
6244 			hdev->share_umv_size--;
6245 		vport->used_umv_num++;
6246 	}
6247 	mutex_unlock(&hdev->umv_mutex);
6248 }
6249 
6250 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6251 			     const unsigned char *addr)
6252 {
6253 	struct hclge_vport *vport = hclge_get_vport(handle);
6254 
6255 	return hclge_add_uc_addr_common(vport, addr);
6256 }
6257 
6258 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6259 			     const unsigned char *addr)
6260 {
6261 	struct hclge_dev *hdev = vport->back;
6262 	struct hclge_mac_vlan_tbl_entry_cmd req;
6263 	struct hclge_desc desc;
6264 	u16 egress_port = 0;
6265 	int ret;
6266 
6267 	/* mac addr check */
6268 	if (is_zero_ether_addr(addr) ||
6269 	    is_broadcast_ether_addr(addr) ||
6270 	    is_multicast_ether_addr(addr)) {
6271 		dev_err(&hdev->pdev->dev,
6272 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6273 			 addr,
6274 			 is_zero_ether_addr(addr),
6275 			 is_broadcast_ether_addr(addr),
6276 			 is_multicast_ether_addr(addr));
6277 		return -EINVAL;
6278 	}
6279 
6280 	memset(&req, 0, sizeof(req));
6281 
6282 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6283 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6284 
6285 	req.egress_port = cpu_to_le16(egress_port);
6286 
6287 	hclge_prepare_mac_addr(&req, addr, false);
6288 
	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
6293 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6294 	if (ret == -ENOENT) {
6295 		if (!hclge_is_umv_space_full(vport)) {
6296 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6297 			if (!ret)
6298 				hclge_update_umv_space(vport, false);
6299 			return ret;
6300 		}
6301 
6302 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6303 			hdev->priv_umv_size);
6304 
6305 		return -ENOSPC;
6306 	}
6307 
6308 	/* check if we just hit the duplicate */
6309 	if (!ret) {
6310 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6311 			 vport->vport_id, addr);
6312 		return 0;
6313 	}
6314 
6315 	dev_err(&hdev->pdev->dev,
6316 		"PF failed to add unicast entry(%pM) in the MAC table\n",
6317 		addr);
6318 
6319 	return ret;
6320 }
6321 
6322 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6323 			    const unsigned char *addr)
6324 {
6325 	struct hclge_vport *vport = hclge_get_vport(handle);
6326 
6327 	return hclge_rm_uc_addr_common(vport, addr);
6328 }
6329 
6330 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6331 			    const unsigned char *addr)
6332 {
6333 	struct hclge_dev *hdev = vport->back;
6334 	struct hclge_mac_vlan_tbl_entry_cmd req;
6335 	int ret;
6336 
6337 	/* mac addr check */
6338 	if (is_zero_ether_addr(addr) ||
6339 	    is_broadcast_ether_addr(addr) ||
6340 	    is_multicast_ether_addr(addr)) {
6341 		dev_dbg(&hdev->pdev->dev,
6342 			"Remove mac err! invalid mac:%pM.\n",
6343 			 addr);
6344 		return -EINVAL;
6345 	}
6346 
6347 	memset(&req, 0, sizeof(req));
6348 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6349 	hclge_prepare_mac_addr(&req, addr, false);
6350 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
6351 	if (!ret)
6352 		hclge_update_umv_space(vport, true);
6353 
6354 	return ret;
6355 }
6356 
6357 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6358 			     const unsigned char *addr)
6359 {
6360 	struct hclge_vport *vport = hclge_get_vport(handle);
6361 
6362 	return hclge_add_mc_addr_common(vport, addr);
6363 }
6364 
6365 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6366 			     const unsigned char *addr)
6367 {
6368 	struct hclge_dev *hdev = vport->back;
6369 	struct hclge_mac_vlan_tbl_entry_cmd req;
6370 	struct hclge_desc desc[3];
6371 	int status;
6372 
6373 	/* mac addr check */
6374 	if (!is_multicast_ether_addr(addr)) {
6375 		dev_err(&hdev->pdev->dev,
6376 			"Add mc mac err! invalid mac:%pM.\n",
6377 			 addr);
6378 		return -EINVAL;
6379 	}
6380 	memset(&req, 0, sizeof(req));
6381 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6382 	hclge_prepare_mac_addr(&req, addr, true);
6383 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6384 	if (!status) {
		/* This mac addr exists, update VFID for it */
6386 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6387 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6388 	} else {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
6393 		hclge_update_desc_vfid(desc, vport->vport_id, false);
6394 		status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6395 	}
6396 
6397 	if (status == -ENOSPC)
6398 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6399 
6400 	return status;
6401 }
6402 
6403 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6404 			    const unsigned char *addr)
6405 {
6406 	struct hclge_vport *vport = hclge_get_vport(handle);
6407 
6408 	return hclge_rm_mc_addr_common(vport, addr);
6409 }
6410 
6411 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6412 			    const unsigned char *addr)
6413 {
6414 	struct hclge_dev *hdev = vport->back;
6415 	struct hclge_mac_vlan_tbl_entry_cmd req;
6416 	enum hclge_cmd_status status;
6417 	struct hclge_desc desc[3];
6418 
6419 	/* mac addr check */
6420 	if (!is_multicast_ether_addr(addr)) {
6421 		dev_dbg(&hdev->pdev->dev,
6422 			"Remove mc mac err! invalid mac:%pM.\n",
6423 			 addr);
6424 		return -EINVAL;
6425 	}
6426 
6427 	memset(&req, 0, sizeof(req));
6428 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6429 	hclge_prepare_mac_addr(&req, addr, true);
6430 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6431 	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
6433 		hclge_update_desc_vfid(desc, vport->vport_id, true);
6434 
6435 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
6437 			status = hclge_remove_mac_vlan_tbl(vport, &req);
6438 		else
			/* Not all the vfids are zero, just update them */
6440 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6441 
6442 	} else {
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an mta entry represents an address
		 * range rather than a specific address. The delete action
		 * for all entries will take effect in update_mta_status,
		 * called by hns3_nic_set_rx_mode.
		 */
6449 		status = 0;
6450 	}
6451 
6452 	return status;
6453 }
6454 
6455 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6456 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
6457 {
6458 	struct hclge_vport_mac_addr_cfg *mac_cfg;
6459 	struct list_head *list;
6460 
6461 	if (!vport->vport_id)
6462 		return;
6463 
6464 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6465 	if (!mac_cfg)
6466 		return;
6467 
6468 	mac_cfg->hd_tbl_status = true;
6469 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6470 
6471 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6472 	       &vport->uc_mac_list : &vport->mc_mac_list;
6473 
6474 	list_add_tail(&mac_cfg->node, list);
6475 }
6476 
6477 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6478 			      bool is_write_tbl,
6479 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
6480 {
6481 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6482 	struct list_head *list;
6483 	bool uc_flag, mc_flag;
6484 
6485 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6486 	       &vport->uc_mac_list : &vport->mc_mac_list;
6487 
6488 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6489 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6490 
6491 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6493 			if (uc_flag && mac_cfg->hd_tbl_status)
6494 				hclge_rm_uc_addr_common(vport, mac_addr);
6495 
6496 			if (mc_flag && mac_cfg->hd_tbl_status)
6497 				hclge_rm_mc_addr_common(vport, mac_addr);
6498 
6499 			list_del(&mac_cfg->node);
6500 			kfree(mac_cfg);
6501 			break;
6502 		}
6503 	}
6504 }
6505 
6506 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6507 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
6508 {
6509 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6510 	struct list_head *list;
6511 
6512 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6513 	       &vport->uc_mac_list : &vport->mc_mac_list;
6514 
6515 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6516 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6517 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6518 
6519 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6520 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6521 
6522 		mac_cfg->hd_tbl_status = false;
6523 		if (is_del_list) {
6524 			list_del(&mac_cfg->node);
6525 			kfree(mac_cfg);
6526 		}
6527 	}
6528 }
6529 
6530 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6531 {
6532 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
6533 	struct hclge_vport *vport;
6534 	int i;
6535 
6536 	mutex_lock(&hdev->vport_cfg_mutex);
6537 	for (i = 0; i < hdev->num_alloc_vport; i++) {
6538 		vport = &hdev->vport[i];
6539 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6540 			list_del(&mac->node);
6541 			kfree(mac);
6542 		}
6543 
6544 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6545 			list_del(&mac->node);
6546 			kfree(mac);
6547 		}
6548 	}
6549 	mutex_unlock(&hdev->vport_cfg_mutex);
6550 }
6551 
6552 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6553 					      u16 cmdq_resp, u8 resp_code)
6554 {
6555 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
6556 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
6557 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
6558 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
6559 
6560 	int return_status;
6561 
6562 	if (cmdq_resp) {
6563 		dev_err(&hdev->pdev->dev,
6564 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6565 			cmdq_resp);
6566 		return -EIO;
6567 	}
6568 
6569 	switch (resp_code) {
6570 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
6571 	case HCLGE_ETHERTYPE_ALREADY_ADD:
6572 		return_status = 0;
6573 		break;
6574 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6575 		dev_err(&hdev->pdev->dev,
6576 			"add mac ethertype failed for manager table overflow.\n");
6577 		return_status = -EIO;
6578 		break;
6579 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
6580 		dev_err(&hdev->pdev->dev,
6581 			"add mac ethertype failed for key conflict.\n");
6582 		return_status = -EIO;
6583 		break;
6584 	default:
6585 		dev_err(&hdev->pdev->dev,
6586 			"add mac ethertype failed for undefined, code=%d.\n",
6587 			resp_code);
6588 		return_status = -EIO;
6589 	}
6590 
6591 	return return_status;
6592 }
6593 
6594 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6595 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
6596 {
6597 	struct hclge_desc desc;
6598 	u8 resp_code;
6599 	u16 retval;
6600 	int ret;
6601 
6602 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6603 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6604 
6605 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6606 	if (ret) {
6607 		dev_err(&hdev->pdev->dev,
6608 			"add mac ethertype failed for cmd_send, ret =%d.\n",
6609 			ret);
6610 		return ret;
6611 	}
6612 
6613 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6614 	retval = le16_to_cpu(desc.retval);
6615 
6616 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6617 }
6618 
6619 static int init_mgr_tbl(struct hclge_dev *hdev)
6620 {
6621 	int ret;
6622 	int i;
6623 
6624 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6625 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6626 		if (ret) {
6627 			dev_err(&hdev->pdev->dev,
6628 				"add mac ethertype failed, ret =%d.\n",
6629 				ret);
6630 			return ret;
6631 		}
6632 	}
6633 
6634 	return 0;
6635 }
6636 
6637 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6638 {
6639 	struct hclge_vport *vport = hclge_get_vport(handle);
6640 	struct hclge_dev *hdev = vport->back;
6641 
6642 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
6643 }
6644 
6645 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6646 			      bool is_first)
6647 {
6648 	const unsigned char *new_addr = (const unsigned char *)p;
6649 	struct hclge_vport *vport = hclge_get_vport(handle);
6650 	struct hclge_dev *hdev = vport->back;
6651 	int ret;
6652 
6653 	/* mac addr check */
6654 	if (is_zero_ether_addr(new_addr) ||
6655 	    is_broadcast_ether_addr(new_addr) ||
6656 	    is_multicast_ether_addr(new_addr)) {
		dev_err(&hdev->pdev->dev,
			"Change uc mac err! invalid mac:%pM.\n",
			new_addr);
6660 		return -EINVAL;
6661 	}
6662 
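	/* remove the old address first (unless this is the initial set on a
	 * normal boot), then add the new one; if adding fails, try to
	 * restore the old address
	 */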
6663 	if ((!is_first || is_kdump_kernel()) &&
6664 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6665 		dev_warn(&hdev->pdev->dev,
6666 			 "remove old uc mac address fail.\n");
6667 
6668 	ret = hclge_add_uc_addr(handle, new_addr);
6669 	if (ret) {
6670 		dev_err(&hdev->pdev->dev,
6671 			"add uc mac address fail, ret =%d.\n",
6672 			ret);
6673 
6674 		if (!is_first &&
6675 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6676 			dev_err(&hdev->pdev->dev,
6677 				"restore uc mac address fail.\n");
6678 
6679 		return -EIO;
6680 	}
6681 
6682 	ret = hclge_pause_addr_cfg(hdev, new_addr);
6683 	if (ret) {
6684 		dev_err(&hdev->pdev->dev,
6685 			"configure mac pause address fail, ret =%d.\n",
6686 			ret);
6687 		return -EIO;
6688 	}
6689 
6690 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6691 
6692 	return 0;
6693 }
6694 
6695 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6696 			  int cmd)
6697 {
6698 	struct hclge_vport *vport = hclge_get_vport(handle);
6699 	struct hclge_dev *hdev = vport->back;
6700 
6701 	if (!hdev->hw.mac.phydev)
6702 		return -EOPNOTSUPP;
6703 
6704 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6705 }
6706 
6707 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6708 				      u8 fe_type, bool filter_en, u8 vf_id)
6709 {
6710 	struct hclge_vlan_filter_ctrl_cmd *req;
6711 	struct hclge_desc desc;
6712 	int ret;
6713 
6714 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6715 
6716 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6717 	req->vlan_type = vlan_type;
6718 	req->vlan_fe = filter_en ? fe_type : 0;
6719 	req->vf_id = vf_id;
6720 
6721 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6722 	if (ret)
6723 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6724 			ret);
6725 
6726 	return ret;
6727 }
6728 
6729 #define HCLGE_FILTER_TYPE_VF		0
6730 #define HCLGE_FILTER_TYPE_PORT		1
6731 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
6732 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
6733 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
6734 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
6735 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
6736 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
6737 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
6738 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
6739 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
6740 
6741 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6742 {
6743 	struct hclge_vport *vport = hclge_get_vport(handle);
6744 	struct hclge_dev *hdev = vport->back;
6745 
6746 	if (hdev->pdev->revision >= 0x21) {
6747 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6748 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
6749 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6750 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
6751 	} else {
6752 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6753 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6754 					   0);
6755 	}
6756 	if (enable)
6757 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
6758 	else
6759 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
6760 }
6761 
6762 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6763 				    bool is_kill, u16 vlan, u8 qos,
6764 				    __be16 proto)
6765 {
6766 #define HCLGE_MAX_VF_BYTES  16
6767 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
6768 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
6769 	struct hclge_desc desc[2];
6770 	u8 vf_byte_val;
6771 	u8 vf_byte_off;
6772 	int ret;
6773 
6774 	hclge_cmd_setup_basic_desc(&desc[0],
6775 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6776 	hclge_cmd_setup_basic_desc(&desc[1],
6777 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6778 
6779 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6780 
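	/* each descriptor carries HCLGE_MAX_VF_BYTES bytes of the VF bitmap
	 * (128 VFs), so higher vfids spill into the second descriptor
	 */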
6781 	vf_byte_off = vfid / 8;
6782 	vf_byte_val = 1 << (vfid % 8);
6783 
6784 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6785 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6786 
6787 	req0->vlan_id  = cpu_to_le16(vlan);
6788 	req0->vlan_cfg = is_kill;
6789 
6790 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6791 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6792 	else
6793 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6794 
6795 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
6796 	if (ret) {
6797 		dev_err(&hdev->pdev->dev,
6798 			"Send vf vlan command fail, ret =%d.\n",
6799 			ret);
6800 		return ret;
6801 	}
6802 
6803 	if (!is_kill) {
6804 #define HCLGE_VF_VLAN_NO_ENTRY	2
6805 		if (!req0->resp_code || req0->resp_code == 1)
6806 			return 0;
6807 
6808 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6809 			dev_warn(&hdev->pdev->dev,
6810 				 "vf vlan table is full, vf vlan filter is disabled\n");
6811 			return 0;
6812 		}
6813 
6814 		dev_err(&hdev->pdev->dev,
6815 			"Add vf vlan filter fail, ret =%d.\n",
6816 			req0->resp_code);
6817 	} else {
6818 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
6819 		if (!req0->resp_code)
6820 			return 0;
6821 
6822 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6823 			dev_warn(&hdev->pdev->dev,
6824 				 "vlan %d filter is not in vf vlan table\n",
6825 				 vlan);
6826 			return 0;
6827 		}
6828 
6829 		dev_err(&hdev->pdev->dev,
6830 			"Kill vf vlan filter fail, ret =%d.\n",
6831 			req0->resp_code);
6832 	}
6833 
6834 	return -EIO;
6835 }
6836 
6837 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6838 				      u16 vlan_id, bool is_kill)
6839 {
6840 	struct hclge_vlan_filter_pf_cfg_cmd *req;
6841 	struct hclge_desc desc;
6842 	u8 vlan_offset_byte_val;
6843 	u8 vlan_offset_byte;
6844 	u8 vlan_offset_160;
6845 	int ret;
6846 
6847 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6848 
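	/* the PF VLAN table is programmed in blocks of 160 VLANs: select
	 * the block, then the byte and bit for this vlan_id within it
	 */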
6849 	vlan_offset_160 = vlan_id / 160;
6850 	vlan_offset_byte = (vlan_id % 160) / 8;
6851 	vlan_offset_byte_val = 1 << (vlan_id % 8);
6852 
6853 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6854 	req->vlan_offset = vlan_offset_160;
6855 	req->vlan_cfg = is_kill;
6856 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6857 
6858 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6859 	if (ret)
6860 		dev_err(&hdev->pdev->dev,
6861 			"port vlan command, send fail, ret =%d.\n", ret);
6862 	return ret;
6863 }
6864 
6865 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6866 				    u16 vport_id, u16 vlan_id, u8 qos,
6867 				    bool is_kill)
6868 {
6869 	u16 vport_idx, vport_num = 0;
6870 	int ret;
6871 
6872 	if (is_kill && !vlan_id)
6873 		return 0;
6874 
6875 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6876 				       0, proto);
6877 	if (ret) {
6878 		dev_err(&hdev->pdev->dev,
6879 			"Set %d vport vlan filter config fail, ret =%d.\n",
6880 			vport_id, ret);
6881 		return ret;
6882 	}
6883 
6884 	/* vlan 0 may be added twice when 8021q module is enabled */
6885 	if (!is_kill && !vlan_id &&
6886 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
6887 		return 0;
6888 
6889 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6890 		dev_err(&hdev->pdev->dev,
6891 			"Add port vlan failed, vport %d is already in vlan %d\n",
6892 			vport_id, vlan_id);
6893 		return -EINVAL;
6894 	}
6895 
6896 	if (is_kill &&
6897 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6898 		dev_err(&hdev->pdev->dev,
6899 			"Delete port vlan failed, vport %d is not in vlan %d\n",
6900 			vport_id, vlan_id);
6901 		return -EINVAL;
6902 	}
6903 
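	/* count how many vports still use this vlan; the port level filter
	 * only needs updating when the first vport joins the vlan or the
	 * last one leaves it
	 */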
6904 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6905 		vport_num++;
6906 
6907 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6908 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6909 						 is_kill);
6910 
6911 	return ret;
6912 }
6913 
6914 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6915 {
6916 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6917 	struct hclge_vport_vtag_tx_cfg_cmd *req;
6918 	struct hclge_dev *hdev = vport->back;
6919 	struct hclge_desc desc;
6920 	int status;
6921 
6922 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6923 
6924 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6925 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6926 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6927 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6928 		      vcfg->accept_tag1 ? 1 : 0);
6929 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6930 		      vcfg->accept_untag1 ? 1 : 0);
6931 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6932 		      vcfg->accept_tag2 ? 1 : 0);
6933 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6934 		      vcfg->accept_untag2 ? 1 : 0);
6935 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6936 		      vcfg->insert_tag1_en ? 1 : 0);
6937 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6938 		      vcfg->insert_tag2_en ? 1 : 0);
6939 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6940 
6941 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6942 	req->vf_bitmap[req->vf_offset] =
6943 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6944 
6945 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6946 	if (status)
6947 		dev_err(&hdev->pdev->dev,
6948 			"Send port txvlan cfg command fail, ret =%d\n",
6949 			status);
6950 
6951 	return status;
6952 }
6953 
6954 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6955 {
6956 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6957 	struct hclge_vport_vtag_rx_cfg_cmd *req;
6958 	struct hclge_dev *hdev = vport->back;
6959 	struct hclge_desc desc;
6960 	int status;
6961 
6962 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6963 
6964 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6965 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6966 		      vcfg->strip_tag1_en ? 1 : 0);
6967 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6968 		      vcfg->strip_tag2_en ? 1 : 0);
6969 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6970 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
6971 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6972 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
6973 
6974 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6975 	req->vf_bitmap[req->vf_offset] =
6976 		1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6977 
6978 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
6979 	if (status)
6980 		dev_err(&hdev->pdev->dev,
6981 			"Send port rxvlan cfg command fail, ret =%d\n",
6982 			status);
6983 
6984 	return status;
6985 }
6986 
6987 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6988 				  u16 port_base_vlan_state,
6989 				  u16 vlan_tag)
6990 {
6991 	int ret;
6992 
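	/* with a port based VLAN the hardware inserts default_tag1 itself
	 * and tag1 from the stack is not accepted; without it the stack's
	 * tag1 is accepted and no tag is inserted
	 */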
6993 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6994 		vport->txvlan_cfg.accept_tag1 = true;
6995 		vport->txvlan_cfg.insert_tag1_en = false;
6996 		vport->txvlan_cfg.default_tag1 = 0;
6997 	} else {
6998 		vport->txvlan_cfg.accept_tag1 = false;
6999 		vport->txvlan_cfg.insert_tag1_en = true;
7000 		vport->txvlan_cfg.default_tag1 = vlan_tag;
7001 	}
7002 
7003 	vport->txvlan_cfg.accept_untag1 = true;
7004 
	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them,
	 * but these two fields cannot be configured by the user.
	 */
7009 	vport->txvlan_cfg.accept_tag2 = true;
7010 	vport->txvlan_cfg.accept_untag2 = true;
7011 	vport->txvlan_cfg.insert_tag2_en = false;
7012 	vport->txvlan_cfg.default_tag2 = 0;
7013 
7014 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7015 		vport->rxvlan_cfg.strip_tag1_en = false;
7016 		vport->rxvlan_cfg.strip_tag2_en =
7017 				vport->rxvlan_cfg.rx_vlan_offload_en;
7018 	} else {
7019 		vport->rxvlan_cfg.strip_tag1_en =
7020 				vport->rxvlan_cfg.rx_vlan_offload_en;
7021 		vport->rxvlan_cfg.strip_tag2_en = true;
7022 	}
7023 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7024 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7025 
7026 	ret = hclge_set_vlan_tx_offload_cfg(vport);
7027 	if (ret)
7028 		return ret;
7029 
7030 	return hclge_set_vlan_rx_offload_cfg(vport);
7031 }
7032 
7033 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7034 {
7035 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7036 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7037 	struct hclge_desc desc;
7038 	int status;
7039 
7040 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7041 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7042 	rx_req->ot_fst_vlan_type =
7043 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7044 	rx_req->ot_sec_vlan_type =
7045 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7046 	rx_req->in_fst_vlan_type =
7047 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7048 	rx_req->in_sec_vlan_type =
7049 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7050 
7051 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7052 	if (status) {
7053 		dev_err(&hdev->pdev->dev,
7054 			"Send rxvlan protocol type command fail, ret =%d\n",
7055 			status);
7056 		return status;
7057 	}
7058 
7059 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7060 
7061 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7062 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7063 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7064 
7065 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7066 	if (status)
7067 		dev_err(&hdev->pdev->dev,
7068 			"Send txvlan protocol type command fail, ret =%d\n",
7069 			status);
7070 
7071 	return status;
7072 }
7073 
7074 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7075 {
7076 #define HCLGE_DEF_VLAN_TYPE		0x8100
7077 
7078 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7079 	struct hclge_vport *vport;
7080 	int ret;
7081 	int i;
7082 
7083 	if (hdev->pdev->revision >= 0x21) {
7084 		/* for revision 0x21, vf vlan filter is per function */
7085 		for (i = 0; i < hdev->num_alloc_vport; i++) {
7086 			vport = &hdev->vport[i];
7087 			ret = hclge_set_vlan_filter_ctrl(hdev,
7088 							 HCLGE_FILTER_TYPE_VF,
7089 							 HCLGE_FILTER_FE_EGRESS,
7090 							 true,
7091 							 vport->vport_id);
7092 			if (ret)
7093 				return ret;
7094 		}
7095 
7096 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7097 						 HCLGE_FILTER_FE_INGRESS, true,
7098 						 0);
7099 		if (ret)
7100 			return ret;
7101 	} else {
7102 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7103 						 HCLGE_FILTER_FE_EGRESS_V1_B,
7104 						 true, 0);
7105 		if (ret)
7106 			return ret;
7107 	}
7108 
7109 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
7110 
7111 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7112 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7113 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7114 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7115 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7116 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7117 
7118 	ret = hclge_set_vlan_protocol_type(hdev);
7119 	if (ret)
7120 		return ret;
7121 
7122 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7123 		u16 vlan_tag;
7124 
7125 		vport = &hdev->vport[i];
7126 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7127 
7128 		ret = hclge_vlan_offload_cfg(vport,
7129 					     vport->port_base_vlan_cfg.state,
7130 					     vlan_tag);
7131 		if (ret)
7132 			return ret;
7133 	}
7134 
7135 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7136 }
7137 
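/* Record a non-zero VLAN id in the vport's software VLAN list so that it
 * can later be restored to the hardware filter table; writen_to_tbl marks
 * whether the id has already been written to the hardware table.
 */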
7138 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7139 				       bool writen_to_tbl)
7140 {
7141 	struct hclge_vport_vlan_cfg *vlan;
7142 
7143 	/* vlan 0 is reserved */
7144 	if (!vlan_id)
7145 		return;
7146 
7147 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7148 	if (!vlan)
7149 		return;
7150 
7151 	vlan->hd_tbl_status = writen_to_tbl;
7152 	vlan->vlan_id = vlan_id;
7153 
7154 	list_add_tail(&vlan->node, &vport->vlan_list);
7155 }
7156 
7157 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7158 {
7159 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7160 	struct hclge_dev *hdev = vport->back;
7161 	int ret;
7162 
7163 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7164 		if (!vlan->hd_tbl_status) {
7165 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7166 						       vport->vport_id,
7167 						       vlan->vlan_id, 0, false);
7168 			if (ret) {
7169 				dev_err(&hdev->pdev->dev,
7170 					"restore vport vlan list failed, ret=%d\n",
7171 					ret);
7172 				return ret;
7173 			}
7174 		}
7175 		vlan->hd_tbl_status = true;
7176 	}
7177 
7178 	return 0;
7179 }
7180 
7181 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7182 				      bool is_write_tbl)
7183 {
7184 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7185 	struct hclge_dev *hdev = vport->back;
7186 
7187 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7188 		if (vlan->vlan_id == vlan_id) {
7189 			if (is_write_tbl && vlan->hd_tbl_status)
7190 				hclge_set_vlan_filter_hw(hdev,
7191 							 htons(ETH_P_8021Q),
7192 							 vport->vport_id,
7193 							 vlan_id, 0,
7194 							 true);
7195 
7196 			list_del(&vlan->node);
7197 			kfree(vlan);
7198 			break;
7199 		}
7200 	}
7201 }
7202 
7203 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7204 {
7205 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7206 	struct hclge_dev *hdev = vport->back;
7207 
7208 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7209 		if (vlan->hd_tbl_status)
7210 			hclge_set_vlan_filter_hw(hdev,
7211 						 htons(ETH_P_8021Q),
7212 						 vport->vport_id,
7213 						 vlan->vlan_id, 0,
7214 						 true);
7215 
7216 		vlan->hd_tbl_status = false;
7217 		if (is_del_list) {
7218 			list_del(&vlan->node);
7219 			kfree(vlan);
7220 		}
7221 	}
7222 }
7223 
7224 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7225 {
7226 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7227 	struct hclge_vport *vport;
7228 	int i;
7229 
7230 	mutex_lock(&hdev->vport_cfg_mutex);
7231 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7232 		vport = &hdev->vport[i];
7233 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7234 			list_del(&vlan->node);
7235 			kfree(vlan);
7236 		}
7237 	}
7238 	mutex_unlock(&hdev->vport_cfg_mutex);
7239 }
7240 
7241 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7242 {
7243 	struct hclge_vport *vport = hclge_get_vport(handle);
7244 
7245 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7246 		vport->rxvlan_cfg.strip_tag1_en = false;
7247 		vport->rxvlan_cfg.strip_tag2_en = enable;
7248 	} else {
7249 		vport->rxvlan_cfg.strip_tag1_en = enable;
7250 		vport->rxvlan_cfg.strip_tag2_en = true;
7251 	}
7252 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7253 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7254 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7255 
7256 	return hclge_set_vlan_rx_offload_cfg(vport);
7257 }
7258 
7259 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7260 					    u16 port_base_vlan_state,
7261 					    struct hclge_vlan_info *new_info,
7262 					    struct hclge_vlan_info *old_info)
7263 {
7264 	struct hclge_dev *hdev = vport->back;
7265 	int ret;
7266 
7267 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7268 		hclge_rm_vport_all_vlan_table(vport, false);
7269 		return hclge_set_vlan_filter_hw(hdev,
7270 						 htons(new_info->vlan_proto),
7271 						 vport->vport_id,
7272 						 new_info->vlan_tag,
7273 						 new_info->qos, false);
7274 	}
7275 
7276 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7277 				       vport->vport_id, old_info->vlan_tag,
7278 				       old_info->qos, true);
7279 	if (ret)
7280 		return ret;
7281 
7282 	return hclge_add_vport_all_vlan_table(vport);
7283 }
7284 
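/* Apply a new port based VLAN configuration to a vport: reprogram the
 * TX/RX VLAN offload first, then either replace the old hardware filter
 * entry with the new one (HNAE3_PORT_BASE_VLAN_MODIFY) or switch between
 * the port based entry and the vport's own VLAN list and update the
 * cached state; the cached vlan_info is refreshed in both cases.
 */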
7285 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7286 				    struct hclge_vlan_info *vlan_info)
7287 {
7288 	struct hnae3_handle *nic = &vport->nic;
7289 	struct hclge_vlan_info *old_vlan_info;
7290 	struct hclge_dev *hdev = vport->back;
7291 	int ret;
7292 
7293 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7294 
7295 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7296 	if (ret)
7297 		return ret;
7298 
7299 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7300 		/* add new VLAN tag */
7301 		ret = hclge_set_vlan_filter_hw(hdev,
7302 					       htons(vlan_info->vlan_proto),
7303 					       vport->vport_id,
7304 					       vlan_info->vlan_tag,
7305 					       vlan_info->qos, false);
7306 		if (ret)
7307 			return ret;
7308 
7309 		/* remove old VLAN tag */
7310 		ret = hclge_set_vlan_filter_hw(hdev,
7311 					       htons(old_vlan_info->vlan_proto),
7312 					       vport->vport_id,
7313 					       old_vlan_info->vlan_tag,
7314 					       old_vlan_info->qos, true);
7315 		if (ret)
7316 			return ret;
7317 
7318 		goto update;
7319 	}
7320 
7321 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7322 					       old_vlan_info);
7323 	if (ret)
7324 		return ret;
7325 
	/* only update the state when port based VLAN is enabled or disabled */
7327 	vport->port_base_vlan_cfg.state = state;
7328 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7329 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7330 	else
7331 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7332 
7333 update:
7334 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7335 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7336 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7337 
7338 	return 0;
7339 }
7340 
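/* Work out how a VF VLAN request changes the port based VLAN state:
 * when it is currently disabled, a non-zero vlan enables it; when it is
 * enabled, vlan 0 disables it, the same tag is a no-op and a different
 * tag modifies the existing entry.
 */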
7341 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7342 					  enum hnae3_port_base_vlan_state state,
7343 					  u16 vlan)
7344 {
7345 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7346 		if (!vlan)
7347 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7348 		else
7349 			return HNAE3_PORT_BASE_VLAN_ENABLE;
7350 	} else {
7351 		if (!vlan)
7352 			return HNAE3_PORT_BASE_VLAN_DISABLE;
7353 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7354 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7355 		else
7356 			return HNAE3_PORT_BASE_VLAN_MODIFY;
7357 	}
7358 }
7359 
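/* Handle a port based VLAN request for a function. Not supported on
 * revision 0x20 hardware. For the PF (vfid 0) the change is applied
 * directly with the NIC client stopped; for a VF it is applied directly
 * only while the VF is not alive, otherwise the new setting is pushed to
 * the VF driver through the mailbox.
 */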
7360 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7361 				    u16 vlan, u8 qos, __be16 proto)
7362 {
7363 	struct hclge_vport *vport = hclge_get_vport(handle);
7364 	struct hclge_dev *hdev = vport->back;
7365 	struct hclge_vlan_info vlan_info;
7366 	u16 state;
7367 	int ret;
7368 
7369 	if (hdev->pdev->revision == 0x20)
7370 		return -EOPNOTSUPP;
7371 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
7373 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7374 		return -EINVAL;
7375 	if (proto != htons(ETH_P_8021Q))
7376 		return -EPROTONOSUPPORT;
7377 
7378 	vport = &hdev->vport[vfid];
7379 	state = hclge_get_port_base_vlan_state(vport,
7380 					       vport->port_base_vlan_cfg.state,
7381 					       vlan);
7382 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7383 		return 0;
7384 
7385 	vlan_info.vlan_tag = vlan;
7386 	vlan_info.qos = qos;
7387 	vlan_info.vlan_proto = ntohs(proto);
7388 
7389 	/* update port based VLAN for PF */
7390 	if (!vfid) {
7391 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7392 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7393 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7394 
7395 		return ret;
7396 	}
7397 
	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);

	return hclge_push_vf_port_base_vlan_info(&hdev->vport[0], (u8)vfid,
						 state, vlan, qos,
						 ntohs(proto));
7408 }
7409 
7410 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7411 			  u16 vlan_id, bool is_kill)
7412 {
7413 	struct hclge_vport *vport = hclge_get_vport(handle);
7414 	struct hclge_dev *hdev = vport->back;
7415 	bool writen_to_tbl = false;
7416 	int ret = 0;
7417 
	/* When port based VLAN is enabled, the port based VLAN is used as the
	 * VLAN filter entry. In this case the VLAN filter table is not
	 * updated when the user adds or removes a VLAN; only the vport VLAN
	 * list is updated. The VLAN ids in the VLAN list are not written to
	 * the VLAN filter table until port based VLAN is disabled.
	 */
7424 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7425 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7426 					       vlan_id, 0, is_kill);
7427 		writen_to_tbl = true;
7428 	}
7429 
7430 	if (ret)
7431 		return ret;
7432 
7433 	if (is_kill)
7434 		hclge_rm_vport_vlan_table(vport, vlan_id, false);
7435 	else
7436 		hclge_add_vport_vlan_table(vport, vlan_id,
7437 					   writen_to_tbl);
7438 
7439 	return 0;
7440 }
7441 
7442 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7443 {
7444 	struct hclge_config_max_frm_size_cmd *req;
7445 	struct hclge_desc desc;
7446 
7447 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7448 
7449 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7450 	req->max_frm_size = cpu_to_le16(new_mps);
7451 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7452 
7453 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7454 }
7455 
7456 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7457 {
7458 	struct hclge_vport *vport = hclge_get_vport(handle);
7459 
7460 	return hclge_set_vport_mtu(vport, new_mtu);
7461 }
7462 
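/* Change the maximum frame size of a vport. The frame size is the MTU
 * plus the Ethernet header, FCS and two VLAN tags. A VF's frame size may
 * not exceed the PF's, and the PF may not shrink below the largest VF
 * frame size; a PF change also reprograms the MAC and reallocates the
 * packet buffers with the clients stopped.
 */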
7463 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7464 {
7465 	struct hclge_dev *hdev = vport->back;
7466 	int i, max_frm_size, ret = 0;
7467 
7468 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7469 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7470 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
7471 		return -EINVAL;
7472 
7473 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7474 	mutex_lock(&hdev->vport_lock);
7475 	/* VF's mps must fit within hdev->mps */
7476 	if (vport->vport_id && max_frm_size > hdev->mps) {
7477 		mutex_unlock(&hdev->vport_lock);
7478 		return -EINVAL;
7479 	} else if (vport->vport_id) {
7480 		vport->mps = max_frm_size;
7481 		mutex_unlock(&hdev->vport_lock);
7482 		return 0;
7483 	}
7484 
	/* PF's mps must be no less than the mps of any VF */
7486 	for (i = 1; i < hdev->num_alloc_vport; i++)
7487 		if (max_frm_size < hdev->vport[i].mps) {
7488 			mutex_unlock(&hdev->vport_lock);
7489 			return -EINVAL;
7490 		}
7491 
7492 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7493 
7494 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
7495 	if (ret) {
7496 		dev_err(&hdev->pdev->dev,
7497 			"Change mtu fail, ret =%d\n", ret);
7498 		goto out;
7499 	}
7500 
7501 	hdev->mps = max_frm_size;
7502 	vport->mps = max_frm_size;
7503 
7504 	ret = hclge_buffer_alloc(hdev);
7505 	if (ret)
7506 		dev_err(&hdev->pdev->dev,
7507 			"Allocate buffer fail, ret =%d\n", ret);
7508 
7509 out:
7510 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7511 	mutex_unlock(&hdev->vport_lock);
7512 	return ret;
7513 }
7514 
7515 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7516 				    bool enable)
7517 {
7518 	struct hclge_reset_tqp_queue_cmd *req;
7519 	struct hclge_desc desc;
7520 	int ret;
7521 
7522 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7523 
7524 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7525 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7526 	hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7527 
7528 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7529 	if (ret) {
7530 		dev_err(&hdev->pdev->dev,
7531 			"Send tqp reset cmd error, status =%d\n", ret);
7532 		return ret;
7533 	}
7534 
7535 	return 0;
7536 }
7537 
7538 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7539 {
7540 	struct hclge_reset_tqp_queue_cmd *req;
7541 	struct hclge_desc desc;
7542 	int ret;
7543 
7544 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7545 
7546 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7547 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7548 
7549 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7550 	if (ret) {
7551 		dev_err(&hdev->pdev->dev,
7552 			"Get reset status error, status =%d\n", ret);
7553 		return ret;
7554 	}
7555 
7556 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7557 }
7558 
7559 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7560 {
7561 	struct hnae3_queue *queue;
7562 	struct hclge_tqp *tqp;
7563 
7564 	queue = handle->kinfo.tqp[queue_id];
7565 	tqp = container_of(queue, struct hclge_tqp, q);
7566 
7567 	return tqp->index;
7568 }
7569 
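/* Reset a single TQP for the PF: disable the queue, assert the per-queue
 * soft reset, poll the reset status for up to HCLGE_TQP_RESET_TRY_TIMES
 * iterations of 20 ms, then deassert the reset.
 */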
7570 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7571 {
7572 	struct hclge_vport *vport = hclge_get_vport(handle);
7573 	struct hclge_dev *hdev = vport->back;
7574 	int reset_try_times = 0;
7575 	int reset_status;
7576 	u16 queue_gid;
7577 	int ret = 0;
7578 
7579 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7580 
7581 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7582 	if (ret) {
7583 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7584 		return ret;
7585 	}
7586 
7587 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7588 	if (ret) {
7589 		dev_err(&hdev->pdev->dev,
7590 			"Send reset tqp cmd fail, ret = %d\n", ret);
7591 		return ret;
7592 	}
7593 
7594 	reset_try_times = 0;
7595 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7596 		/* Wait for tqp hw reset */
7597 		msleep(20);
7598 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7599 		if (reset_status)
7600 			break;
7601 	}
7602 
7603 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP failed\n");
		return -ETIME;
7606 	}
7607 
7608 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7609 	if (ret)
7610 		dev_err(&hdev->pdev->dev,
7611 			"Deassert the soft reset fail, ret = %d\n", ret);
7612 
7613 	return ret;
7614 }
7615 
7616 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7617 {
7618 	struct hclge_dev *hdev = vport->back;
7619 	int reset_try_times = 0;
7620 	int reset_status;
7621 	u16 queue_gid;
7622 	int ret;
7623 
7624 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7625 
7626 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7627 	if (ret) {
7628 		dev_warn(&hdev->pdev->dev,
7629 			 "Send reset tqp cmd fail, ret = %d\n", ret);
7630 		return;
7631 	}
7632 
7633 	reset_try_times = 0;
7634 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7635 		/* Wait for tqp hw reset */
7636 		msleep(20);
7637 		reset_status = hclge_get_reset_status(hdev, queue_gid);
7638 		if (reset_status)
7639 			break;
7640 	}
7641 
7642 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_warn(&hdev->pdev->dev, "Reset TQP failed\n");
7644 		return;
7645 	}
7646 
7647 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7648 	if (ret)
7649 		dev_warn(&hdev->pdev->dev,
7650 			 "Deassert the soft reset fail, ret = %d\n", ret);
7651 }
7652 
7653 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7654 {
7655 	struct hclge_vport *vport = hclge_get_vport(handle);
7656 	struct hclge_dev *hdev = vport->back;
7657 
7658 	return hdev->fw_version;
7659 }
7660 
7661 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7662 {
7663 	struct phy_device *phydev = hdev->hw.mac.phydev;
7664 
7665 	if (!phydev)
7666 		return;
7667 
7668 	phy_set_asym_pause(phydev, rx_en, tx_en);
7669 }
7670 
7671 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7672 {
7673 	int ret;
7674 
7675 	if (rx_en && tx_en)
7676 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
7677 	else if (rx_en && !tx_en)
7678 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7679 	else if (!rx_en && tx_en)
7680 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7681 	else
7682 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
7683 
7684 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7685 		return 0;
7686 
7687 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7688 	if (ret) {
7689 		dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7690 			ret);
7691 		return ret;
7692 	}
7693 
7694 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
7695 
7696 	return 0;
7697 }
7698 
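/* Resolve the pause configuration after autonegotiation on a port with
 * an attached PHY: combine the local advertisement with the link
 * partner's pause capabilities, force pause off for half duplex and, as
 * long as PFC is not in use, program the result into the MAC.
 */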
7699 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7700 {
7701 	struct phy_device *phydev = hdev->hw.mac.phydev;
7702 	u16 remote_advertising = 0;
7703 	u16 local_advertising = 0;
7704 	u32 rx_pause, tx_pause;
7705 	u8 flowctl;
7706 
7707 	if (!phydev->link || !phydev->autoneg)
7708 		return 0;
7709 
7710 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7711 
7712 	if (phydev->pause)
7713 		remote_advertising = LPA_PAUSE_CAP;
7714 
7715 	if (phydev->asym_pause)
7716 		remote_advertising |= LPA_PAUSE_ASYM;
7717 
7718 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7719 					   remote_advertising);
7720 	tx_pause = flowctl & FLOW_CTRL_TX;
7721 	rx_pause = flowctl & FLOW_CTRL_RX;
7722 
7723 	if (phydev->duplex == HCLGE_MAC_HALF) {
7724 		tx_pause = 0;
7725 		rx_pause = 0;
7726 	}
7727 
7728 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7729 }
7730 
7731 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7732 				 u32 *rx_en, u32 *tx_en)
7733 {
7734 	struct hclge_vport *vport = hclge_get_vport(handle);
7735 	struct hclge_dev *hdev = vport->back;
7736 
7737 	*auto_neg = hclge_get_autoneg(handle);
7738 
7739 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7740 		*rx_en = 0;
7741 		*tx_en = 0;
7742 		return;
7743 	}
7744 
7745 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7746 		*rx_en = 1;
7747 		*tx_en = 0;
7748 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7749 		*tx_en = 1;
7750 		*rx_en = 0;
7751 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7752 		*rx_en = 1;
7753 		*tx_en = 1;
7754 	} else {
7755 		*rx_en = 0;
7756 		*tx_en = 0;
7757 	}
7758 }
7759 
7760 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7761 				u32 rx_en, u32 tx_en)
7762 {
7763 	struct hclge_vport *vport = hclge_get_vport(handle);
7764 	struct hclge_dev *hdev = vport->back;
7765 	struct phy_device *phydev = hdev->hw.mac.phydev;
7766 	u32 fc_autoneg;
7767 
7768 	fc_autoneg = hclge_get_autoneg(handle);
7769 	if (auto_neg != fc_autoneg) {
7770 		dev_info(&hdev->pdev->dev,
7771 			 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7772 		return -EOPNOTSUPP;
7773 	}
7774 
7775 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7776 		dev_info(&hdev->pdev->dev,
7777 			 "Priority flow control enabled. Cannot set link flow control.\n");
7778 		return -EOPNOTSUPP;
7779 	}
7780 
7781 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7782 
7783 	if (!fc_autoneg)
7784 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7785 
7786 	if (phydev)
7787 		return phy_start_aneg(phydev);
7788 
7789 	if (hdev->pdev->revision == 0x20)
7790 		return -EOPNOTSUPP;
7791 
7792 	return hclge_restart_autoneg(handle);
7793 }
7794 
7795 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7796 					  u8 *auto_neg, u32 *speed, u8 *duplex)
7797 {
7798 	struct hclge_vport *vport = hclge_get_vport(handle);
7799 	struct hclge_dev *hdev = vport->back;
7800 
7801 	if (speed)
7802 		*speed = hdev->hw.mac.speed;
7803 	if (duplex)
7804 		*duplex = hdev->hw.mac.duplex;
7805 	if (auto_neg)
7806 		*auto_neg = hdev->hw.mac.autoneg;
7807 }
7808 
7809 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
7810 				 u8 *module_type)
7811 {
7812 	struct hclge_vport *vport = hclge_get_vport(handle);
7813 	struct hclge_dev *hdev = vport->back;
7814 
7815 	if (media_type)
7816 		*media_type = hdev->hw.mac.media_type;
7817 
7818 	if (module_type)
7819 		*module_type = hdev->hw.mac.module_type;
7820 }
7821 
7822 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7823 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
7824 {
7825 	struct hclge_vport *vport = hclge_get_vport(handle);
7826 	struct hclge_dev *hdev = vport->back;
7827 	struct phy_device *phydev = hdev->hw.mac.phydev;
7828 	int mdix_ctrl, mdix, retval, is_resolved;
7829 
7830 	if (!phydev) {
7831 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7832 		*tp_mdix = ETH_TP_MDI_INVALID;
7833 		return;
7834 	}
7835 
7836 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7837 
7838 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7839 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7840 				    HCLGE_PHY_MDIX_CTRL_S);
7841 
7842 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7843 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7844 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7845 
7846 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7847 
7848 	switch (mdix_ctrl) {
7849 	case 0x0:
7850 		*tp_mdix_ctrl = ETH_TP_MDI;
7851 		break;
7852 	case 0x1:
7853 		*tp_mdix_ctrl = ETH_TP_MDI_X;
7854 		break;
7855 	case 0x3:
7856 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7857 		break;
7858 	default:
7859 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7860 		break;
7861 	}
7862 
7863 	if (!is_resolved)
7864 		*tp_mdix = ETH_TP_MDI_INVALID;
7865 	else if (mdix)
7866 		*tp_mdix = ETH_TP_MDI_X;
7867 	else
7868 		*tp_mdix = ETH_TP_MDI;
7869 }
7870 
7871 static void hclge_info_show(struct hclge_dev *hdev)
7872 {
7873 	struct device *dev = &hdev->pdev->dev;
7874 
7875 	dev_info(dev, "PF info begin:\n");
7876 
	dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
	dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
7883 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
7884 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
7885 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
7886 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
7887 	dev_info(dev, "This is %s PF\n",
7888 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
7889 	dev_info(dev, "DCB %s\n",
7890 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
7891 	dev_info(dev, "MQPRIO %s\n",
7892 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
7893 
7894 	dev_info(dev, "PF info end.\n");
7895 }
7896 
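/* Bind a client (KNIC, UNIC or RoCE) to the PF and any VMDq vports. A
 * RoCE client instance is only initialized once the hardware supports
 * RoCE and the NIC client has been registered.
 */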
7897 static int hclge_init_client_instance(struct hnae3_client *client,
7898 				      struct hnae3_ae_dev *ae_dev)
7899 {
7900 	struct hclge_dev *hdev = ae_dev->priv;
7901 	struct hclge_vport *vport;
7902 	int i, ret;
7903 
	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7905 		vport = &hdev->vport[i];
7906 
7907 		switch (client->type) {
7908 		case HNAE3_CLIENT_KNIC:
7909 
7910 			hdev->nic_client = client;
7911 			vport->nic.client = client;
7912 			ret = client->ops->init_instance(&vport->nic);
7913 			if (ret)
7914 				goto clear_nic;
7915 
7916 			hnae3_set_client_init_flag(client, ae_dev, 1);
7917 
7918 			if (netif_msg_drv(&hdev->vport->nic))
7919 				hclge_info_show(hdev);
7920 
7921 			if (hdev->roce_client &&
7922 			    hnae3_dev_roce_supported(hdev)) {
7923 				struct hnae3_client *rc = hdev->roce_client;
7924 
7925 				ret = hclge_init_roce_base_info(vport);
7926 				if (ret)
7927 					goto clear_roce;
7928 
7929 				ret = rc->ops->init_instance(&vport->roce);
7930 				if (ret)
7931 					goto clear_roce;
7932 
7933 				hnae3_set_client_init_flag(hdev->roce_client,
7934 							   ae_dev, 1);
7935 			}
7936 
7937 			break;
7938 		case HNAE3_CLIENT_UNIC:
7939 			hdev->nic_client = client;
7940 			vport->nic.client = client;
7941 
7942 			ret = client->ops->init_instance(&vport->nic);
7943 			if (ret)
7944 				goto clear_nic;
7945 
7946 			hnae3_set_client_init_flag(client, ae_dev, 1);
7947 
7948 			break;
7949 		case HNAE3_CLIENT_ROCE:
7950 			if (hnae3_dev_roce_supported(hdev)) {
7951 				hdev->roce_client = client;
7952 				vport->roce.client = client;
7953 			}
7954 
7955 			if (hdev->roce_client && hdev->nic_client) {
7956 				ret = hclge_init_roce_base_info(vport);
7957 				if (ret)
7958 					goto clear_roce;
7959 
7960 				ret = client->ops->init_instance(&vport->roce);
7961 				if (ret)
7962 					goto clear_roce;
7963 
7964 				hnae3_set_client_init_flag(client, ae_dev, 1);
7965 			}
7966 
7967 			break;
7968 		default:
7969 			return -EINVAL;
7970 		}
7971 	}
7972 
7973 	return 0;
7974 
7975 clear_nic:
7976 	hdev->nic_client = NULL;
7977 	vport->nic.client = NULL;
7978 	return ret;
7979 clear_roce:
7980 	hdev->roce_client = NULL;
7981 	vport->roce.client = NULL;
7982 	return ret;
7983 }
7984 
7985 static void hclge_uninit_client_instance(struct hnae3_client *client,
7986 					 struct hnae3_ae_dev *ae_dev)
7987 {
7988 	struct hclge_dev *hdev = ae_dev->priv;
7989 	struct hclge_vport *vport;
7990 	int i;
7991 
7992 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7993 		vport = &hdev->vport[i];
7994 		if (hdev->roce_client) {
7995 			hdev->roce_client->ops->uninit_instance(&vport->roce,
7996 								0);
7997 			hdev->roce_client = NULL;
7998 			vport->roce.client = NULL;
7999 		}
8000 		if (client->type == HNAE3_CLIENT_ROCE)
8001 			return;
8002 		if (hdev->nic_client && client->ops->uninit_instance) {
8003 			client->ops->uninit_instance(&vport->nic, 0);
8004 			hdev->nic_client = NULL;
8005 			vport->nic.client = NULL;
8006 		}
8007 	}
8008 }
8009 
8010 static int hclge_pci_init(struct hclge_dev *hdev)
8011 {
8012 	struct pci_dev *pdev = hdev->pdev;
8013 	struct hclge_hw *hw;
8014 	int ret;
8015 
8016 	ret = pci_enable_device(pdev);
8017 	if (ret) {
8018 		dev_err(&pdev->dev, "failed to enable PCI device\n");
8019 		return ret;
8020 	}
8021 
8022 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8023 	if (ret) {
8024 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8025 		if (ret) {
8026 			dev_err(&pdev->dev,
				"can't set consistent PCI DMA mask\n");
8028 			goto err_disable_device;
8029 		}
8030 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8031 	}
8032 
8033 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8034 	if (ret) {
8035 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8036 		goto err_disable_device;
8037 	}
8038 
8039 	pci_set_master(pdev);
8040 	hw = &hdev->hw;
8041 	hw->io_base = pcim_iomap(pdev, 2, 0);
8042 	if (!hw->io_base) {
8043 		dev_err(&pdev->dev, "Can't map configuration register space\n");
8044 		ret = -ENOMEM;
8045 		goto err_clr_master;
8046 	}
8047 
8048 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8049 
8050 	return 0;
8051 err_clr_master:
8052 	pci_clear_master(pdev);
8053 	pci_release_regions(pdev);
8054 err_disable_device:
8055 	pci_disable_device(pdev);
8056 
8057 	return ret;
8058 }
8059 
8060 static void hclge_pci_uninit(struct hclge_dev *hdev)
8061 {
8062 	struct pci_dev *pdev = hdev->pdev;
8063 
8064 	pcim_iounmap(pdev, hdev->hw.io_base);
8065 	pci_free_irq_vectors(pdev);
8066 	pci_clear_master(pdev);
8067 	pci_release_mem_regions(pdev);
8068 	pci_disable_device(pdev);
8069 }
8070 
8071 static void hclge_state_init(struct hclge_dev *hdev)
8072 {
8073 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8074 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8075 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8076 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8077 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8078 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8079 }
8080 
8081 static void hclge_state_uninit(struct hclge_dev *hdev)
8082 {
8083 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8084 
8085 	if (hdev->service_timer.function)
8086 		del_timer_sync(&hdev->service_timer);
8087 	if (hdev->reset_timer.function)
8088 		del_timer_sync(&hdev->reset_timer);
8089 	if (hdev->service_task.func)
8090 		cancel_work_sync(&hdev->service_task);
8091 	if (hdev->rst_service_task.func)
8092 		cancel_work_sync(&hdev->rst_service_task);
8093 	if (hdev->mbx_service_task.func)
8094 		cancel_work_sync(&hdev->mbx_service_task);
8095 }
8096 
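/* Prepare for an FLR: request a function level reset through the reset
 * framework and wait up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS
 * (about five seconds) for the reset handler to bring the function down.
 */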
8097 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8098 {
8099 #define HCLGE_FLR_WAIT_MS	100
8100 #define HCLGE_FLR_WAIT_CNT	50
8101 	struct hclge_dev *hdev = ae_dev->priv;
8102 	int cnt = 0;
8103 
8104 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8105 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8106 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8107 	hclge_reset_event(hdev->pdev, NULL);
8108 
8109 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8110 	       cnt++ < HCLGE_FLR_WAIT_CNT)
8111 		msleep(HCLGE_FLR_WAIT_MS);
8112 
8113 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8114 		dev_err(&hdev->pdev->dev,
8115 			"flr wait down timeout: %d\n", cnt);
8116 }
8117 
8118 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8119 {
8120 	struct hclge_dev *hdev = ae_dev->priv;
8121 
8122 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8123 }
8124 
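/* Probe-time initialization of the PF: bring up PCI, the command queue,
 * MSI/MSI-X and the misc vector, allocate TQPs and vports, then configure
 * the MAC, VLAN, TM, RSS, flow director and hardware error interrupts
 * before arming the service timers and work items.
 */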
8125 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8126 {
8127 	struct pci_dev *pdev = ae_dev->pdev;
8128 	struct hclge_dev *hdev;
8129 	int ret;
8130 
8131 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8132 	if (!hdev) {
8133 		ret = -ENOMEM;
8134 		goto out;
8135 	}
8136 
8137 	hdev->pdev = pdev;
8138 	hdev->ae_dev = ae_dev;
8139 	hdev->reset_type = HNAE3_NONE_RESET;
8140 	hdev->reset_level = HNAE3_FUNC_RESET;
8141 	ae_dev->priv = hdev;
8142 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8143 
8144 	mutex_init(&hdev->vport_lock);
8145 	mutex_init(&hdev->vport_cfg_mutex);
8146 
8147 	ret = hclge_pci_init(hdev);
8148 	if (ret) {
8149 		dev_err(&pdev->dev, "PCI init failed\n");
8150 		goto out;
8151 	}
8152 
	/* Firmware command queue initialization */
8154 	ret = hclge_cmd_queue_init(hdev);
8155 	if (ret) {
8156 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8157 		goto err_pci_uninit;
8158 	}
8159 
	/* Firmware command initialization */
8161 	ret = hclge_cmd_init(hdev);
8162 	if (ret)
8163 		goto err_cmd_uninit;
8164 
8165 	ret = hclge_get_cap(hdev);
8166 	if (ret) {
8167 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8168 			ret);
8169 		goto err_cmd_uninit;
8170 	}
8171 
8172 	ret = hclge_configure(hdev);
8173 	if (ret) {
8174 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8175 		goto err_cmd_uninit;
8176 	}
8177 
8178 	ret = hclge_init_msi(hdev);
8179 	if (ret) {
8180 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8181 		goto err_cmd_uninit;
8182 	}
8183 
8184 	ret = hclge_misc_irq_init(hdev);
8185 	if (ret) {
8186 		dev_err(&pdev->dev,
8187 			"Misc IRQ(vector0) init error, ret = %d.\n",
8188 			ret);
8189 		goto err_msi_uninit;
8190 	}
8191 
8192 	ret = hclge_alloc_tqps(hdev);
8193 	if (ret) {
8194 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8195 		goto err_msi_irq_uninit;
8196 	}
8197 
8198 	ret = hclge_alloc_vport(hdev);
8199 	if (ret) {
8200 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8201 		goto err_msi_irq_uninit;
8202 	}
8203 
8204 	ret = hclge_map_tqp(hdev);
8205 	if (ret) {
8206 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8207 		goto err_msi_irq_uninit;
8208 	}
8209 
8210 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8211 		ret = hclge_mac_mdio_config(hdev);
8212 		if (ret) {
8213 			dev_err(&hdev->pdev->dev,
8214 				"mdio config fail ret=%d\n", ret);
8215 			goto err_msi_irq_uninit;
8216 		}
8217 	}
8218 
8219 	ret = hclge_init_umv_space(hdev);
8220 	if (ret) {
8221 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8222 		goto err_mdiobus_unreg;
8223 	}
8224 
8225 	ret = hclge_mac_init(hdev);
8226 	if (ret) {
8227 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8228 		goto err_mdiobus_unreg;
8229 	}
8230 
8231 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8232 	if (ret) {
8233 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8234 		goto err_mdiobus_unreg;
8235 	}
8236 
8237 	ret = hclge_config_gro(hdev, true);
8238 	if (ret)
8239 		goto err_mdiobus_unreg;
8240 
8241 	ret = hclge_init_vlan_config(hdev);
8242 	if (ret) {
8243 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8244 		goto err_mdiobus_unreg;
8245 	}
8246 
8247 	ret = hclge_tm_schd_init(hdev);
8248 	if (ret) {
8249 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8250 		goto err_mdiobus_unreg;
8251 	}
8252 
8253 	hclge_rss_init_cfg(hdev);
8254 	ret = hclge_rss_init_hw(hdev);
8255 	if (ret) {
8256 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8257 		goto err_mdiobus_unreg;
8258 	}
8259 
8260 	ret = init_mgr_tbl(hdev);
8261 	if (ret) {
8262 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8263 		goto err_mdiobus_unreg;
8264 	}
8265 
8266 	ret = hclge_init_fd_config(hdev);
8267 	if (ret) {
8268 		dev_err(&pdev->dev,
8269 			"fd table init fail, ret=%d\n", ret);
8270 		goto err_mdiobus_unreg;
8271 	}
8272 
8273 	ret = hclge_hw_error_set_state(hdev, true);
8274 	if (ret) {
8275 		dev_err(&pdev->dev,
8276 			"fail(%d) to enable hw error interrupts\n", ret);
8277 		goto err_mdiobus_unreg;
8278 	}
8279 
8280 	INIT_KFIFO(hdev->mac_tnl_log);
8281 
8282 	hclge_dcb_ops_set(hdev);
8283 
8284 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8285 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8286 	INIT_WORK(&hdev->service_task, hclge_service_task);
8287 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8288 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8289 
8290 	hclge_clear_all_event_cause(hdev);
8291 
8292 	/* Enable MISC vector(vector0) */
8293 	hclge_enable_vector(&hdev->misc_vector, true);
8294 
8295 	hclge_state_init(hdev);
8296 	hdev->last_reset_time = jiffies;
8297 
8298 	pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8299 	return 0;
8300 
8301 err_mdiobus_unreg:
8302 	if (hdev->hw.mac.phydev)
8303 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
8304 err_msi_irq_uninit:
8305 	hclge_misc_irq_uninit(hdev);
8306 err_msi_uninit:
8307 	pci_free_irq_vectors(pdev);
8308 err_cmd_uninit:
8309 	hclge_cmd_uninit(hdev);
8310 err_pci_uninit:
8311 	pcim_iounmap(pdev, hdev->hw.io_base);
8312 	pci_clear_master(pdev);
8313 	pci_release_regions(pdev);
8314 	pci_disable_device(pdev);
8315 out:
8316 	return ret;
8317 }
8318 
8319 static void hclge_stats_clear(struct hclge_dev *hdev)
8320 {
8321 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8322 }
8323 
8324 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8325 {
8326 	struct hclge_vport *vport = hdev->vport;
8327 	int i;
8328 
8329 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8330 		hclge_vport_stop(vport);
8331 		vport++;
8332 	}
8333 }
8334 
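/* Re-initialize the hardware after a reset. The PCI resources, interrupt
 * vectors and vport/TQP allocations from probe time are kept; only the
 * command queue and the MAC, VLAN, TM, RSS and flow director hardware
 * state are reprogrammed.
 */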
8335 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8336 {
8337 	struct hclge_dev *hdev = ae_dev->priv;
8338 	struct pci_dev *pdev = ae_dev->pdev;
8339 	int ret;
8340 
8341 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8342 
8343 	hclge_stats_clear(hdev);
8344 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8345 
8346 	ret = hclge_cmd_init(hdev);
8347 	if (ret) {
8348 		dev_err(&pdev->dev, "Cmd queue init failed\n");
8349 		return ret;
8350 	}
8351 
8352 	ret = hclge_map_tqp(hdev);
8353 	if (ret) {
8354 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8355 		return ret;
8356 	}
8357 
8358 	hclge_reset_umv_space(hdev);
8359 
8360 	ret = hclge_mac_init(hdev);
8361 	if (ret) {
8362 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8363 		return ret;
8364 	}
8365 
8366 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8367 	if (ret) {
8368 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8369 		return ret;
8370 	}
8371 
8372 	ret = hclge_config_gro(hdev, true);
8373 	if (ret)
8374 		return ret;
8375 
8376 	ret = hclge_init_vlan_config(hdev);
8377 	if (ret) {
8378 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8379 		return ret;
8380 	}
8381 
8382 	ret = hclge_tm_init_hw(hdev, true);
8383 	if (ret) {
8384 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8385 		return ret;
8386 	}
8387 
8388 	ret = hclge_rss_init_hw(hdev);
8389 	if (ret) {
8390 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8391 		return ret;
8392 	}
8393 
8394 	ret = hclge_init_fd_config(hdev);
8395 	if (ret) {
8396 		dev_err(&pdev->dev,
8397 			"fd table init fail, ret=%d\n", ret);
8398 		return ret;
8399 	}
8400 
8401 	/* Re-enable the hw error interrupts because
8402 	 * the interrupts get disabled on core/global reset.
8403 	 */
8404 	ret = hclge_hw_error_set_state(hdev, true);
8405 	if (ret) {
8406 		dev_err(&pdev->dev,
8407 			"fail(%d) to re-enable HNS hw error interrupts\n", ret);
8408 		return ret;
8409 	}
8410 
8411 	hclge_reset_vport_state(hdev);
8412 
8413 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8414 		 HCLGE_DRIVER_NAME);
8415 
8416 	return 0;
8417 }
8418 
8419 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8420 {
8421 	struct hclge_dev *hdev = ae_dev->priv;
8422 	struct hclge_mac *mac = &hdev->hw.mac;
8423 
8424 	hclge_state_uninit(hdev);
8425 
8426 	if (mac->phydev)
8427 		mdiobus_unregister(mac->mdio_bus);
8428 
8429 	hclge_uninit_umv_space(hdev);
8430 
8431 	/* Disable MISC vector(vector0) */
8432 	hclge_enable_vector(&hdev->misc_vector, false);
8433 	synchronize_irq(hdev->misc_vector.vector_irq);
8434 
8435 	hclge_config_mac_tnl_int(hdev, false);
8436 	hclge_hw_error_set_state(hdev, false);
8437 	hclge_cmd_uninit(hdev);
8438 	hclge_misc_irq_uninit(hdev);
8439 	hclge_pci_uninit(hdev);
8440 	mutex_destroy(&hdev->vport_lock);
8441 	hclge_uninit_vport_mac_table(hdev);
8442 	hclge_uninit_vport_vlan_table(hdev);
8443 	mutex_destroy(&hdev->vport_cfg_mutex);
8444 	ae_dev->priv = NULL;
8445 }
8446 
8447 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8448 {
8449 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8450 	struct hclge_vport *vport = hclge_get_vport(handle);
8451 	struct hclge_dev *hdev = vport->back;
8452 
8453 	return min_t(u32, hdev->rss_size_max,
8454 		     vport->alloc_tqps / kinfo->num_tc);
8455 }
8456 
8457 static void hclge_get_channels(struct hnae3_handle *handle,
8458 			       struct ethtool_channels *ch)
8459 {
8460 	ch->max_combined = hclge_get_max_channels(handle);
8461 	ch->other_count = 1;
8462 	ch->max_other = 1;
8463 	ch->combined_count = handle->kinfo.rss_size;
8464 }
8465 
8466 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8467 					u16 *alloc_tqps, u16 *max_rss_size)
8468 {
8469 	struct hclge_vport *vport = hclge_get_vport(handle);
8470 	struct hclge_dev *hdev = vport->back;
8471 
8472 	*alloc_tqps = vport->alloc_tqps;
8473 	*max_rss_size = hdev->rss_size_max;
8474 }
8475 
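/* Change the number of combined channels: record the requested RSS size,
 * remap the vport's TQPs, reprogram the RSS TC mode for the new size and,
 * unless the user has explicitly configured the RSS indirection table,
 * rebuild the table with a round-robin mapping.
 */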
8476 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8477 			      bool rxfh_configured)
8478 {
8479 	struct hclge_vport *vport = hclge_get_vport(handle);
8480 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8481 	struct hclge_dev *hdev = vport->back;
8482 	int cur_rss_size = kinfo->rss_size;
8483 	int cur_tqps = kinfo->num_tqps;
8484 	u16 tc_offset[HCLGE_MAX_TC_NUM];
8485 	u16 tc_valid[HCLGE_MAX_TC_NUM];
8486 	u16 tc_size[HCLGE_MAX_TC_NUM];
8487 	u16 roundup_size;
8488 	u32 *rss_indir;
8489 	int ret, i;
8490 
8491 	kinfo->req_rss_size = new_tqps_num;
8492 
8493 	ret = hclge_tm_vport_map_update(hdev);
8494 	if (ret) {
8495 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8496 		return ret;
8497 	}
8498 
8499 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
8500 	roundup_size = ilog2(roundup_size);
8501 	/* Set the RSS TC mode according to the new RSS size */
8502 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8503 		tc_valid[i] = 0;
8504 
8505 		if (!(hdev->hw_tc_map & BIT(i)))
8506 			continue;
8507 
8508 		tc_valid[i] = 1;
8509 		tc_size[i] = roundup_size;
8510 		tc_offset[i] = kinfo->rss_size * i;
8511 	}
8512 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8513 	if (ret)
8514 		return ret;
8515 
	/* the RSS indirection table has been configured by the user */
8517 	if (rxfh_configured)
8518 		goto out;
8519 
	/* Reinitialize the RSS indirection table for the new RSS size */
8521 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8522 	if (!rss_indir)
8523 		return -ENOMEM;
8524 
8525 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8526 		rss_indir[i] = i % kinfo->rss_size;
8527 
8528 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8529 	if (ret)
8530 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8531 			ret);
8532 
8533 	kfree(rss_indir);
8534 
8535 out:
8536 	if (!ret)
8537 		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
8539 			 cur_rss_size, kinfo->rss_size,
8540 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8541 
8542 	return ret;
8543 }
8544 
8545 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8546 			      u32 *regs_num_64_bit)
8547 {
8548 	struct hclge_desc desc;
8549 	u32 total_num;
8550 	int ret;
8551 
8552 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8553 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8554 	if (ret) {
8555 		dev_err(&hdev->pdev->dev,
8556 			"Query register number cmd failed, ret = %d.\n", ret);
8557 		return ret;
8558 	}
8559 
8560 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
8561 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
8562 
8563 	total_num = *regs_num_32_bit + *regs_num_64_bit;
8564 	if (!total_num)
8565 		return -EINVAL;
8566 
8567 	return 0;
8568 }
8569 
8570 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8571 				 void *data)
8572 {
8573 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8574 
8575 	struct hclge_desc *desc;
8576 	u32 *reg_val = data;
8577 	__le32 *desc_data;
8578 	int cmd_num;
8579 	int i, k, n;
8580 	int ret;
8581 
8582 	if (regs_num == 0)
8583 		return 0;
8584 
8585 	cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8586 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8587 	if (!desc)
8588 		return -ENOMEM;
8589 
8590 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8591 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8592 	if (ret) {
8593 		dev_err(&hdev->pdev->dev,
8594 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
8595 		kfree(desc);
8596 		return ret;
8597 	}
8598 
8599 	for (i = 0; i < cmd_num; i++) {
8600 		if (i == 0) {
8601 			desc_data = (__le32 *)(&desc[i].data[0]);
8602 			n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8603 		} else {
8604 			desc_data = (__le32 *)(&desc[i]);
8605 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
8606 		}
8607 		for (k = 0; k < n; k++) {
8608 			*reg_val++ = le32_to_cpu(*desc_data++);
8609 
8610 			regs_num--;
8611 			if (!regs_num)
8612 				break;
8613 		}
8614 	}
8615 
8616 	kfree(desc);
8617 	return 0;
8618 }
8619 
8620 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8621 				 void *data)
8622 {
8623 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8624 
8625 	struct hclge_desc *desc;
8626 	u64 *reg_val = data;
8627 	__le64 *desc_data;
8628 	int cmd_num;
8629 	int i, k, n;
8630 	int ret;
8631 
8632 	if (regs_num == 0)
8633 		return 0;
8634 
8635 	cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8636 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8637 	if (!desc)
8638 		return -ENOMEM;
8639 
8640 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8641 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8642 	if (ret) {
8643 		dev_err(&hdev->pdev->dev,
8644 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
8645 		kfree(desc);
8646 		return ret;
8647 	}
8648 
8649 	for (i = 0; i < cmd_num; i++) {
8650 		if (i == 0) {
8651 			desc_data = (__le64 *)(&desc[i].data[0]);
8652 			n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8653 		} else {
8654 			desc_data = (__le64 *)(&desc[i]);
8655 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
8656 		}
8657 		for (k = 0; k < n; k++) {
8658 			*reg_val++ = le64_to_cpu(*desc_data++);
8659 
8660 			regs_num--;
8661 			if (!regs_num)
8662 				break;
8663 		}
8664 	}
8665 
8666 	kfree(desc);
8667 	return 0;
8668 }
8669 
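/* Layout of the register dump produced by hclge_get_regs(): the directly
 * readable cmdq, common, per-ring and per-vector TQP interrupt registers
 * come first, each group padded with SEPARATOR_VALUE words to a
 * REG_NUM_PER_LINE boundary, followed by the 32-bit and then the 64-bit
 * registers queried from the firmware.
 */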
8670 #define MAX_SEPARATE_NUM	4
8671 #define SEPARATOR_VALUE		0xFFFFFFFF
8672 #define REG_NUM_PER_LINE	4
8673 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
8674 
8675 static int hclge_get_regs_len(struct hnae3_handle *handle)
8676 {
8677 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8678 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8679 	struct hclge_vport *vport = hclge_get_vport(handle);
8680 	struct hclge_dev *hdev = vport->back;
8681 	u32 regs_num_32_bit, regs_num_64_bit;
8682 	int ret;
8683 
8684 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8685 	if (ret) {
8686 		dev_err(&hdev->pdev->dev,
8687 			"Get register number failed, ret = %d.\n", ret);
8688 		return -EOPNOTSUPP;
8689 	}
8690 
8691 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8692 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8693 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8694 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8695 
8696 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8697 		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8698 		regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8699 }
8700 
8701 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8702 			   void *data)
8703 {
8704 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8705 	struct hclge_vport *vport = hclge_get_vport(handle);
8706 	struct hclge_dev *hdev = vport->back;
8707 	u32 regs_num_32_bit, regs_num_64_bit;
8708 	int i, j, reg_um, separator_num;
8709 	u32 *reg = data;
8710 	int ret;
8711 
8712 	*version = hdev->fw_version;
8713 
8714 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8715 	if (ret) {
8716 		dev_err(&hdev->pdev->dev,
8717 			"Get register number failed, ret = %d.\n", ret);
8718 		return;
8719 	}
8720 
	/* fetch the per-PF register values from the PF PCIe register space */
8722 	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8723 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8724 	for (i = 0; i < reg_um; i++)
8725 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8726 	for (i = 0; i < separator_num; i++)
8727 		*reg++ = SEPARATOR_VALUE;
8728 
8729 	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8730 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8731 	for (i = 0; i < reg_um; i++)
8732 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8733 	for (i = 0; i < separator_num; i++)
8734 		*reg++ = SEPARATOR_VALUE;
8735 
8736 	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8737 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8738 	for (j = 0; j < kinfo->num_tqps; j++) {
8739 		for (i = 0; i < reg_um; i++)
8740 			*reg++ = hclge_read_dev(&hdev->hw,
8741 						ring_reg_addr_list[i] +
8742 						0x200 * j);
8743 		for (i = 0; i < separator_num; i++)
8744 			*reg++ = SEPARATOR_VALUE;
8745 	}
8746 
8747 	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8748 	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8749 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
8750 		for (i = 0; i < reg_um; i++)
8751 			*reg++ = hclge_read_dev(&hdev->hw,
8752 						tqp_intr_reg_addr_list[i] +
8753 						4 * j);
8754 		for (i = 0; i < separator_num; i++)
8755 			*reg++ = SEPARATOR_VALUE;
8756 	}
8757 
	/* fetch the PF common register values from the firmware */
8759 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8760 	if (ret) {
8761 		dev_err(&hdev->pdev->dev,
8762 			"Get 32 bit register failed, ret = %d.\n", ret);
8763 		return;
8764 	}
8765 
8766 	reg += regs_num_32_bit;
8767 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8768 	if (ret)
8769 		dev_err(&hdev->pdev->dev,
8770 			"Get 64 bit register failed, ret = %d.\n", ret);
8771 }
8772 
8773 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8774 {
8775 	struct hclge_set_led_state_cmd *req;
8776 	struct hclge_desc desc;
8777 	int ret;
8778 
8779 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8780 
8781 	req = (struct hclge_set_led_state_cmd *)desc.data;
8782 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8783 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8784 
8785 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8786 	if (ret)
8787 		dev_err(&hdev->pdev->dev,
8788 			"Send set led state cmd error, ret =%d\n", ret);
8789 
8790 	return ret;
8791 }
8792 
8793 enum hclge_led_status {
8794 	HCLGE_LED_OFF,
8795 	HCLGE_LED_ON,
8796 	HCLGE_LED_NO_CHANGE = 0xFF,
8797 };
8798 
8799 static int hclge_set_led_id(struct hnae3_handle *handle,
8800 			    enum ethtool_phys_id_state status)
8801 {
8802 	struct hclge_vport *vport = hclge_get_vport(handle);
8803 	struct hclge_dev *hdev = vport->back;
8804 
8805 	switch (status) {
8806 	case ETHTOOL_ID_ACTIVE:
8807 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
8808 	case ETHTOOL_ID_INACTIVE:
8809 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8810 	default:
8811 		return -EINVAL;
8812 	}
8813 }
8814 
8815 static void hclge_get_link_mode(struct hnae3_handle *handle,
8816 				unsigned long *supported,
8817 				unsigned long *advertising)
8818 {
8819 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8820 	struct hclge_vport *vport = hclge_get_vport(handle);
8821 	struct hclge_dev *hdev = vport->back;
8822 	unsigned int idx = 0;
8823 
8824 	for (; idx < size; idx++) {
8825 		supported[idx] = hdev->hw.mac.supported[idx];
8826 		advertising[idx] = hdev->hw.mac.advertising[idx];
8827 	}
8828 }
8829 
8830 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8831 {
8832 	struct hclge_vport *vport = hclge_get_vport(handle);
8833 	struct hclge_dev *hdev = vport->back;
8834 
8835 	return hclge_config_gro(hdev, enable);
8836 }
8837 
8838 static const struct hnae3_ae_ops hclge_ops = {
8839 	.init_ae_dev = hclge_init_ae_dev,
8840 	.uninit_ae_dev = hclge_uninit_ae_dev,
8841 	.flr_prepare = hclge_flr_prepare,
8842 	.flr_done = hclge_flr_done,
8843 	.init_client_instance = hclge_init_client_instance,
8844 	.uninit_client_instance = hclge_uninit_client_instance,
8845 	.map_ring_to_vector = hclge_map_ring_to_vector,
8846 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8847 	.get_vector = hclge_get_vector,
8848 	.put_vector = hclge_put_vector,
8849 	.set_promisc_mode = hclge_set_promisc_mode,
8850 	.set_loopback = hclge_set_loopback,
8851 	.start = hclge_ae_start,
8852 	.stop = hclge_ae_stop,
8853 	.client_start = hclge_client_start,
8854 	.client_stop = hclge_client_stop,
8855 	.get_status = hclge_get_status,
8856 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
8857 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8858 	.get_media_type = hclge_get_media_type,
8859 	.check_port_speed = hclge_check_port_speed,
8860 	.get_fec = hclge_get_fec,
8861 	.set_fec = hclge_set_fec,
8862 	.get_rss_key_size = hclge_get_rss_key_size,
8863 	.get_rss_indir_size = hclge_get_rss_indir_size,
8864 	.get_rss = hclge_get_rss,
8865 	.set_rss = hclge_set_rss,
8866 	.set_rss_tuple = hclge_set_rss_tuple,
8867 	.get_rss_tuple = hclge_get_rss_tuple,
8868 	.get_tc_size = hclge_get_tc_size,
8869 	.get_mac_addr = hclge_get_mac_addr,
8870 	.set_mac_addr = hclge_set_mac_addr,
8871 	.do_ioctl = hclge_do_ioctl,
8872 	.add_uc_addr = hclge_add_uc_addr,
8873 	.rm_uc_addr = hclge_rm_uc_addr,
8874 	.add_mc_addr = hclge_add_mc_addr,
8875 	.rm_mc_addr = hclge_rm_mc_addr,
8876 	.set_autoneg = hclge_set_autoneg,
8877 	.get_autoneg = hclge_get_autoneg,
8878 	.restart_autoneg = hclge_restart_autoneg,
8879 	.get_pauseparam = hclge_get_pauseparam,
8880 	.set_pauseparam = hclge_set_pauseparam,
8881 	.set_mtu = hclge_set_mtu,
8882 	.reset_queue = hclge_reset_tqp,
8883 	.get_stats = hclge_get_stats,
8884 	.get_mac_pause_stats = hclge_get_mac_pause_stat,
8885 	.update_stats = hclge_update_stats,
8886 	.get_strings = hclge_get_strings,
8887 	.get_sset_count = hclge_get_sset_count,
8888 	.get_fw_version = hclge_get_fw_version,
8889 	.get_mdix_mode = hclge_get_mdix_mode,
8890 	.enable_vlan_filter = hclge_enable_vlan_filter,
8891 	.set_vlan_filter = hclge_set_vlan_filter,
8892 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8893 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8894 	.reset_event = hclge_reset_event,
8895 	.set_default_reset_request = hclge_set_def_reset_request,
8896 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8897 	.set_channels = hclge_set_channels,
8898 	.get_channels = hclge_get_channels,
8899 	.get_regs_len = hclge_get_regs_len,
8900 	.get_regs = hclge_get_regs,
8901 	.set_led_id = hclge_set_led_id,
8902 	.get_link_mode = hclge_get_link_mode,
8903 	.add_fd_entry = hclge_add_fd_entry,
8904 	.del_fd_entry = hclge_del_fd_entry,
8905 	.del_all_fd_entries = hclge_del_all_fd_entries,
8906 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8907 	.get_fd_rule_info = hclge_get_fd_rule_info,
8908 	.get_fd_all_rules = hclge_get_all_rules,
8909 	.restore_fd_rules = hclge_restore_fd_entries,
8910 	.enable_fd = hclge_enable_fd,
8911 	.dbg_run_cmd = hclge_dbg_run_cmd,
8912 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
8913 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
8914 	.ae_dev_resetting = hclge_ae_dev_resetting,
8915 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8916 	.set_gro_en = hclge_gro_en,
8917 	.get_global_queue_id = hclge_covert_handle_qid_global,
8918 	.set_timer_task = hclge_set_timer_task,
8919 	.mac_connect_phy = hclge_mac_connect_phy,
8920 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
8921 };
8922 
8923 static struct hnae3_ae_algo ae_algo = {
8924 	.ops = &hclge_ops,
8925 	.pdev_id_table = ae_algo_pci_tbl,
8926 };
8927 
8928 static int hclge_init(void)
8929 {
8930 	pr_info("%s is initializing\n", HCLGE_NAME);
8931 
8932 	hnae3_register_ae_algo(&ae_algo);
8933 
8934 	return 0;
8935 }
8936 
8937 static void hclge_exit(void)
8938 {
8939 	hnae3_unregister_ae_algo(&ae_algo);
8940 }
8941 module_init(hclge_init);
8942 module_exit(hclge_exit);
8943 
8944 MODULE_LICENSE("GPL");
8945 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8946 MODULE_DESCRIPTION("HCLGE Driver");
8947 MODULE_VERSION(HCLGE_MOD_VERSION);
8948