1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
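/* helpers for reading a u64 counter at a byte offset within a stats struct */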
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
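/* name/offset pairs describing the MAC hardware counters exposed through
 * the ETH_SS_STATS stringset (ethtool -S)
 */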
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
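/* static MAC manager table entry matching LLDP frames
 * (ethertype ETH_P_LLDP, dst MAC 01:80:c2:00:00:0e)
 */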
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
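/* default 40-byte RSS hash key (the commonly used default Toeplitz key) */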
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
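/* BD-number offsets for the per-module DFX register dumps; kept in the
 * same order as hclge_dfx_reg_opcode_list below
 */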
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
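/* each entry is { meta data field, width in bits } */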
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
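/* each entry is { tuple, width in bits, key option, offset of the value in
 * struct hclge_fd_rule, offset of the mask }; -1 means the tuple has no
 * corresponding rule field
 */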
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445 
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0032, only the first desc has the head */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
493 	/* This may be called inside atomic sections,
494 	 * so GFP_ATOMIC is more suitable here
495 	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0034, only the first desc has the head */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
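	/* one BD accounts for the first 3 stats registers; every additional
	 * BD holds 4 more, so round the remainder up to a whole BD
	 */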
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546 
547 	return 0;
548 }
549 
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 	/* if the query succeeds, the firmware supports the new statistics
557 	 * acquisition method
558 	 */
557 	if (!ret)
558 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 	else if (ret == -EOPNOTSUPP)
560 		ret = hclge_mac_update_stats_defective(hdev);
561 	else
562 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563 
564 	return ret;
565 }
566 
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 	struct hclge_vport *vport = hclge_get_vport(handle);
571 	struct hclge_dev *hdev = vport->back;
572 	struct hnae3_queue *queue;
573 	struct hclge_desc desc[1];
574 	struct hclge_tqp *tqp;
575 	int ret, i;
576 
577 	for (i = 0; i < kinfo->num_tqps; i++) {
578 		queue = handle->kinfo.tqp[i];
579 		tqp = container_of(queue, struct hclge_tqp, q);
580 		/* command : HCLGE_OPC_QUERY_RX_STATS */
581 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582 					   true);
583 
584 		desc[0].data[0] = cpu_to_le32(tqp->index);
585 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
586 		if (ret) {
587 			dev_err(&hdev->pdev->dev,
588 				"Query tqp stat fail, status = %d, queue = %d\n",
589 				ret, i);
590 			return ret;
591 		}
592 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 			le32_to_cpu(desc[0].data[1]);
594 	}
595 
596 	for (i = 0; i < kinfo->num_tqps; i++) {
597 		queue = handle->kinfo.tqp[i];
598 		tqp = container_of(queue, struct hclge_tqp, q);
599 		/* command : HCLGE_OPC_QUERY_TX_STATS */
600 		hclge_cmd_setup_basic_desc(&desc[0],
601 					   HCLGE_OPC_QUERY_TX_STATS,
602 					   true);
603 
604 		desc[0].data[0] = cpu_to_le32(tqp->index);
605 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
606 		if (ret) {
607 			dev_err(&hdev->pdev->dev,
608 				"Query tqp stat fail, status = %d, queue = %d\n",
609 				ret, i);
610 			return ret;
611 		}
612 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 			le32_to_cpu(desc[0].data[1]);
614 	}
615 
616 	return 0;
617 }
618 
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	struct hclge_tqp *tqp;
623 	u64 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629 	}
630 
631 	for (i = 0; i < kinfo->num_tqps; i++) {
632 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634 	}
635 
636 	return buff;
637 }
638 
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642 
643 	/* each tqp has both a TX queue and an RX queue */
644 	return kinfo->num_tqps * (2);
645 }
646 
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650 	u8 *buff = data;
651 	int i;
652 
653 	for (i = 0; i < kinfo->num_tqps; i++) {
654 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 			struct hclge_tqp, q);
656 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657 			 tqp->index);
658 		buff = buff + ETH_GSTRING_LEN;
659 	}
660 
661 	for (i = 0; i < kinfo->num_tqps; i++) {
662 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 			struct hclge_tqp, q);
664 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665 			 tqp->index);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return buff;
670 }
671 
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 				 const struct hclge_comm_stats_str strs[],
674 				 int size, u64 *data)
675 {
676 	u64 *buf = data;
677 	u32 i;
678 
679 	for (i = 0; i < size; i++)
680 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681 
682 	return buf + size;
683 }
684 
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 				  const struct hclge_comm_stats_str strs[],
687 				  int size, u8 *data)
688 {
689 	char *buff = (char *)data;
690 	u32 i;
691 
692 	if (stringset != ETH_SS_STATS)
693 		return buff;
694 
695 	for (i = 0; i < size; i++) {
696 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 		buff = buff + ETH_GSTRING_LEN;
698 	}
699 
700 	return (u8 *)buff;
701 }
702 
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705 	struct hnae3_handle *handle;
706 	int status;
707 
708 	handle = &hdev->vport[0].nic;
709 	if (handle->client) {
710 		status = hclge_tqps_update_stats(handle);
711 		if (status) {
712 			dev_err(&hdev->pdev->dev,
713 				"Update TQPS stats fail, status = %d.\n",
714 				status);
715 		}
716 	}
717 
718 	status = hclge_mac_update_stats(hdev);
719 	if (status)
720 		dev_err(&hdev->pdev->dev,
721 			"Update MAC stats fail, status = %d.\n", status);
722 }
723 
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 			       struct net_device_stats *net_stats)
726 {
727 	struct hclge_vport *vport = hclge_get_vport(handle);
728 	struct hclge_dev *hdev = vport->back;
729 	int status;
730 
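	/* serialize statistics updates: if another update is already in
	 * progress, skip this one
	 */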
731 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732 		return;
733 
734 	status = hclge_mac_update_stats(hdev);
735 	if (status)
736 		dev_err(&hdev->pdev->dev,
737 			"Update MAC stats fail, status = %d.\n",
738 			status);
739 
740 	status = hclge_tqps_update_stats(handle);
741 	if (status)
742 		dev_err(&hdev->pdev->dev,
743 			"Update TQPS stats fail, status = %d.\n",
744 			status);
745 
746 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748 
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 		HNAE3_SUPPORT_PHY_LOOPBACK |\
753 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755 
756 	struct hclge_vport *vport = hclge_get_vport(handle);
757 	struct hclge_dev *hdev = vport->back;
758 	int count = 0;
759 
760 	/* Loopback test support rules:
761 	 * mac: only supported in GE mode
762 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
763 	 * phy: only supported when a phy device exists on the board
764 	 */
765 	if (stringset == ETH_SS_TEST) {
766 		/* clear the loopback bit flags first */
767 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772 			count += 1;
773 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774 		}
775 
776 		count += 2;
777 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779 
780 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 		     hdev->hw.mac.phydev->drv->set_loopback) ||
782 		    hnae3_dev_phy_imp_supported(hdev)) {
783 			count += 1;
784 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785 		}
786 	} else if (stringset == ETH_SS_STATS) {
787 		count = ARRAY_SIZE(g_mac_stats_string) +
788 			hclge_tqps_get_sset_count(handle, stringset);
789 	}
790 
791 	return count;
792 }
793 
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795 			      u8 *data)
796 {
797 	u8 *p = (char *)data;
798 	int size;
799 
800 	if (stringset == ETH_SS_STATS) {
801 		size = ARRAY_SIZE(g_mac_stats_string);
802 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803 					   size, p);
804 		p = hclge_tqps_get_strings(handle, p);
805 	} else if (stringset == ETH_SS_TEST) {
806 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808 			       ETH_GSTRING_LEN);
809 			p += ETH_GSTRING_LEN;
810 		}
811 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813 			       ETH_GSTRING_LEN);
814 			p += ETH_GSTRING_LEN;
815 		}
816 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817 			memcpy(p,
818 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819 			       ETH_GSTRING_LEN);
820 			p += ETH_GSTRING_LEN;
821 		}
822 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824 			       ETH_GSTRING_LEN);
825 			p += ETH_GSTRING_LEN;
826 		}
827 	}
828 }
829 
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832 	struct hclge_vport *vport = hclge_get_vport(handle);
833 	struct hclge_dev *hdev = vport->back;
834 	u64 *p;
835 
836 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 				 ARRAY_SIZE(g_mac_stats_string), data);
838 	p = hclge_tqps_get_stats(handle, p);
839 }
840 
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 			       struct hns3_mac_stats *mac_stats)
843 {
844 	struct hclge_vport *vport = hclge_get_vport(handle);
845 	struct hclge_dev *hdev = vport->back;
846 
847 	hclge_update_stats(handle, NULL);
848 
849 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852 
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 				   struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK	0xF
857 
858 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859 		return -EINVAL;
860 
861 	/* Record whether this pf is the main pf */
862 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 		hdev->flag |= HCLGE_FLAG_MAIN;
864 	else
865 		hdev->flag &= ~HCLGE_FLAG_MAIN;
866 
867 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868 	return 0;
869 }
870 
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT	5
874 
875 	struct hclge_func_status_cmd *req;
876 	struct hclge_desc desc;
877 	int timeout = 0;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 	req = (struct hclge_func_status_cmd *)desc.data;
882 
883 	do {
884 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 		if (ret) {
886 			dev_err(&hdev->pdev->dev,
887 				"query function status failed %d.\n", ret);
888 			return ret;
889 		}
890 
891 		/* Check whether pf reset is done */
892 		if (req->pf_state)
893 			break;
894 		usleep_range(1000, 2000);
895 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
896 
897 	return hclge_parse_func_status(hdev, req);
898 }
899 
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902 	struct hclge_pf_res_cmd *req;
903 	struct hclge_desc desc;
904 	int ret;
905 
906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908 	if (ret) {
909 		dev_err(&hdev->pdev->dev,
910 			"query pf resource failed %d.\n", ret);
911 		return ret;
912 	}
913 
914 	req = (struct hclge_pf_res_cmd *)desc.data;
915 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 			 le16_to_cpu(req->ext_tqp_num);
917 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918 
919 	if (req->tx_buf_size)
920 		hdev->tx_buf_size =
921 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922 	else
923 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924 
925 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926 
927 	if (req->dv_buf_size)
928 		hdev->dv_buf_size =
929 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930 	else
931 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932 
933 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934 
935 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 		dev_err(&hdev->pdev->dev,
938 			"only %u msi resources available, not enough for pf(min:2).\n",
939 			hdev->num_nic_msi);
940 		return -EINVAL;
941 	}
942 
943 	if (hnae3_dev_roce_supported(hdev)) {
944 		hdev->num_roce_msi =
945 			le16_to_cpu(req->pf_intr_vector_number_roce);
946 
947 		/* The PF should have both NIC and RoCE vectors;
948 		 * NIC vectors are queued before RoCE vectors.
949 		 */
950 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951 	} else {
952 		hdev->num_msi = hdev->num_nic_msi;
953 	}
954 
955 	return 0;
956 }
957 
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
960 	switch (speed_cmd) {
961 	case 6:
962 		*speed = HCLGE_MAC_SPEED_10M;
963 		break;
964 	case 7:
965 		*speed = HCLGE_MAC_SPEED_100M;
966 		break;
967 	case 0:
968 		*speed = HCLGE_MAC_SPEED_1G;
969 		break;
970 	case 1:
971 		*speed = HCLGE_MAC_SPEED_10G;
972 		break;
973 	case 2:
974 		*speed = HCLGE_MAC_SPEED_25G;
975 		break;
976 	case 3:
977 		*speed = HCLGE_MAC_SPEED_40G;
978 		break;
979 	case 4:
980 		*speed = HCLGE_MAC_SPEED_50G;
981 		break;
982 	case 5:
983 		*speed = HCLGE_MAC_SPEED_100G;
984 		break;
985 	case 8:
986 		*speed = HCLGE_MAC_SPEED_200G;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997 	struct hclge_vport *vport = hclge_get_vport(handle);
998 	struct hclge_dev *hdev = vport->back;
999 	u32 speed_ability = hdev->hw.mac.speed_ability;
1000 	u32 speed_bit = 0;
1001 
1002 	switch (speed) {
1003 	case HCLGE_MAC_SPEED_10M:
1004 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_100M:
1007 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_1G:
1010 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_10G:
1013 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1014 		break;
1015 	case HCLGE_MAC_SPEED_25G:
1016 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1017 		break;
1018 	case HCLGE_MAC_SPEED_40G:
1019 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1020 		break;
1021 	case HCLGE_MAC_SPEED_50G:
1022 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1023 		break;
1024 	case HCLGE_MAC_SPEED_100G:
1025 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1026 		break;
1027 	case HCLGE_MAC_SPEED_200G:
1028 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
1207 	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT			8
1283 
1284 	struct hclge_cfg_param_cmd *req;
1285 	u64 mac_addr_tmp_high;
1286 	u16 speed_ability_ext;
1287 	u64 mac_addr_tmp;
1288 	unsigned int i;
1289 
1290 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291 
1292 	/* get the configuration */
1293 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 					    HCLGE_CFG_TQP_DESC_N_M,
1297 					    HCLGE_CFG_TQP_DESC_N_S);
1298 
1299 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300 					HCLGE_CFG_PHY_ADDR_M,
1301 					HCLGE_CFG_PHY_ADDR_S);
1302 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 					  HCLGE_CFG_MEDIA_TP_M,
1304 					  HCLGE_CFG_MEDIA_TP_S);
1305 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 					  HCLGE_CFG_RX_BUF_LEN_M,
1307 					  HCLGE_CFG_RX_BUF_LEN_S);
1308 	/* get mac_address */
1309 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311 					    HCLGE_CFG_MAC_ADDR_H_M,
1312 					    HCLGE_CFG_MAC_ADDR_H_S);
1313 
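	/* param[2] carries the low 32 bits of the MAC address; merge in the
	 * high bits from param[3] (shifted in two steps, presumably to avoid
	 * a 32-bit shift-by-32)
	 */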
1314 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315 
1316 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317 					     HCLGE_CFG_DEFAULT_SPEED_M,
1318 					     HCLGE_CFG_DEFAULT_SPEED_S);
1319 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320 					       HCLGE_CFG_RSS_SIZE_M,
1321 					       HCLGE_CFG_RSS_SIZE_S);
1322 
1323 	for (i = 0; i < ETH_ALEN; i++)
1324 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325 
1326 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328 
1329 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330 					     HCLGE_CFG_SPEED_ABILITY_M,
1331 					     HCLGE_CFG_SPEED_ABILITY_S);
1332 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336 
1337 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1339 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1340 
1341 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1342 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1343 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1344 	if (!cfg->umv_space)
1345 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1346 
1347 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1348 					       HCLGE_CFG_PF_RSS_SIZE_M,
1349 					       HCLGE_CFG_PF_RSS_SIZE_S);
1350 
1351 	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
1352 	 * exponent rather than the value itself, which is more flexible for
1353 	 * future changes and expansions.
1354 	 * A PF field of 0 is not meaningful, so in that case the PF falls
1355 	 * back to the VF max rss size field (HCLGE_CFG_RSS_SIZE_S) and both
1356 	 * share the same maximum.
1357 	 */
1358 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1359 			       1U << cfg->pf_rss_size_max :
1360 			       cfg->vf_rss_size_max;
1361 }
1362 
1363 /* hclge_get_cfg: query the static parameters from flash
1364  * @hdev: pointer to struct hclge_dev
1365  * @hcfg: the config structure to be filled in
1366  */
1367 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1368 {
1369 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1370 	struct hclge_cfg_param_cmd *req;
1371 	unsigned int i;
1372 	int ret;
1373 
1374 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1375 		u32 offset = 0;
1376 
1377 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1378 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1379 					   true);
1380 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1381 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1382 		/* The read length must be given in units of 4 bytes when sent to hardware */
1383 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1384 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1385 		req->offset = cpu_to_le32(offset);
1386 	}
1387 
1388 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1391 		return ret;
1392 	}
1393 
1394 	hclge_parse_cfg(hcfg, desc);
1395 
1396 	return 0;
1397 }
1398 
1399 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1400 {
1401 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1402 
1403 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1404 
1405 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1407 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1408 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1409 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1410 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1411 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1412 }
1413 
1414 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1415 				  struct hclge_desc *desc)
1416 {
1417 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1418 	struct hclge_dev_specs_0_cmd *req0;
1419 	struct hclge_dev_specs_1_cmd *req1;
1420 
1421 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1422 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1423 
1424 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1425 	ae_dev->dev_specs.rss_ind_tbl_size =
1426 		le16_to_cpu(req0->rss_ind_tbl_size);
1427 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1428 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1429 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1430 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1431 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1432 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1433 }
1434 
1435 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1436 {
1437 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1438 
1439 	if (!dev_specs->max_non_tso_bd_num)
1440 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1441 	if (!dev_specs->rss_ind_tbl_size)
1442 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1443 	if (!dev_specs->rss_key_size)
1444 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1445 	if (!dev_specs->max_tm_rate)
1446 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1447 	if (!dev_specs->max_qset_num)
1448 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1449 	if (!dev_specs->max_int_gl)
1450 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1451 	if (!dev_specs->max_frm_size)
1452 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1453 }
1454 
1455 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1456 {
1457 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1458 	int ret;
1459 	int i;
1460 
1461 	/* set default specifications as devices lower than version V3 do not
1462 	 * support querying specifications from firmware.
1463 	 */
1464 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1465 		hclge_set_default_dev_specs(hdev);
1466 		return 0;
1467 	}
1468 
1469 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1470 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1471 					   true);
1472 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1473 	}
1474 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1475 
1476 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1477 	if (ret)
1478 		return ret;
1479 
1480 	hclge_parse_dev_specs(hdev, desc);
1481 	hclge_check_dev_specs(hdev);
1482 
1483 	return 0;
1484 }
1485 
1486 static int hclge_get_cap(struct hclge_dev *hdev)
1487 {
1488 	int ret;
1489 
1490 	ret = hclge_query_function_status(hdev);
1491 	if (ret) {
1492 		dev_err(&hdev->pdev->dev,
1493 			"query function status error %d.\n", ret);
1494 		return ret;
1495 	}
1496 
1497 	/* get pf resource */
1498 	return hclge_query_pf_resource(hdev);
1499 }
1500 
1501 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1502 {
1503 #define HCLGE_MIN_TX_DESC	64
1504 #define HCLGE_MIN_RX_DESC	64
1505 
1506 	if (!is_kdump_kernel())
1507 		return;
1508 
1509 	dev_info(&hdev->pdev->dev,
1510 		 "Running kdump kernel. Using minimal resources\n");
1511 
1512 	/* the minimal number of queue pairs equals the number of vports */
1513 	hdev->num_tqps = hdev->num_req_vfs + 1;
1514 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1515 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1516 }
1517 
1518 static int hclge_configure(struct hclge_dev *hdev)
1519 {
1520 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1521 	struct hclge_cfg cfg;
1522 	unsigned int i;
1523 	int ret;
1524 
1525 	ret = hclge_get_cfg(hdev, &cfg);
1526 	if (ret)
1527 		return ret;
1528 
1529 	hdev->base_tqp_pid = 0;
1530 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1531 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1532 	hdev->rx_buf_len = cfg.rx_buf_len;
1533 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1534 	hdev->hw.mac.media_type = cfg.media_type;
1535 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1536 	hdev->num_tx_desc = cfg.tqp_desc_num;
1537 	hdev->num_rx_desc = cfg.tqp_desc_num;
1538 	hdev->tm_info.num_pg = 1;
1539 	hdev->tc_max = cfg.tc_num;
1540 	hdev->tm_info.hw_pfc_map = 0;
1541 	hdev->wanted_umv_size = cfg.umv_space;
1542 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1543 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1544 
1545 	if (hnae3_dev_fd_supported(hdev)) {
1546 		hdev->fd_en = true;
1547 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1548 	}
1549 
1550 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1551 	if (ret) {
1552 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1553 			cfg.default_speed, ret);
1554 		return ret;
1555 	}
1556 
1557 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1558 
1559 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1560 
1561 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1562 	    (hdev->tc_max < 1)) {
1563 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1564 			 hdev->tc_max);
1565 		hdev->tc_max = 1;
1566 	}
1567 
1568 	/* Dev does not support DCB */
1569 	if (!hnae3_dev_dcb_supported(hdev)) {
1570 		hdev->tc_max = 1;
1571 		hdev->pfc_max = 0;
1572 	} else {
1573 		hdev->pfc_max = hdev->tc_max;
1574 	}
1575 
1576 	hdev->tm_info.num_tc = 1;
1577 
1578 	/* Non-contiguous TCs are currently not supported */
1579 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1580 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1581 
1582 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1583 
1584 	hclge_init_kdump_kernel_config(hdev);
1585 
1586 	/* Set the initial affinity based on the pci function number */
1587 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1588 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1589 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1590 			&hdev->affinity_mask);
1591 
1592 	return ret;
1593 }
1594 
1595 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1596 			    u16 tso_mss_max)
1597 {
1598 	struct hclge_cfg_tso_status_cmd *req;
1599 	struct hclge_desc desc;
1600 
1601 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1602 
1603 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1604 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1605 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1606 
1607 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1608 }
1609 
1610 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1611 {
1612 	struct hclge_cfg_gro_status_cmd *req;
1613 	struct hclge_desc desc;
1614 	int ret;
1615 
1616 	if (!hnae3_dev_gro_supported(hdev))
1617 		return 0;
1618 
1619 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1620 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1621 
1622 	req->gro_en = en ? 1 : 0;
1623 
1624 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1625 	if (ret)
1626 		dev_err(&hdev->pdev->dev,
1627 			"GRO hardware config cmd failed, ret = %d\n", ret);
1628 
1629 	return ret;
1630 }
1631 
1632 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1633 {
1634 	struct hclge_tqp *tqp;
1635 	int i;
1636 
1637 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1638 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1639 	if (!hdev->htqp)
1640 		return -ENOMEM;
1641 
1642 	tqp = hdev->htqp;
1643 
1644 	for (i = 0; i < hdev->num_tqps; i++) {
1645 		tqp->dev = &hdev->pdev->dev;
1646 		tqp->index = i;
1647 
1648 		tqp->q.ae_algo = &ae_algo;
1649 		tqp->q.buf_size = hdev->rx_buf_len;
1650 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1651 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1652 
1653 		/* need an extended offset to configure queues >=
1654 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1655 		 */
1656 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1657 			tqp->q.io_base = hdev->hw.io_base +
1658 					 HCLGE_TQP_REG_OFFSET +
1659 					 i * HCLGE_TQP_REG_SIZE;
1660 		else
1661 			tqp->q.io_base = hdev->hw.io_base +
1662 					 HCLGE_TQP_REG_OFFSET +
1663 					 HCLGE_TQP_EXT_REG_OFFSET +
1664 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1665 					 HCLGE_TQP_REG_SIZE;
1666 
1667 		tqp++;
1668 	}
1669 
1670 	return 0;
1671 }
1672 
1673 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1674 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1675 {
1676 	struct hclge_tqp_map_cmd *req;
1677 	struct hclge_desc desc;
1678 	int ret;
1679 
1680 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1681 
1682 	req = (struct hclge_tqp_map_cmd *)desc.data;
1683 	req->tqp_id = cpu_to_le16(tqp_pid);
1684 	req->tqp_vf = func_id;
1685 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
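	/* the map-type bit distinguishes a VF queue mapping from a PF one */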
1686 	if (!is_pf)
1687 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1688 	req->tqp_vid = cpu_to_le16(tqp_vid);
1689 
1690 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1691 	if (ret)
1692 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1693 
1694 	return ret;
1695 }
1696 
1697 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1698 {
1699 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1700 	struct hclge_dev *hdev = vport->back;
1701 	int i, alloced;
1702 
1703 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1704 	     alloced < num_tqps; i++) {
1705 		if (!hdev->htqp[i].alloced) {
1706 			hdev->htqp[i].q.handle = &vport->nic;
1707 			hdev->htqp[i].q.tqp_index = alloced;
1708 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1709 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1710 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1711 			hdev->htqp[i].alloced = true;
1712 			alloced++;
1713 		}
1714 	}
1715 	vport->alloc_tqps = alloced;
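	/* rss_size is bounded by the per-TC queue count of this vport
	 * and by the PF rss size limit
	 */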
1716 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1717 				vport->alloc_tqps / hdev->tm_info.num_tc);
1718 
1719 	/* ensure a one-to-one mapping between irq and queue by default */
1720 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1721 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1722 
1723 	return 0;
1724 }
1725 
1726 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1727 			    u16 num_tx_desc, u16 num_rx_desc)
1728 
1729 {
1730 	struct hnae3_handle *nic = &vport->nic;
1731 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1732 	struct hclge_dev *hdev = vport->back;
1733 	int ret;
1734 
1735 	kinfo->num_tx_desc = num_tx_desc;
1736 	kinfo->num_rx_desc = num_rx_desc;
1737 
1738 	kinfo->rx_buf_len = hdev->rx_buf_len;
1739 
1740 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1741 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1742 	if (!kinfo->tqp)
1743 		return -ENOMEM;
1744 
1745 	ret = hclge_assign_tqp(vport, num_tqps);
1746 	if (ret)
1747 		dev_err(&hdev->pdev->dev, "failed to assign TQPs, ret = %d\n", ret);
1748 
1749 	return ret;
1750 }
1751 
1752 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1753 				  struct hclge_vport *vport)
1754 {
1755 	struct hnae3_handle *nic = &vport->nic;
1756 	struct hnae3_knic_private_info *kinfo;
1757 	u16 i;
1758 
1759 	kinfo = &nic->kinfo;
1760 	for (i = 0; i < vport->alloc_tqps; i++) {
1761 		struct hclge_tqp *q =
1762 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1763 		bool is_pf;
1764 		int ret;
1765 
1766 		is_pf = !(vport->vport_id);
1767 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1768 					     i, is_pf);
1769 		if (ret)
1770 			return ret;
1771 	}
1772 
1773 	return 0;
1774 }
1775 
1776 static int hclge_map_tqp(struct hclge_dev *hdev)
1777 {
1778 	struct hclge_vport *vport = hdev->vport;
1779 	u16 i, num_vport;
1780 
1781 	num_vport = hdev->num_req_vfs + 1;
1782 	for (i = 0; i < num_vport; i++)	{
1783 		int ret;
1784 
1785 		ret = hclge_map_tqp_to_vport(hdev, vport);
1786 		if (ret)
1787 			return ret;
1788 
1789 		vport++;
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1796 {
1797 	struct hnae3_handle *nic = &vport->nic;
1798 	struct hclge_dev *hdev = vport->back;
1799 	int ret;
1800 
1801 	nic->pdev = hdev->pdev;
1802 	nic->ae_algo = &ae_algo;
1803 	nic->numa_node_mask = hdev->numa_node_mask;
1804 
1805 	ret = hclge_knic_setup(vport, num_tqps,
1806 			       hdev->num_tx_desc, hdev->num_rx_desc);
1807 	if (ret)
1808 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1809 
1810 	return ret;
1811 }
1812 
1813 static int hclge_alloc_vport(struct hclge_dev *hdev)
1814 {
1815 	struct pci_dev *pdev = hdev->pdev;
1816 	struct hclge_vport *vport;
1817 	u32 tqp_main_vport;
1818 	u32 tqp_per_vport;
1819 	int num_vport, i;
1820 	int ret;
1821 
1822 	/* We need to alloc a vport for the main NIC of the PF */
1823 	num_vport = hdev->num_req_vfs + 1;
1824 
1825 	if (hdev->num_tqps < num_vport) {
1826 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
1827 			hdev->num_tqps, num_vport);
1828 		return -EINVAL;
1829 	}
1830 
1831 	/* Alloc the same number of TQPs for every vport */
1832 	tqp_per_vport = hdev->num_tqps / num_vport;
1833 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1834 
1835 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1836 			     GFP_KERNEL);
1837 	if (!vport)
1838 		return -ENOMEM;
1839 
1840 	hdev->vport = vport;
1841 	hdev->num_alloc_vport = num_vport;
1842 
1843 	if (IS_ENABLED(CONFIG_PCI_IOV))
1844 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1845 
1846 	for (i = 0; i < num_vport; i++) {
1847 		vport->back = hdev;
1848 		vport->vport_id = i;
1849 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1850 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1851 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1852 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1853 		vport->req_vlan_fltr_en = true;
1854 		INIT_LIST_HEAD(&vport->vlan_list);
1855 		INIT_LIST_HEAD(&vport->uc_mac_list);
1856 		INIT_LIST_HEAD(&vport->mc_mac_list);
1857 		spin_lock_init(&vport->mac_list_lock);
1858 
1859 		if (i == 0)
1860 			ret = hclge_vport_setup(vport, tqp_main_vport);
1861 		else
1862 			ret = hclge_vport_setup(vport, tqp_per_vport);
1863 		if (ret) {
1864 			dev_err(&pdev->dev,
1865 				"vport setup failed for vport %d, %d\n",
1866 				i, ret);
1867 			return ret;
1868 		}
1869 
1870 		vport++;
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1877 				    struct hclge_pkt_buf_alloc *buf_alloc)
1878 {
1879 /* TX buffer size is allocated in units of 128 bytes */
1880 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1881 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1882 	struct hclge_tx_buff_alloc_cmd *req;
1883 	struct hclge_desc desc;
1884 	int ret;
1885 	u8 i;
1886 
1887 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1888 
1889 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1890 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1891 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1892 
1893 		req->tx_pkt_buff[i] =
1894 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1895 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1896 	}
1897 
1898 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1899 	if (ret)
1900 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1901 			ret);
1902 
1903 	return ret;
1904 }
1905 
1906 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1907 				 struct hclge_pkt_buf_alloc *buf_alloc)
1908 {
1909 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1910 
1911 	if (ret)
1912 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1913 
1914 	return ret;
1915 }
1916 
1917 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1918 {
1919 	unsigned int i;
1920 	u32 cnt = 0;
1921 
1922 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1923 		if (hdev->hw_tc_map & BIT(i))
1924 			cnt++;
1925 	return cnt;
1926 }
1927 
1928 /* Get the number of PFC-enabled TCs that have a private buffer */
1929 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1930 				  struct hclge_pkt_buf_alloc *buf_alloc)
1931 {
1932 	struct hclge_priv_buf *priv;
1933 	unsigned int i;
1934 	int cnt = 0;
1935 
1936 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1937 		priv = &buf_alloc->priv_buf[i];
1938 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1939 		    priv->enable)
1940 			cnt++;
1941 	}
1942 
1943 	return cnt;
1944 }
1945 
1946 /* Get the number of PFC-disabled TCs that have a private buffer */
1947 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1948 				     struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 	struct hclge_priv_buf *priv;
1951 	unsigned int i;
1952 	int cnt = 0;
1953 
1954 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1955 		priv = &buf_alloc->priv_buf[i];
1956 		if (hdev->hw_tc_map & BIT(i) &&
1957 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1958 		    priv->enable)
1959 			cnt++;
1960 	}
1961 
1962 	return cnt;
1963 }
1964 
1965 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1966 {
1967 	struct hclge_priv_buf *priv;
1968 	u32 rx_priv = 0;
1969 	int i;
1970 
1971 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1972 		priv = &buf_alloc->priv_buf[i];
1973 		if (priv->enable)
1974 			rx_priv += priv->buf_size;
1975 	}
1976 	return rx_priv;
1977 }
1978 
1979 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1980 {
1981 	u32 i, total_tx_size = 0;
1982 
1983 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1984 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1985 
1986 	return total_tx_size;
1987 }
1988 
1989 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1990 				struct hclge_pkt_buf_alloc *buf_alloc,
1991 				u32 rx_all)
1992 {
1993 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1994 	u32 tc_num = hclge_get_tc_num(hdev);
1995 	u32 shared_buf, aligned_mps;
1996 	u32 rx_priv;
1997 	int i;
1998 
1999 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2000 
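	/* the shared buffer must satisfy both a device-level minimum and
	 * a per-TC minimum; the larger of the two, rounded up to the
	 * buffer unit, is the required shared standard size
	 */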
2001 	if (hnae3_dev_dcb_supported(hdev))
2002 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2003 					hdev->dv_buf_size;
2004 	else
2005 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2006 					+ hdev->dv_buf_size;
2007 
2008 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2009 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2010 			     HCLGE_BUF_SIZE_UNIT);
2011 
2012 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2013 	if (rx_all < rx_priv + shared_std)
2014 		return false;
2015 
2016 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2017 	buf_alloc->s_buf.buf_size = shared_buf;
2018 	if (hnae3_dev_dcb_supported(hdev)) {
2019 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2020 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2021 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2022 				  HCLGE_BUF_SIZE_UNIT);
2023 	} else {
2024 		buf_alloc->s_buf.self.high = aligned_mps +
2025 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2026 		buf_alloc->s_buf.self.low = aligned_mps;
2027 	}
2028 
2029 	if (hnae3_dev_dcb_supported(hdev)) {
2030 		hi_thrd = shared_buf - hdev->dv_buf_size;
2031 
2032 		if (tc_num <= NEED_RESERVE_TC_NUM)
2033 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2034 					/ BUF_MAX_PERCENT;
2035 
2036 		if (tc_num)
2037 			hi_thrd = hi_thrd / tc_num;
2038 
2039 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2040 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2041 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2042 	} else {
2043 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2044 		lo_thrd = aligned_mps;
2045 	}
2046 
2047 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2048 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2049 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2050 	}
2051 
2052 	return true;
2053 }
2054 
2055 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2056 				struct hclge_pkt_buf_alloc *buf_alloc)
2057 {
2058 	u32 i, total_size;
2059 
2060 	total_size = hdev->pkt_buf_size;
2061 
2062 	/* alloc tx buffer for all enabled tc */
2063 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2065 
2066 		if (hdev->hw_tc_map & BIT(i)) {
2067 			if (total_size < hdev->tx_buf_size)
2068 				return -ENOMEM;
2069 
2070 			priv->tx_buf_size = hdev->tx_buf_size;
2071 		} else {
2072 			priv->tx_buf_size = 0;
2073 		}
2074 
2075 		total_size -= priv->tx_buf_size;
2076 	}
2077 
2078 	return 0;
2079 }
2080 
2081 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2082 				  struct hclge_pkt_buf_alloc *buf_alloc)
2083 {
2084 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2085 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2086 	unsigned int i;
2087 
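	/* "max" selects the larger watermark layout; the caller tries it
	 * first and falls back to the smaller layout if the total rx
	 * buffer cannot hold it
	 */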
2088 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2089 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2090 
2091 		priv->enable = 0;
2092 		priv->wl.low = 0;
2093 		priv->wl.high = 0;
2094 		priv->buf_size = 0;
2095 
2096 		if (!(hdev->hw_tc_map & BIT(i)))
2097 			continue;
2098 
2099 		priv->enable = 1;
2100 
2101 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2102 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2103 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2104 						HCLGE_BUF_SIZE_UNIT);
2105 		} else {
2106 			priv->wl.low = 0;
2107 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2108 					aligned_mps;
2109 		}
2110 
2111 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2112 	}
2113 
2114 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2115 }
2116 
2117 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2118 					  struct hclge_pkt_buf_alloc *buf_alloc)
2119 {
2120 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2122 	int i;
2123 
2124 	/* clear the TCs from the last one backwards */
2125 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2126 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2127 		unsigned int mask = BIT((unsigned int)i);
2128 
2129 		if (hdev->hw_tc_map & mask &&
2130 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2131 			/* Clear the private buffer of this non-PFC TC */
2132 			priv->wl.low = 0;
2133 			priv->wl.high = 0;
2134 			priv->buf_size = 0;
2135 			priv->enable = 0;
2136 			no_pfc_priv_num--;
2137 		}
2138 
2139 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2140 		    no_pfc_priv_num == 0)
2141 			break;
2142 	}
2143 
2144 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2145 }
2146 
2147 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2148 					struct hclge_pkt_buf_alloc *buf_alloc)
2149 {
2150 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2151 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2152 	int i;
2153 
2154 	/* clear the TCs from the last one backwards */
2155 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2156 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2157 		unsigned int mask = BIT((unsigned int)i);
2158 
2159 		if (hdev->hw_tc_map & mask &&
2160 		    hdev->tm_info.hw_pfc_map & mask) {
2161 			/* Reduce the number of PFC TCs with a private buffer */
2162 			priv->wl.low = 0;
2163 			priv->enable = 0;
2164 			priv->wl.high = 0;
2165 			priv->buf_size = 0;
2166 			pfc_priv_num--;
2167 		}
2168 
2169 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2170 		    pfc_priv_num == 0)
2171 			break;
2172 	}
2173 
2174 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2175 }
2176 
2177 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2178 				      struct hclge_pkt_buf_alloc *buf_alloc)
2179 {
2180 #define COMPENSATE_BUFFER	0x3C00
2181 #define COMPENSATE_HALF_MPS_NUM	5
2182 #define PRIV_WL_GAP		0x1800
2183 
2184 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2185 	u32 tc_num = hclge_get_tc_num(hdev);
2186 	u32 half_mps = hdev->mps >> 1;
2187 	u32 min_rx_priv;
2188 	unsigned int i;
2189 
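	/* split the whole rx buffer evenly among the enabled TCs as
	 * private buffers with no shared buffer; give up if the per-TC
	 * share is below the minimum a TC needs
	 */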
2190 	if (tc_num)
2191 		rx_priv = rx_priv / tc_num;
2192 
2193 	if (tc_num <= NEED_RESERVE_TC_NUM)
2194 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2195 
2196 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2197 			COMPENSATE_HALF_MPS_NUM * half_mps;
2198 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2199 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2200 	if (rx_priv < min_rx_priv)
2201 		return false;
2202 
2203 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2204 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2205 
2206 		priv->enable = 0;
2207 		priv->wl.low = 0;
2208 		priv->wl.high = 0;
2209 		priv->buf_size = 0;
2210 
2211 		if (!(hdev->hw_tc_map & BIT(i)))
2212 			continue;
2213 
2214 		priv->enable = 1;
2215 		priv->buf_size = rx_priv;
2216 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2217 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2218 	}
2219 
2220 	buf_alloc->s_buf.buf_size = 0;
2221 
2222 	return true;
2223 }
2224 
2225 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2226  * @hdev: pointer to struct hclge_dev
2227  * @buf_alloc: pointer to buffer calculation data
2228  * @return: 0 on success, negative on failure
2229  */
2230 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2231 				struct hclge_pkt_buf_alloc *buf_alloc)
2232 {
2233 	/* When DCB is not supported, rx private buffer is not allocated. */
2234 	if (!hnae3_dev_dcb_supported(hdev)) {
2235 		u32 rx_all = hdev->pkt_buf_size;
2236 
2237 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2238 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2239 			return -ENOMEM;
2240 
2241 		return 0;
2242 	}
2243 
2244 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2245 		return 0;
2246 
2247 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2248 		return 0;
2249 
2250 	/* try to decrease the buffer size */
2251 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2252 		return 0;
2253 
2254 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2255 		return 0;
2256 
2257 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2258 		return 0;
2259 
2260 	return -ENOMEM;
2261 }
2262 
2263 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2264 				   struct hclge_pkt_buf_alloc *buf_alloc)
2265 {
2266 	struct hclge_rx_priv_buff_cmd *req;
2267 	struct hclge_desc desc;
2268 	int ret;
2269 	int i;
2270 
2271 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2272 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2273 
2274 	/* Alloc private buffer TCs */
2275 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2276 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2277 
2278 		req->buf_num[i] =
2279 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2280 		req->buf_num[i] |=
2281 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2282 	}
2283 
2284 	req->shared_buf =
2285 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2286 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2287 
2288 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2289 	if (ret)
2290 		dev_err(&hdev->pdev->dev,
2291 			"rx private buffer alloc cmd failed %d\n", ret);
2292 
2293 	return ret;
2294 }
2295 
2296 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2297 				   struct hclge_pkt_buf_alloc *buf_alloc)
2298 {
2299 	struct hclge_rx_priv_wl_buf *req;
2300 	struct hclge_priv_buf *priv;
2301 	struct hclge_desc desc[2];
2302 	int i, j;
2303 	int ret;
2304 
2305 	for (i = 0; i < 2; i++) {
2306 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2307 					   false);
2308 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2309 
2310 		/* The first descriptor sets the NEXT bit to 1 */
2311 		if (i == 0)
2312 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2313 		else
2314 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2315 
2316 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2317 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2318 
2319 			priv = &buf_alloc->priv_buf[idx];
2320 			req->tc_wl[j].high =
2321 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2322 			req->tc_wl[j].high |=
2323 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2324 			req->tc_wl[j].low =
2325 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2326 			req->tc_wl[j].low |=
2327 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2328 		}
2329 	}
2330 
2331 	/* Send 2 descriptors at one time */
2332 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2333 	if (ret)
2334 		dev_err(&hdev->pdev->dev,
2335 			"rx private waterline config cmd failed %d\n",
2336 			ret);
2337 	return ret;
2338 }
2339 
2340 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2341 				    struct hclge_pkt_buf_alloc *buf_alloc)
2342 {
2343 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2344 	struct hclge_rx_com_thrd *req;
2345 	struct hclge_desc desc[2];
2346 	struct hclge_tc_thrd *tc;
2347 	int i, j;
2348 	int ret;
2349 
2350 	for (i = 0; i < 2; i++) {
2351 		hclge_cmd_setup_basic_desc(&desc[i],
2352 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2353 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2354 
2355 		/* The first descriptor sets the NEXT bit to 1 */
2356 		if (i == 0)
2357 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2358 		else
2359 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2360 
2361 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2362 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2363 
2364 			req->com_thrd[j].high =
2365 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2366 			req->com_thrd[j].high |=
2367 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368 			req->com_thrd[j].low =
2369 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2370 			req->com_thrd[j].low |=
2371 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2372 		}
2373 	}
2374 
2375 	/* Send 2 descriptors at one time */
2376 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2377 	if (ret)
2378 		dev_err(&hdev->pdev->dev,
2379 			"common threshold config cmd failed %d\n", ret);
2380 	return ret;
2381 }
2382 
2383 static int hclge_common_wl_config(struct hclge_dev *hdev,
2384 				  struct hclge_pkt_buf_alloc *buf_alloc)
2385 {
2386 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2387 	struct hclge_rx_com_wl *req;
2388 	struct hclge_desc desc;
2389 	int ret;
2390 
2391 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2392 
2393 	req = (struct hclge_rx_com_wl *)desc.data;
2394 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2395 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2396 
2397 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2398 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2399 
2400 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2401 	if (ret)
2402 		dev_err(&hdev->pdev->dev,
2403 			"common waterline config cmd failed %d\n", ret);
2404 
2405 	return ret;
2406 }
2407 
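/* hclge_buffer_alloc: calculate the tx/rx packet buffer layout and program
 * it into hardware; the per-TC waterline and threshold commands are only
 * issued on DCB-capable devices
 */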
2408 int hclge_buffer_alloc(struct hclge_dev *hdev)
2409 {
2410 	struct hclge_pkt_buf_alloc *pkt_buf;
2411 	int ret;
2412 
2413 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2414 	if (!pkt_buf)
2415 		return -ENOMEM;
2416 
2417 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2418 	if (ret) {
2419 		dev_err(&hdev->pdev->dev,
2420 			"could not calc tx buffer size for all TCs %d\n", ret);
2421 		goto out;
2422 	}
2423 
2424 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2425 	if (ret) {
2426 		dev_err(&hdev->pdev->dev,
2427 			"could not alloc tx buffers %d\n", ret);
2428 		goto out;
2429 	}
2430 
2431 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2432 	if (ret) {
2433 		dev_err(&hdev->pdev->dev,
2434 			"could not calc rx priv buffer size for all TCs %d\n",
2435 			ret);
2436 		goto out;
2437 	}
2438 
2439 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2440 	if (ret) {
2441 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2442 			ret);
2443 		goto out;
2444 	}
2445 
2446 	if (hnae3_dev_dcb_supported(hdev)) {
2447 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2448 		if (ret) {
2449 			dev_err(&hdev->pdev->dev,
2450 				"could not configure rx private waterline %d\n",
2451 				ret);
2452 			goto out;
2453 		}
2454 
2455 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2456 		if (ret) {
2457 			dev_err(&hdev->pdev->dev,
2458 				"could not configure common threshold %d\n",
2459 				ret);
2460 			goto out;
2461 		}
2462 	}
2463 
2464 	ret = hclge_common_wl_config(hdev, pkt_buf);
2465 	if (ret)
2466 		dev_err(&hdev->pdev->dev,
2467 			"could not configure common waterline %d\n", ret);
2468 
2469 out:
2470 	kfree(pkt_buf);
2471 	return ret;
2472 }
2473 
2474 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2475 {
2476 	struct hnae3_handle *roce = &vport->roce;
2477 	struct hnae3_handle *nic = &vport->nic;
2478 	struct hclge_dev *hdev = vport->back;
2479 
2480 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2481 
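	/* RoCE vectors come out of the PF's MSI pool, so make sure enough
	 * vectors were allocated to cover both NIC and RoCE
	 */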
2482 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2483 		return -EINVAL;
2484 
2485 	roce->rinfo.base_vector = hdev->roce_base_vector;
2486 
2487 	roce->rinfo.netdev = nic->kinfo.netdev;
2488 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2489 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2490 
2491 	roce->pdev = nic->pdev;
2492 	roce->ae_algo = nic->ae_algo;
2493 	roce->numa_node_mask = nic->numa_node_mask;
2494 
2495 	return 0;
2496 }
2497 
2498 static int hclge_init_msi(struct hclge_dev *hdev)
2499 {
2500 	struct pci_dev *pdev = hdev->pdev;
2501 	int vectors;
2502 	int i;
2503 
2504 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2505 					hdev->num_msi,
2506 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2507 	if (vectors < 0) {
2508 		dev_err(&pdev->dev,
2509 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2510 			vectors);
2511 		return vectors;
2512 	}
2513 	if (vectors < hdev->num_msi)
2514 		dev_warn(&hdev->pdev->dev,
2515 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2516 			 hdev->num_msi, vectors);
2517 
2518 	hdev->num_msi = vectors;
2519 	hdev->num_msi_left = vectors;
2520 
2521 	hdev->base_msi_vector = pdev->irq;
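	/* RoCE vectors follow the NIC vectors in the allocated range */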
2522 	hdev->roce_base_vector = hdev->base_msi_vector +
2523 				hdev->num_nic_msi;
2524 
2525 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2526 					   sizeof(u16), GFP_KERNEL);
2527 	if (!hdev->vector_status) {
2528 		pci_free_irq_vectors(pdev);
2529 		return -ENOMEM;
2530 	}
2531 
2532 	for (i = 0; i < hdev->num_msi; i++)
2533 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2534 
2535 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2536 					sizeof(int), GFP_KERNEL);
2537 	if (!hdev->vector_irq) {
2538 		pci_free_irq_vectors(pdev);
2539 		return -ENOMEM;
2540 	}
2541 
2542 	return 0;
2543 }
2544 
2545 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2546 {
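	/* half duplex is only valid at 10M/100M; force full duplex otherwise */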
2547 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2548 		duplex = HCLGE_MAC_FULL;
2549 
2550 	return duplex;
2551 }
2552 
2553 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2554 				      u8 duplex)
2555 {
2556 	struct hclge_config_mac_speed_dup_cmd *req;
2557 	struct hclge_desc desc;
2558 	int ret;
2559 
2560 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2561 
2562 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2563 
2564 	if (duplex)
2565 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2566 
2567 	switch (speed) {
2568 	case HCLGE_MAC_SPEED_10M:
2569 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 				HCLGE_CFG_SPEED_S, 6);
2571 		break;
2572 	case HCLGE_MAC_SPEED_100M:
2573 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 				HCLGE_CFG_SPEED_S, 7);
2575 		break;
2576 	case HCLGE_MAC_SPEED_1G:
2577 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 				HCLGE_CFG_SPEED_S, 0);
2579 		break;
2580 	case HCLGE_MAC_SPEED_10G:
2581 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 				HCLGE_CFG_SPEED_S, 1);
2583 		break;
2584 	case HCLGE_MAC_SPEED_25G:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, 2);
2587 		break;
2588 	case HCLGE_MAC_SPEED_40G:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, 3);
2591 		break;
2592 	case HCLGE_MAC_SPEED_50G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, 4);
2595 		break;
2596 	case HCLGE_MAC_SPEED_100G:
2597 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 				HCLGE_CFG_SPEED_S, 5);
2599 		break;
2600 	case HCLGE_MAC_SPEED_200G:
2601 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 				HCLGE_CFG_SPEED_S, 8);
2603 		break;
2604 	default:
2605 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2606 		return -EINVAL;
2607 	}
2608 
2609 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2610 		      1);
2611 
2612 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2613 	if (ret) {
2614 		dev_err(&hdev->pdev->dev,
2615 			"mac speed/duplex config cmd failed %d.\n", ret);
2616 		return ret;
2617 	}
2618 
2619 	return 0;
2620 }
2621 
2622 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2623 {
2624 	struct hclge_mac *mac = &hdev->hw.mac;
2625 	int ret;
2626 
2627 	duplex = hclge_check_speed_dup(duplex, speed);
2628 	if (!mac->support_autoneg && mac->speed == speed &&
2629 	    mac->duplex == duplex)
2630 		return 0;
2631 
2632 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2633 	if (ret)
2634 		return ret;
2635 
2636 	hdev->hw.mac.speed = speed;
2637 	hdev->hw.mac.duplex = duplex;
2638 
2639 	return 0;
2640 }
2641 
2642 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2643 				     u8 duplex)
2644 {
2645 	struct hclge_vport *vport = hclge_get_vport(handle);
2646 	struct hclge_dev *hdev = vport->back;
2647 
2648 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2649 }
2650 
2651 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2652 {
2653 	struct hclge_config_auto_neg_cmd *req;
2654 	struct hclge_desc desc;
2655 	u32 flag = 0;
2656 	int ret;
2657 
2658 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2659 
2660 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2661 	if (enable)
2662 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2663 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2664 
2665 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2666 	if (ret)
2667 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2668 			ret);
2669 
2670 	return ret;
2671 }
2672 
2673 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2674 {
2675 	struct hclge_vport *vport = hclge_get_vport(handle);
2676 	struct hclge_dev *hdev = vport->back;
2677 
2678 	if (!hdev->hw.mac.support_autoneg) {
2679 		if (enable) {
2680 			dev_err(&hdev->pdev->dev,
2681 				"autoneg is not supported by current port\n");
2682 			return -EOPNOTSUPP;
2683 		} else {
2684 			return 0;
2685 		}
2686 	}
2687 
2688 	return hclge_set_autoneg_en(hdev, enable);
2689 }
2690 
2691 static int hclge_get_autoneg(struct hnae3_handle *handle)
2692 {
2693 	struct hclge_vport *vport = hclge_get_vport(handle);
2694 	struct hclge_dev *hdev = vport->back;
2695 	struct phy_device *phydev = hdev->hw.mac.phydev;
2696 
2697 	if (phydev)
2698 		return phydev->autoneg;
2699 
2700 	return hdev->hw.mac.autoneg;
2701 }
2702 
2703 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2704 {
2705 	struct hclge_vport *vport = hclge_get_vport(handle);
2706 	struct hclge_dev *hdev = vport->back;
2707 	int ret;
2708 
2709 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2710 
2711 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2712 	if (ret)
2713 		return ret;
2714 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2715 }
2716 
2717 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2718 {
2719 	struct hclge_vport *vport = hclge_get_vport(handle);
2720 	struct hclge_dev *hdev = vport->back;
2721 
2722 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2723 		return hclge_set_autoneg_en(hdev, !halt);
2724 
2725 	return 0;
2726 }
2727 
2728 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2729 {
2730 	struct hclge_config_fec_cmd *req;
2731 	struct hclge_desc desc;
2732 	int ret;
2733 
2734 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2735 
2736 	req = (struct hclge_config_fec_cmd *)desc.data;
2737 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2738 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2739 	if (fec_mode & BIT(HNAE3_FEC_RS))
2740 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2741 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2742 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2743 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2744 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2745 
2746 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2747 	if (ret)
2748 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2749 
2750 	return ret;
2751 }
2752 
2753 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2754 {
2755 	struct hclge_vport *vport = hclge_get_vport(handle);
2756 	struct hclge_dev *hdev = vport->back;
2757 	struct hclge_mac *mac = &hdev->hw.mac;
2758 	int ret;
2759 
2760 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2761 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2762 		return -EINVAL;
2763 	}
2764 
2765 	ret = hclge_set_fec_hw(hdev, fec_mode);
2766 	if (ret)
2767 		return ret;
2768 
2769 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2770 	return 0;
2771 }
2772 
2773 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2774 			  u8 *fec_mode)
2775 {
2776 	struct hclge_vport *vport = hclge_get_vport(handle);
2777 	struct hclge_dev *hdev = vport->back;
2778 	struct hclge_mac *mac = &hdev->hw.mac;
2779 
2780 	if (fec_ability)
2781 		*fec_ability = mac->fec_ability;
2782 	if (fec_mode)
2783 		*fec_mode = mac->fec_mode;
2784 }
2785 
2786 static int hclge_mac_init(struct hclge_dev *hdev)
2787 {
2788 	struct hclge_mac *mac = &hdev->hw.mac;
2789 	int ret;
2790 
2791 	hdev->support_sfp_query = true;
2792 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2793 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2794 					 hdev->hw.mac.duplex);
2795 	if (ret)
2796 		return ret;
2797 
2798 	if (hdev->hw.mac.support_autoneg) {
2799 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2800 		if (ret)
2801 			return ret;
2802 	}
2803 
2804 	mac->link = 0;
2805 
2806 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2807 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2808 		if (ret)
2809 			return ret;
2810 	}
2811 
2812 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2813 	if (ret) {
2814 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2815 		return ret;
2816 	}
2817 
2818 	ret = hclge_set_default_loopback(hdev);
2819 	if (ret)
2820 		return ret;
2821 
2822 	ret = hclge_buffer_alloc(hdev);
2823 	if (ret)
2824 		dev_err(&hdev->pdev->dev,
2825 			"allocate buffer fail, ret=%d\n", ret);
2826 
2827 	return ret;
2828 }
2829 
2830 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2831 {
2832 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2834 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 				    hclge_wq, &hdev->service_task, 0);
2836 }
2837 
2838 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2839 {
2840 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2842 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 				    hclge_wq, &hdev->service_task, 0);
2844 }
2845 
2846 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2847 {
2848 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2850 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851 				    hclge_wq, &hdev->service_task,
2852 				    delay_time);
2853 }
2854 
2855 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2856 {
2857 	struct hclge_link_status_cmd *req;
2858 	struct hclge_desc desc;
2859 	int ret;
2860 
2861 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2862 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2863 	if (ret) {
2864 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2865 			ret);
2866 		return ret;
2867 	}
2868 
2869 	req = (struct hclge_link_status_cmd *)desc.data;
2870 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2871 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2872 
2873 	return 0;
2874 }
2875 
2876 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2877 {
2878 	struct phy_device *phydev = hdev->hw.mac.phydev;
2879 
2880 	*link_status = HCLGE_LINK_STATUS_DOWN;
2881 
2882 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2883 		return 0;
2884 
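	/* when a PHY is present, only query the MAC link status if the
	 * PHY itself is running and reports link up
	 */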
2885 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2886 		return 0;
2887 
2888 	return hclge_get_mac_link_status(hdev, link_status);
2889 }
2890 
2891 static void hclge_push_link_status(struct hclge_dev *hdev)
2892 {
2893 	struct hclge_vport *vport;
2894 	int ret;
2895 	u16 i;
2896 
2897 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2898 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2899 
2900 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2901 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2902 			continue;
2903 
2904 		ret = hclge_push_vf_link_status(vport);
2905 		if (ret) {
2906 			dev_err(&hdev->pdev->dev,
2907 				"failed to push link status to vf%u, ret = %d\n",
2908 				i, ret);
2909 		}
2910 	}
2911 }
2912 
2913 static void hclge_update_link_status(struct hclge_dev *hdev)
2914 {
2915 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2916 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2917 	struct hnae3_client *rclient = hdev->roce_client;
2918 	struct hnae3_client *client = hdev->nic_client;
2919 	int state;
2920 	int ret;
2921 
2922 	if (!client)
2923 		return;
2924 
2925 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2926 		return;
2927 
2928 	ret = hclge_get_mac_phy_link(hdev, &state);
2929 	if (ret) {
2930 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2931 		return;
2932 	}
2933 
2934 	if (state != hdev->hw.mac.link) {
2935 		client->ops->link_status_change(handle, state);
2936 		hclge_config_mac_tnl_int(hdev, state);
2937 		if (rclient && rclient->ops->link_status_change)
2938 			rclient->ops->link_status_change(rhandle, state);
2939 
2940 		hdev->hw.mac.link = state;
2941 		hclge_push_link_status(hdev);
2942 	}
2943 
2944 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2945 }
2946 
2947 static void hclge_update_port_capability(struct hclge_dev *hdev,
2948 					 struct hclge_mac *mac)
2949 {
2950 	if (hnae3_dev_fec_supported(hdev))
2951 		/* update fec ability by speed */
2952 		hclge_convert_setting_fec(mac);
2953 
2954 	/* firmware cannot identify the backplane type; the media type
2955 	 * read from the configuration helps to handle it
2956 	 */
2957 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2958 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2959 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2960 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2961 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2962 
2963 	if (mac->support_autoneg) {
2964 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2965 		linkmode_copy(mac->advertising, mac->supported);
2966 	} else {
2967 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2968 				   mac->supported);
2969 		linkmode_zero(mac->advertising);
2970 	}
2971 }
2972 
2973 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2974 {
2975 	struct hclge_sfp_info_cmd *resp;
2976 	struct hclge_desc desc;
2977 	int ret;
2978 
2979 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2980 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2981 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2982 	if (ret == -EOPNOTSUPP) {
2983 		dev_warn(&hdev->pdev->dev,
2984 			 "IMP does not support getting SFP speed %d\n", ret);
2985 		return ret;
2986 	} else if (ret) {
2987 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2988 		return ret;
2989 	}
2990 
2991 	*speed = le32_to_cpu(resp->speed);
2992 
2993 	return 0;
2994 }
2995 
2996 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2997 {
2998 	struct hclge_sfp_info_cmd *resp;
2999 	struct hclge_desc desc;
3000 	int ret;
3001 
3002 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3003 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3004 
3005 	resp->query_type = QUERY_ACTIVE_SPEED;
3006 
3007 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3008 	if (ret == -EOPNOTSUPP) {
3009 		dev_warn(&hdev->pdev->dev,
3010 			 "IMP does not support getting SFP info %d\n", ret);
3011 		return ret;
3012 	} else if (ret) {
3013 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3014 		return ret;
3015 	}
3016 
3017 	/* In some cases, the MAC speed fetched from the IMP may be 0; it
3018 	 * should not be assigned to mac->speed.
3019 	 */
3020 	if (!le32_to_cpu(resp->speed))
3021 		return 0;
3022 
3023 	mac->speed = le32_to_cpu(resp->speed);
3024 	/* if resp->speed_ability is 0, the firmware is an old version;
3025 	 * do not update these parameters
3026 	 */
3027 	if (resp->speed_ability) {
3028 		mac->module_type = le32_to_cpu(resp->module_type);
3029 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3030 		mac->autoneg = resp->autoneg;
3031 		mac->support_autoneg = resp->autoneg_ability;
3032 		mac->speed_type = QUERY_ACTIVE_SPEED;
3033 		if (!resp->active_fec)
3034 			mac->fec_mode = 0;
3035 		else
3036 			mac->fec_mode = BIT(resp->active_fec);
3037 	} else {
3038 		mac->speed_type = QUERY_SFP_SPEED;
3039 	}
3040 
3041 	return 0;
3042 }
3043 
3044 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3045 					struct ethtool_link_ksettings *cmd)
3046 {
3047 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3048 	struct hclge_vport *vport = hclge_get_vport(handle);
3049 	struct hclge_phy_link_ksetting_0_cmd *req0;
3050 	struct hclge_phy_link_ksetting_1_cmd *req1;
3051 	u32 supported, advertising, lp_advertising;
3052 	struct hclge_dev *hdev = vport->back;
3053 	int ret;
3054 
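	/* the PHY link ksetting query spans two descriptors chained with
	 * the NEXT flag
	 */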
3055 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3056 				   true);
3057 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3058 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3059 				   true);
3060 
3061 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3062 	if (ret) {
3063 		dev_err(&hdev->pdev->dev,
3064 			"failed to get phy link ksetting, ret = %d.\n", ret);
3065 		return ret;
3066 	}
3067 
3068 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3069 	cmd->base.autoneg = req0->autoneg;
3070 	cmd->base.speed = le32_to_cpu(req0->speed);
3071 	cmd->base.duplex = req0->duplex;
3072 	cmd->base.port = req0->port;
3073 	cmd->base.transceiver = req0->transceiver;
3074 	cmd->base.phy_address = req0->phy_address;
3075 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3076 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3077 	supported = le32_to_cpu(req0->supported);
3078 	advertising = le32_to_cpu(req0->advertising);
3079 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3080 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3081 						supported);
3082 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3083 						advertising);
3084 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3085 						lp_advertising);
3086 
3087 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3088 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3089 	cmd->base.master_slave_state = req1->master_slave_state;
3090 
3091 	return 0;
3092 }
3093 
3094 static int
3095 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3096 			     const struct ethtool_link_ksettings *cmd)
3097 {
3098 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3099 	struct hclge_vport *vport = hclge_get_vport(handle);
3100 	struct hclge_phy_link_ksetting_0_cmd *req0;
3101 	struct hclge_phy_link_ksetting_1_cmd *req1;
3102 	struct hclge_dev *hdev = vport->back;
3103 	u32 advertising;
3104 	int ret;
3105 
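	/* with autoneg disabled, only forced 10M/100M at half or full
	 * duplex is accepted
	 */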
3106 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3107 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3108 	     (cmd->base.duplex != DUPLEX_HALF &&
3109 	      cmd->base.duplex != DUPLEX_FULL)))
3110 		return -EINVAL;
3111 
3112 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3113 				   false);
3114 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3115 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3116 				   false);
3117 
3118 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3119 	req0->autoneg = cmd->base.autoneg;
3120 	req0->speed = cpu_to_le32(cmd->base.speed);
3121 	req0->duplex = cmd->base.duplex;
3122 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3123 						cmd->link_modes.advertising);
3124 	req0->advertising = cpu_to_le32(advertising);
3125 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3126 
3127 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3128 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3129 
3130 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3131 	if (ret) {
3132 		dev_err(&hdev->pdev->dev,
3133 			"failed to set phy link ksettings, ret = %d.\n", ret);
3134 		return ret;
3135 	}
3136 
3137 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3138 	hdev->hw.mac.speed = cmd->base.speed;
3139 	hdev->hw.mac.duplex = cmd->base.duplex;
3140 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3141 
3142 	return 0;
3143 }
3144 
3145 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3146 {
3147 	struct ethtool_link_ksettings cmd;
3148 	int ret;
3149 
3150 	if (!hnae3_dev_phy_imp_supported(hdev))
3151 		return 0;
3152 
3153 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3154 	if (ret)
3155 		return ret;
3156 
3157 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3158 	hdev->hw.mac.speed = cmd.base.speed;
3159 	hdev->hw.mac.duplex = cmd.base.duplex;
3160 
3161 	return 0;
3162 }
3163 
3164 static int hclge_tp_port_init(struct hclge_dev *hdev)
3165 {
3166 	struct ethtool_link_ksettings cmd;
3167 
3168 	if (!hnae3_dev_phy_imp_supported(hdev))
3169 		return 0;
3170 
3171 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3172 	cmd.base.speed = hdev->hw.mac.speed;
3173 	cmd.base.duplex = hdev->hw.mac.duplex;
3174 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3175 
3176 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3177 }
3178 
3179 static int hclge_update_port_info(struct hclge_dev *hdev)
3180 {
3181 	struct hclge_mac *mac = &hdev->hw.mac;
3182 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3183 	int ret;
3184 
3185 	/* get the port info from SFP cmd if not copper port */
3186 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3187 		return hclge_update_tp_port_info(hdev);
3188 
3189 	/* if IMP does not support getting SFP/qSFP info, return directly */
3190 	if (!hdev->support_sfp_query)
3191 		return 0;
3192 
3193 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3194 		ret = hclge_get_sfp_info(hdev, mac);
3195 	else
3196 		ret = hclge_get_sfp_speed(hdev, &speed);
3197 
3198 	if (ret == -EOPNOTSUPP) {
3199 		hdev->support_sfp_query = false;
3200 		return ret;
3201 	} else if (ret) {
3202 		return ret;
3203 	}
3204 
3205 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3206 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3207 			hclge_update_port_capability(hdev, mac);
3208 			return 0;
3209 		}
3210 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3211 					       HCLGE_MAC_FULL);
3212 	} else {
3213 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3214 			return 0; /* do nothing if no SFP */
3215 
3216 		/* must config full duplex for SFP */
3217 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3218 	}
3219 }
3220 
3221 static int hclge_get_status(struct hnae3_handle *handle)
3222 {
3223 	struct hclge_vport *vport = hclge_get_vport(handle);
3224 	struct hclge_dev *hdev = vport->back;
3225 
3226 	hclge_update_link_status(hdev);
3227 
3228 	return hdev->hw.mac.link;
3229 }
3230 
3231 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3232 {
3233 	if (!pci_num_vf(hdev->pdev)) {
3234 		dev_err(&hdev->pdev->dev,
3235 			"SRIOV is disabled, cannot get vport(%d) info.\n", vf);
3236 		return NULL;
3237 	}
3238 
3239 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3240 		dev_err(&hdev->pdev->dev,
3241 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3242 			vf, pci_num_vf(hdev->pdev));
3243 		return NULL;
3244 	}
3245 
3246 	/* VF vport indices start from 1 */
3247 	vf += HCLGE_VF_VPORT_START_NUM;
3248 	return &hdev->vport[vf];
3249 }
3250 
3251 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3252 			       struct ifla_vf_info *ivf)
3253 {
3254 	struct hclge_vport *vport = hclge_get_vport(handle);
3255 	struct hclge_dev *hdev = vport->back;
3256 
3257 	vport = hclge_get_vf_vport(hdev, vf);
3258 	if (!vport)
3259 		return -EINVAL;
3260 
3261 	ivf->vf = vf;
3262 	ivf->linkstate = vport->vf_info.link_state;
3263 	ivf->spoofchk = vport->vf_info.spoofchk;
3264 	ivf->trusted = vport->vf_info.trusted;
3265 	ivf->min_tx_rate = 0;
3266 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3267 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3268 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3269 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3270 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3271 
3272 	return 0;
3273 }
3274 
3275 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3276 				   int link_state)
3277 {
3278 	struct hclge_vport *vport = hclge_get_vport(handle);
3279 	struct hclge_dev *hdev = vport->back;
3280 	int link_state_old;
3281 	int ret;
3282 
3283 	vport = hclge_get_vf_vport(hdev, vf);
3284 	if (!vport)
3285 		return -EINVAL;
3286 
3287 	link_state_old = vport->vf_info.link_state;
3288 	vport->vf_info.link_state = link_state;
3289 
3290 	ret = hclge_push_vf_link_status(vport);
3291 	if (ret) {
3292 		vport->vf_info.link_state = link_state_old;
3293 		dev_err(&hdev->pdev->dev,
3294 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3295 	}
3296 
3297 	return ret;
3298 }
3299 
3300 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3301 {
3302 	u32 cmdq_src_reg, msix_src_reg;
3303 
3304 	/* fetch the events from their corresponding regs */
3305 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3306 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3307 
3308 	/* Assumption: if reset and mailbox events are reported together,
3309 	 * only the reset event is processed in this pass and the mailbox
3310 	 * events are deferred. Since the RX CMDQ event is not cleared this
3311 	 * time, the hardware will raise another interrupt just for the
3312 	 * mailbox.
3313 	 *
3314 	 * check for vector0 reset event sources
3315 	 */
3316 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3317 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3318 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3319 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3320 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3321 		hdev->rst_stats.imp_rst_cnt++;
3322 		return HCLGE_VECTOR0_EVENT_RST;
3323 	}
3324 
3325 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3326 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3327 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3328 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3329 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3330 		hdev->rst_stats.global_rst_cnt++;
3331 		return HCLGE_VECTOR0_EVENT_RST;
3332 	}
3333 
3334 	/* check for vector0 msix event source */
3335 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3336 		*clearval = msix_src_reg;
3337 		return HCLGE_VECTOR0_EVENT_ERR;
3338 	}
3339 
3340 	/* check for vector0 mailbox(=CMDQ RX) event source */
3341 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3342 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3343 		*clearval = cmdq_src_reg;
3344 		return HCLGE_VECTOR0_EVENT_MBX;
3345 	}
3346 
3347 	/* print other vector0 event source */
3348 	dev_info(&hdev->pdev->dev,
3349 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3350 		 cmdq_src_reg, msix_src_reg);
3351 	*clearval = msix_src_reg;
3352 
3353 	return HCLGE_VECTOR0_EVENT_OTHER;
3354 }
3355 
3356 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3357 				    u32 regclr)
3358 {
3359 	switch (event_type) {
3360 	case HCLGE_VECTOR0_EVENT_RST:
3361 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3362 		break;
3363 	case HCLGE_VECTOR0_EVENT_MBX:
3364 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3365 		break;
3366 	default:
3367 		break;
3368 	}
3369 }
3370 
3371 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3372 {
3373 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3374 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3375 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3376 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3377 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3378 }
3379 
3380 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3381 {
3382 	writel(enable ? 1 : 0, vector->addr);
3383 }
3384 
3385 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3386 {
3387 	struct hclge_dev *hdev = data;
3388 	u32 clearval = 0;
3389 	u32 event_cause;
3390 
3391 	hclge_enable_vector(&hdev->misc_vector, false);
3392 	event_cause = hclge_check_event_cause(hdev, &clearval);
3393 
3394 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3395 	switch (event_cause) {
3396 	case HCLGE_VECTOR0_EVENT_ERR:
3397 		/* We do not know what type of reset is required now. This can
3398 		 * only be decided after we fetch the type of errors which
3399 		 * caused this event. Therefore, for now we do the following:
3400 		 * 1. Assert the HNAE3_UNKNOWN_RESET type of reset. This means
3401 		 *    the actual reset type is deferred.
3402 		 * 2. Schedule the reset service task.
3403 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
3404 		 *    will fetch the correct type of reset by first decoding
3405 		 *    the types of errors.
3406 		 */
3407 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3408 		fallthrough;
3409 	case HCLGE_VECTOR0_EVENT_RST:
3410 		hclge_reset_task_schedule(hdev);
3411 		break;
3412 	case HCLGE_VECTOR0_EVENT_MBX:
3413 		/* If we are here then,
3414 		 * 1. Either we are not handling any mbx task and we are not
3415 		 *    scheduled as well
3416 		 *                        OR
3417 		 * 2. We could be handling an mbx task but nothing more is
3418 		 *    scheduled.
3419 		 * In both cases, we should schedule the mbx task as there are
3420 		 * more mbx messages reported by this interrupt.
3421 		 */
3422 		hclge_mbx_task_schedule(hdev);
3423 		break;
3424 	default:
3425 		dev_warn(&hdev->pdev->dev,
3426 			 "received unknown or unhandled event of vector0\n");
3427 		break;
3428 	}
3429 
3430 	hclge_clear_event_cause(hdev, event_cause, clearval);
3431 
3432 	/* Enable the interrupt if it is not caused by reset. And when
3433 	 * clearval equal to 0, it means interrupt status may be
3434 	 * cleared by hardware before driver reads status register.
3435 	 * For this case, vector0 interrupt also should be enabled.
3436 	 */
3437 	if (!clearval ||
3438 	    event_cause == HCLGE_VECTOR0_EVENT_MBX)
3439 		hclge_enable_vector(&hdev->misc_vector, true);
3441 
3442 	return IRQ_HANDLED;
3443 }
3444 
3445 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3446 {
3447 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3448 		dev_warn(&hdev->pdev->dev,
3449 			 "vector(vector_id %d) has been freed.\n", vector_id);
3450 		return;
3451 	}
3452 
3453 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3454 	hdev->num_msi_left += 1;
3455 	hdev->num_msi_used -= 1;
3456 }
3457 
3458 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3459 {
3460 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3461 
3462 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3463 
3464 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3465 	hdev->vector_status[0] = 0;
3466 
3467 	hdev->num_msi_left -= 1;
3468 	hdev->num_msi_used += 1;
3469 }
3470 
3471 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3472 				      const cpumask_t *mask)
3473 {
3474 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3475 					      affinity_notify);
3476 
3477 	cpumask_copy(&hdev->affinity_mask, mask);
3478 }
3479 
3480 static void hclge_irq_affinity_release(struct kref *ref)
3481 {
3482 }
3483 
3484 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3485 {
3486 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3487 			      &hdev->affinity_mask);
3488 
3489 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3490 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3491 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3492 				  &hdev->affinity_notify);
3493 }
3494 
3495 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3496 {
3497 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3498 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3499 }
3500 
3501 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3502 {
3503 	int ret;
3504 
3505 	hclge_get_misc_vector(hdev);
3506 
3507 	/* this would be explicitly freed in the end */
3508 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3509 		 HCLGE_NAME, pci_name(hdev->pdev));
3510 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3511 			  0, hdev->misc_vector.name, hdev);
3512 	if (ret) {
3513 		hclge_free_vector(hdev, 0);
3514 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3515 			hdev->misc_vector.vector_irq);
3516 	}
3517 
3518 	return ret;
3519 }
3520 
3521 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3522 {
3523 	free_irq(hdev->misc_vector.vector_irq, hdev);
3524 	hclge_free_vector(hdev, 0);
3525 }
3526 
3527 int hclge_notify_client(struct hclge_dev *hdev,
3528 			enum hnae3_reset_notify_type type)
3529 {
3530 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3531 	struct hnae3_client *client = hdev->nic_client;
3532 	int ret;
3533 
3534 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3535 		return 0;
3536 
3537 	if (!client->ops->reset_notify)
3538 		return -EOPNOTSUPP;
3539 
3540 	ret = client->ops->reset_notify(handle, type);
3541 	if (ret)
3542 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3543 			type, ret);
3544 
3545 	return ret;
3546 }
3547 
3548 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3549 				    enum hnae3_reset_notify_type type)
3550 {
3551 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3552 	struct hnae3_client *client = hdev->roce_client;
3553 	int ret;
3554 
3555 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3556 		return 0;
3557 
3558 	if (!client->ops->reset_notify)
3559 		return -EOPNOTSUPP;
3560 
3561 	ret = client->ops->reset_notify(handle, type);
3562 	if (ret)
3563 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3564 			type, ret);
3565 
3566 	return ret;
3567 }
3568 
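/* Wait for the hardware to finish the current reset: poll the reset status
 * bit for hdev->reset_type until it clears, or return -EBUSY on timeout.
 */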
3569 static int hclge_reset_wait(struct hclge_dev *hdev)
3570 {
3571 #define HCLGE_RESET_WAIT_MS	100
3572 #define HCLGE_RESET_WAIT_CNT	350
3573 
3574 	u32 val, reg, reg_bit;
3575 	u32 cnt = 0;
3576 
3577 	switch (hdev->reset_type) {
3578 	case HNAE3_IMP_RESET:
3579 		reg = HCLGE_GLOBAL_RESET_REG;
3580 		reg_bit = HCLGE_IMP_RESET_BIT;
3581 		break;
3582 	case HNAE3_GLOBAL_RESET:
3583 		reg = HCLGE_GLOBAL_RESET_REG;
3584 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3585 		break;
3586 	case HNAE3_FUNC_RESET:
3587 		reg = HCLGE_FUN_RST_ING;
3588 		reg_bit = HCLGE_FUN_RST_ING_B;
3589 		break;
3590 	default:
3591 		dev_err(&hdev->pdev->dev,
3592 			"Wait for unsupported reset type: %d\n",
3593 			hdev->reset_type);
3594 		return -EINVAL;
3595 	}
3596 
3597 	val = hclge_read_dev(&hdev->hw, reg);
3598 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3599 		msleep(HCLGE_RESET_WAIT_MS);
3600 		val = hclge_read_dev(&hdev->hw, reg);
3601 		cnt++;
3602 	}
3603 
3604 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3605 		dev_warn(&hdev->pdev->dev,
3606 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3607 		return -EBUSY;
3608 	}
3609 
3610 	return 0;
3611 }
3612 
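/* Send a command to set or clear the FUNC_RST_ING state of the VF
 * identified by func_id.
 */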
3613 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3614 {
3615 	struct hclge_vf_rst_cmd *req;
3616 	struct hclge_desc desc;
3617 
3618 	req = (struct hclge_vf_rst_cmd *)desc.data;
3619 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3620 	req->dest_vfid = func_id;
3621 
3622 	if (reset)
3623 		req->vf_rst = 0x1;
3624 
3625 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3626 }
3627 
3628 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3629 {
3630 	int i;
3631 
3632 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3633 		struct hclge_vport *vport = &hdev->vport[i];
3634 		int ret;
3635 
3636 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3637 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3638 		if (ret) {
3639 			dev_err(&hdev->pdev->dev,
3640 				"set vf(%u) rst failed %d!\n",
3641 				vport->vport_id, ret);
3642 			return ret;
3643 		}
3644 
3645 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3646 			continue;
3647 
3648 		/* Inform VF to process the reset.
3649 		 * hclge_inform_reset_assert_to_vf may fail if VF
3650 		 * driver is not loaded.
3651 		 */
3652 		ret = hclge_inform_reset_assert_to_vf(vport);
3653 		if (ret)
3654 			dev_warn(&hdev->pdev->dev,
3655 				 "inform reset to vf(%u) failed %d!\n",
3656 				 vport->vport_id, ret);
3657 	}
3658 
3659 	return 0;
3660 }
3661 
3662 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3663 {
3664 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3665 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3666 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3667 		return;
3668 
3669 	hclge_mbx_handler(hdev);
3670 
3671 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3672 }
3673 
3674 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3675 {
3676 	struct hclge_pf_rst_sync_cmd *req;
3677 	struct hclge_desc desc;
3678 	int cnt = 0;
3679 	int ret;
3680 
3681 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3682 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3683 
3684 	do {
3685 		/* VF needs to down its netdev by mbx during PF or FLR reset */
3686 		hclge_mailbox_service_task(hdev);
3687 
3688 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3689 		/* for compatibility with old firmware, wait
3690 		 * 100 ms for the VF to stop IO
3691 		 */
3692 		if (ret == -EOPNOTSUPP) {
3693 			msleep(HCLGE_RESET_SYNC_TIME);
3694 			return;
3695 		} else if (ret) {
3696 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3697 				 ret);
3698 			return;
3699 		} else if (req->all_vf_ready) {
3700 			return;
3701 		}
3702 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3703 		hclge_cmd_reuse_desc(&desc, true);
3704 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3705 
3706 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3707 }
3708 
3709 void hclge_report_hw_error(struct hclge_dev *hdev,
3710 			   enum hnae3_hw_error_type type)
3711 {
3712 	struct hnae3_client *client = hdev->nic_client;
3713 
3714 	if (!client || !client->ops->process_hw_error ||
3715 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3716 		return;
3717 
3718 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3719 }
3720 
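/* Check the PF other-interrupt register for IMP RD poison and CMDQ ECC
 * errors, report them to the client and clear the corresponding bits.
 */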
3721 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3722 {
3723 	u32 reg_val;
3724 
3725 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3726 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3727 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3728 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3729 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3730 	}
3731 
3732 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3733 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3734 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3735 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3736 	}
3737 }
3738 
3739 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3740 {
3741 	struct hclge_desc desc;
3742 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3743 	int ret;
3744 
3745 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3746 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3747 	req->fun_reset_vfid = func_id;
3748 
3749 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3750 	if (ret)
3751 		dev_err(&hdev->pdev->dev,
3752 			"send function reset cmd fail, status =%d\n", ret);
3753 
3754 	return ret;
3755 }
3756 
3757 static void hclge_do_reset(struct hclge_dev *hdev)
3758 {
3759 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3760 	struct pci_dev *pdev = hdev->pdev;
3761 	u32 val;
3762 
3763 	if (hclge_get_hw_reset_stat(handle)) {
3764 		dev_info(&pdev->dev, "hardware reset not finish\n");
3765 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3766 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3767 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3768 		return;
3769 	}
3770 
3771 	switch (hdev->reset_type) {
3772 	case HNAE3_GLOBAL_RESET:
3773 		dev_info(&pdev->dev, "global reset requested\n");
3774 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3775 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3776 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3777 		break;
3778 	case HNAE3_FUNC_RESET:
3779 		dev_info(&pdev->dev, "PF reset requested\n");
3780 		/* schedule again to check later */
3781 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3782 		hclge_reset_task_schedule(hdev);
3783 		break;
3784 	default:
3785 		dev_warn(&pdev->dev,
3786 			 "unsupported reset type: %d\n", hdev->reset_type);
3787 		break;
3788 	}
3789 }
3790 
3791 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3792 						   unsigned long *addr)
3793 {
3794 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3795 	struct hclge_dev *hdev = ae_dev->priv;
3796 
3797 	/* first, resolve any unknown reset type to the known type(s) */
3798 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3799 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3800 					HCLGE_MISC_VECTOR_INT_STS);
3801 		/* we will intentionally ignore any errors from this function
3802 		 * as we will end up in *some* reset request in any case
3803 		 */
3804 		if (hclge_handle_hw_msix_error(hdev, addr))
3805 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3806 				 msix_sts_reg);
3807 
3808 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3809 		/* We deferred the clearing of the error event which caused
3810 		 * the interrupt since it was not possible to do that in
3811 		 * interrupt context (and this is the reason we introduced
3812 		 * the UNKNOWN reset type). Now that the errors have been
3813 		 * handled and cleared in hardware, we can safely enable
3814 		 * interrupts. This is an exception to the norm.
3815 		 */
3816 		hclge_enable_vector(&hdev->misc_vector, true);
3817 	}
3818 
3819 	/* return the highest priority reset level amongst all */
3820 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3821 		rst_level = HNAE3_IMP_RESET;
3822 		clear_bit(HNAE3_IMP_RESET, addr);
3823 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3824 		clear_bit(HNAE3_FUNC_RESET, addr);
3825 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3826 		rst_level = HNAE3_GLOBAL_RESET;
3827 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3828 		clear_bit(HNAE3_FUNC_RESET, addr);
3829 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3830 		rst_level = HNAE3_FUNC_RESET;
3831 		clear_bit(HNAE3_FUNC_RESET, addr);
3832 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3833 		rst_level = HNAE3_FLR_RESET;
3834 		clear_bit(HNAE3_FLR_RESET, addr);
3835 	}
3836 
3837 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3838 	    rst_level < hdev->reset_type)
3839 		return HNAE3_NONE_RESET;
3840 
3841 	return rst_level;
3842 }
3843 
3844 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3845 {
3846 	u32 clearval = 0;
3847 
3848 	switch (hdev->reset_type) {
3849 	case HNAE3_IMP_RESET:
3850 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3851 		break;
3852 	case HNAE3_GLOBAL_RESET:
3853 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3854 		break;
3855 	default:
3856 		break;
3857 	}
3858 
3859 	if (!clearval)
3860 		return;
3861 
3862 	/* For revision 0x20, the reset interrupt source
3863 	 * can only be cleared after the hardware reset is done
3864 	 */
3865 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3866 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3867 				clearval);
3868 
3869 	hclge_enable_vector(&hdev->misc_vector, true);
3870 }
3871 
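/* Set or clear the software reset-ready bit used to handshake with the
 * hardware: it tells the hardware whether the driver has finished its
 * reset preparation.
 */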
3872 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3873 {
3874 	u32 reg_val;
3875 
3876 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3877 	if (enable)
3878 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3879 	else
3880 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3881 
3882 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3883 }
3884 
3885 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3886 {
3887 	int ret;
3888 
3889 	ret = hclge_set_all_vf_rst(hdev, true);
3890 	if (ret)
3891 		return ret;
3892 
3893 	hclge_func_reset_sync_vf(hdev);
3894 
3895 	return 0;
3896 }
3897 
3898 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3899 {
3900 	u32 reg_val;
3901 	int ret = 0;
3902 
3903 	switch (hdev->reset_type) {
3904 	case HNAE3_FUNC_RESET:
3905 		ret = hclge_func_reset_notify_vf(hdev);
3906 		if (ret)
3907 			return ret;
3908 
3909 		ret = hclge_func_reset_cmd(hdev, 0);
3910 		if (ret) {
3911 			dev_err(&hdev->pdev->dev,
3912 				"asserting function reset fail %d!\n", ret);
3913 			return ret;
3914 		}
3915 
3916 		/* After performing the PF reset, it is not necessary to do
3917 		 * mailbox handling or send any command to firmware, because
3918 		 * any mailbox handling or command to firmware is only valid
3919 		 * after hclge_cmd_init is called.
3920 		 */
3921 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3922 		hdev->rst_stats.pf_rst_cnt++;
3923 		break;
3924 	case HNAE3_FLR_RESET:
3925 		ret = hclge_func_reset_notify_vf(hdev);
3926 		if (ret)
3927 			return ret;
3928 		break;
3929 	case HNAE3_IMP_RESET:
3930 		hclge_handle_imp_error(hdev);
3931 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3932 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3933 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3934 		break;
3935 	default:
3936 		break;
3937 	}
3938 
3939 	/* inform hardware that preparatory work is done */
3940 	msleep(HCLGE_RESET_SYNC_TIME);
3941 	hclge_reset_handshake(hdev, true);
3942 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3943 
3944 	return ret;
3945 }
3946 
3947 static void hclge_show_rst_info(struct hclge_dev *hdev)
3948 {
3949 	char *buf;
3950 
3951 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3952 	if (!buf)
3953 		return;
3954 
3955 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3956 
3957 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3958 
3959 	kfree(buf);
3960 }
3961 
3962 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3963 {
3964 #define MAX_RESET_FAIL_CNT 5
3965 
3966 	if (hdev->reset_pending) {
3967 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3968 			 hdev->reset_pending);
3969 		return true;
3970 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3971 		   HCLGE_RESET_INT_M) {
3972 		dev_info(&hdev->pdev->dev,
3973 			 "reset failed because new reset interrupt\n");
3974 		hclge_clear_reset_cause(hdev);
3975 		return false;
3976 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3977 		hdev->rst_stats.reset_fail_cnt++;
3978 		set_bit(hdev->reset_type, &hdev->reset_pending);
3979 		dev_info(&hdev->pdev->dev,
3980 			 "re-schedule reset task(%u)\n",
3981 			 hdev->rst_stats.reset_fail_cnt);
3982 		return true;
3983 	}
3984 
3985 	hclge_clear_reset_cause(hdev);
3986 
3987 	/* recover the handshake status when reset fail */
3988 	hclge_reset_handshake(hdev, true);
3989 
3990 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3991 
3992 	hclge_show_rst_info(hdev);
3993 
3994 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3995 
3996 	return false;
3997 }
3998 
3999 static void hclge_update_reset_level(struct hclge_dev *hdev)
4000 {
4001 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4002 	enum hnae3_reset_type reset_level;
4003 
4004 	/* reset request will not be set during reset, so clear
4005 	 * pending reset request to avoid unnecessary reset
4006 	 * caused by the same reason.
4007 	 */
4008 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4009 
4010 	/* if default_reset_request has a higher level reset request,
4011 	 * it should be handled as soon as possible, since some errors
4012 	 * need this kind of reset to be fixed.
4013 	 */
4014 	reset_level = hclge_get_reset_level(ae_dev,
4015 					    &hdev->default_reset_request);
4016 	if (reset_level != HNAE3_NONE_RESET)
4017 		set_bit(reset_level, &hdev->reset_request);
4018 }
4019 
4020 static int hclge_set_rst_done(struct hclge_dev *hdev)
4021 {
4022 	struct hclge_pf_rst_done_cmd *req;
4023 	struct hclge_desc desc;
4024 	int ret;
4025 
4026 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4027 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4028 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4029 
4030 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4031 	/* To be compatible with the old firmware, which does not support
4032 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4033 	 * return success
4034 	 */
4035 	if (ret == -EOPNOTSUPP) {
4036 		dev_warn(&hdev->pdev->dev,
4037 			 "current firmware does not support command(0x%x)!\n",
4038 			 HCLGE_OPC_PF_RST_DONE);
4039 		return 0;
4040 	} else if (ret) {
4041 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4042 			ret);
4043 	}
4044 
4045 	return ret;
4046 }
4047 
4048 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4049 {
4050 	int ret = 0;
4051 
4052 	switch (hdev->reset_type) {
4053 	case HNAE3_FUNC_RESET:
4054 	case HNAE3_FLR_RESET:
4055 		ret = hclge_set_all_vf_rst(hdev, false);
4056 		break;
4057 	case HNAE3_GLOBAL_RESET:
4058 	case HNAE3_IMP_RESET:
4059 		ret = hclge_set_rst_done(hdev);
4060 		break;
4061 	default:
4062 		break;
4063 	}
4064 
4065 	/* clear the handshake status after re-initialization is done */
4066 	hclge_reset_handshake(hdev, false);
4067 
4068 	return ret;
4069 }
4070 
4071 static int hclge_reset_stack(struct hclge_dev *hdev)
4072 {
4073 	int ret;
4074 
4075 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4076 	if (ret)
4077 		return ret;
4078 
4079 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4080 	if (ret)
4081 		return ret;
4082 
4083 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4084 }
4085 
4086 static int hclge_reset_prepare(struct hclge_dev *hdev)
4087 {
4088 	int ret;
4089 
4090 	hdev->rst_stats.reset_cnt++;
4091 	/* perform reset of the stack & ae device for a client */
4092 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4093 	if (ret)
4094 		return ret;
4095 
4096 	rtnl_lock();
4097 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4098 	rtnl_unlock();
4099 	if (ret)
4100 		return ret;
4101 
4102 	return hclge_reset_prepare_wait(hdev);
4103 }
4104 
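/* Rebuild the device after the hardware reset has completed: re-initialize
 * the ae device and the stack, clear the reset cause, bring the NIC and
 * RoCE clients back up and update the reset statistics and reset level.
 */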
4105 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4106 {
4107 	int ret;
4108 
4109 	hdev->rst_stats.hw_reset_done_cnt++;
4110 
4111 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4112 	if (ret)
4113 		return ret;
4114 
4115 	rtnl_lock();
4116 	ret = hclge_reset_stack(hdev);
4117 	rtnl_unlock();
4118 	if (ret)
4119 		return ret;
4120 
4121 	hclge_clear_reset_cause(hdev);
4122 
4123 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4124 	/* ignore the RoCE notify error only when the reset has already
4125 	 * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
4126 	 */
4127 	if (ret &&
4128 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4129 		return ret;
4130 
4131 	ret = hclge_reset_prepare_up(hdev);
4132 	if (ret)
4133 		return ret;
4134 
4135 	rtnl_lock();
4136 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4137 	rtnl_unlock();
4138 	if (ret)
4139 		return ret;
4140 
4141 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4142 	if (ret)
4143 		return ret;
4144 
4145 	hdev->last_reset_time = jiffies;
4146 	hdev->rst_stats.reset_fail_cnt = 0;
4147 	hdev->rst_stats.reset_done_cnt++;
4148 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4149 
4150 	hclge_update_reset_level(hdev);
4151 
4152 	return 0;
4153 }
4154 
4155 static void hclge_reset(struct hclge_dev *hdev)
4156 {
4157 	if (hclge_reset_prepare(hdev))
4158 		goto err_reset;
4159 
4160 	if (hclge_reset_wait(hdev))
4161 		goto err_reset;
4162 
4163 	if (hclge_reset_rebuild(hdev))
4164 		goto err_reset;
4165 
4166 	return;
4167 
4168 err_reset:
4169 	if (hclge_reset_err_handle(hdev))
4170 		hclge_reset_task_schedule(hdev);
4171 }
4172 
4173 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4174 {
4175 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4176 	struct hclge_dev *hdev = ae_dev->priv;
4177 
4178 	/* We might end up getting called because of the two cases below:
4179 	 * 1. A recoverable error was conveyed through APEI and the only way
4180 	 *    to restore normalcy is to reset.
4181 	 * 2. A new reset request from the stack due to a timeout.
4182 	 *
4183 	 * Check if this is a new reset request and we are not here just
4184 	 * because the last reset attempt did not succeed and the watchdog
4185 	 * hit us again. We know the request is new if the last one did not
4186 	 * occur very recently (watchdog timer = 5*HZ, so check after a
4187 	 * sufficiently long time, say 4*5*HZ). For a new request we reset
4188 	 * the "reset level" to PF reset. If it is a repeat of the most
4189 	 * recent request, we throttle it: it is not allowed again before
4190 	 * HCLGE_RESET_INTERVAL has elapsed.
4191 	 */
4192 
4193 	if (time_before(jiffies, (hdev->last_reset_time +
4194 				  HCLGE_RESET_INTERVAL))) {
4195 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4196 		return;
4197 	}
4198 
4199 	if (hdev->default_reset_request) {
4200 		hdev->reset_level =
4201 			hclge_get_reset_level(ae_dev,
4202 					      &hdev->default_reset_request);
4203 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4204 		hdev->reset_level = HNAE3_FUNC_RESET;
4205 	}
4206 
4207 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4208 		 hdev->reset_level);
4209 
4210 	/* request reset & schedule reset task */
4211 	set_bit(hdev->reset_level, &hdev->reset_request);
4212 	hclge_reset_task_schedule(hdev);
4213 
4214 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4215 		hdev->reset_level++;
4216 }
4217 
4218 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4219 					enum hnae3_reset_type rst_type)
4220 {
4221 	struct hclge_dev *hdev = ae_dev->priv;
4222 
4223 	set_bit(rst_type, &hdev->default_reset_request);
4224 }
4225 
4226 static void hclge_reset_timer(struct timer_list *t)
4227 {
4228 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4229 
4230 	/* if default_reset_request has no value, it means that this reset
4231 	 * request has already been handled, so just return here
4232 	 */
4233 	if (!hdev->default_reset_request)
4234 		return;
4235 
4236 	dev_info(&hdev->pdev->dev,
4237 		 "triggering reset in reset timer\n");
4238 	hclge_reset_event(hdev->pdev, NULL);
4239 }
4240 
4241 static void hclge_reset_subtask(struct hclge_dev *hdev)
4242 {
4243 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4244 
4245 	/* check if there is any ongoing reset in the hardware. This status
4246 	 * can be checked from reset_pending. If there is, we need to wait
4247 	 * for the hardware to complete the reset.
4248 	 *    a. If we are able to figure out in a reasonable time that the
4249 	 *       hardware has fully reset, we can proceed with the driver and
4250 	 *       client reset.
4251 	 *    b. else, we can come back later to check this status, so
4252 	 *       re-schedule now.
4253 	 */
4254 	hdev->last_reset_time = jiffies;
4255 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4256 	if (hdev->reset_type != HNAE3_NONE_RESET)
4257 		hclge_reset(hdev);
4258 
4259 	/* check if we got any *new* reset requests to be honored */
4260 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4261 	if (hdev->reset_type != HNAE3_NONE_RESET)
4262 		hclge_do_reset(hdev);
4263 
4264 	hdev->reset_type = HNAE3_NONE_RESET;
4265 }
4266 
4267 static void hclge_reset_service_task(struct hclge_dev *hdev)
4268 {
4269 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4270 		return;
4271 
4272 	down(&hdev->reset_sem);
4273 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4274 
4275 	hclge_reset_subtask(hdev);
4276 
4277 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4278 	up(&hdev->reset_sem);
4279 }
4280 
4281 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4282 {
4283 	int i;
4284 
4285 	/* start from vport 1 since the PF (vport 0) is always alive */
4286 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4287 		struct hclge_vport *vport = &hdev->vport[i];
4288 
4289 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4290 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4291 
4292 		/* If vf is not alive, set to default value */
4293 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4294 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4295 	}
4296 }
4297 
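/* Periodic work: always sync link status, MAC table, promisc mode and FD
 * table, and limit the remaining updates (vport alive, stats, port info,
 * VLAN filter and ARFS expiry) to roughly once per second.
 */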
4298 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4299 {
4300 	unsigned long delta = round_jiffies_relative(HZ);
4301 
4302 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4303 		return;
4304 
4305 	/* Always handle the link updating to make sure link state is
4306 	 * updated when it is triggered by mbx.
4307 	 */
4308 	hclge_update_link_status(hdev);
4309 	hclge_sync_mac_table(hdev);
4310 	hclge_sync_promisc_mode(hdev);
4311 	hclge_sync_fd_table(hdev);
4312 
4313 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4314 		delta = jiffies - hdev->last_serv_processed;
4315 
4316 		if (delta < round_jiffies_relative(HZ)) {
4317 			delta = round_jiffies_relative(HZ) - delta;
4318 			goto out;
4319 		}
4320 	}
4321 
4322 	hdev->serv_processed_cnt++;
4323 	hclge_update_vport_alive(hdev);
4324 
4325 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4326 		hdev->last_serv_processed = jiffies;
4327 		goto out;
4328 	}
4329 
4330 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4331 		hclge_update_stats_for_all(hdev);
4332 
4333 	hclge_update_port_info(hdev);
4334 	hclge_sync_vlan_filter(hdev);
4335 
4336 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4337 		hclge_rfs_filter_expire(hdev);
4338 
4339 	hdev->last_serv_processed = jiffies;
4340 
4341 out:
4342 	hclge_task_schedule(hdev, delta);
4343 }
4344 
4345 static void hclge_service_task(struct work_struct *work)
4346 {
4347 	struct hclge_dev *hdev =
4348 		container_of(work, struct hclge_dev, service_task.work);
4349 
4350 	hclge_reset_service_task(hdev);
4351 	hclge_mailbox_service_task(hdev);
4352 	hclge_periodic_service_task(hdev);
4353 
4354 	/* Handle reset and mbx again in case periodical task delays the
4355 	 * handling by calling hclge_task_schedule() in
4356 	 * hclge_periodic_service_task().
4357 	 */
4358 	hclge_reset_service_task(hdev);
4359 	hclge_mailbox_service_task(hdev);
4360 }
4361 
4362 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4363 {
4364 	/* VF handle has no client */
4365 	if (!handle->client)
4366 		return container_of(handle, struct hclge_vport, nic);
4367 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4368 		return container_of(handle, struct hclge_vport, roce);
4369 	else
4370 		return container_of(handle, struct hclge_vport, nic);
4371 }
4372 
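/* Fill vector_info with the IRQ number and the I/O address of the
 * interrupt control register for the vector at index idx, and record the
 * vector as used by vport 0.
 */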
4373 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4374 				  struct hnae3_vector_info *vector_info)
4375 {
4376 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4377 
4378 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4379 
4380 	/* an extended offset is needed to configure vectors >= 64 */
4381 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4382 		vector_info->io_addr = hdev->hw.io_base +
4383 				HCLGE_VECTOR_REG_BASE +
4384 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4385 	else
4386 		vector_info->io_addr = hdev->hw.io_base +
4387 				HCLGE_VECTOR_EXT_REG_BASE +
4388 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4389 				HCLGE_VECTOR_REG_OFFSET_H +
4390 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4391 				HCLGE_VECTOR_REG_OFFSET;
4392 
4393 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4394 	hdev->vector_irq[idx] = vector_info->vector;
4395 }
4396 
4397 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4398 			    struct hnae3_vector_info *vector_info)
4399 {
4400 	struct hclge_vport *vport = hclge_get_vport(handle);
4401 	struct hnae3_vector_info *vector = vector_info;
4402 	struct hclge_dev *hdev = vport->back;
4403 	int alloc = 0;
4404 	u16 i = 0;
4405 	u16 j;
4406 
4407 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4408 	vector_num = min(hdev->num_msi_left, vector_num);
4409 
4410 	for (j = 0; j < vector_num; j++) {
4411 		while (++i < hdev->num_nic_msi) {
4412 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4413 				hclge_get_vector_info(hdev, i, vector);
4414 				vector++;
4415 				alloc++;
4416 
4417 				break;
4418 			}
4419 		}
4420 	}
4421 	hdev->num_msi_left -= alloc;
4422 	hdev->num_msi_used += alloc;
4423 
4424 	return alloc;
4425 }
4426 
4427 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4428 {
4429 	int i;
4430 
4431 	for (i = 0; i < hdev->num_msi; i++)
4432 		if (vector == hdev->vector_irq[i])
4433 			return i;
4434 
4435 	return -EINVAL;
4436 }
4437 
4438 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4439 {
4440 	struct hclge_vport *vport = hclge_get_vport(handle);
4441 	struct hclge_dev *hdev = vport->back;
4442 	int vector_id;
4443 
4444 	vector_id = hclge_get_vector_index(hdev, vector);
4445 	if (vector_id < 0) {
4446 		dev_err(&hdev->pdev->dev,
4447 			"Get vector index fail. vector = %d\n", vector);
4448 		return vector_id;
4449 	}
4450 
4451 	hclge_free_vector(hdev, vector_id);
4452 
4453 	return 0;
4454 }
4455 
4456 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4457 {
4458 	return HCLGE_RSS_KEY_SIZE;
4459 }
4460 
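/* Write the RSS hash algorithm and hash key to the hardware. The key is
 * longer than a single descriptor can carry, so it is sent in chunks with
 * the chunk offset encoded in hash_config.
 */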
4461 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4462 				  const u8 hfunc, const u8 *key)
4463 {
4464 	struct hclge_rss_config_cmd *req;
4465 	unsigned int key_offset = 0;
4466 	struct hclge_desc desc;
4467 	int key_counts;
4468 	int key_size;
4469 	int ret;
4470 
4471 	key_counts = HCLGE_RSS_KEY_SIZE;
4472 	req = (struct hclge_rss_config_cmd *)desc.data;
4473 
4474 	while (key_counts) {
4475 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4476 					   false);
4477 
4478 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4479 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4480 
4481 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4482 		memcpy(req->hash_key,
4483 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4484 
4485 		key_counts -= key_size;
4486 		key_offset++;
4487 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4488 		if (ret) {
4489 			dev_err(&hdev->pdev->dev,
4490 				"Configure RSS config fail, status = %d\n",
4491 				ret);
4492 			return ret;
4493 		}
4494 	}
4495 	return 0;
4496 }
4497 
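/* Program the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries per
 * command. The low byte of each queue id goes into rss_qid_l and the
 * remaining MSB is packed into rss_qid_h.
 */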
4498 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4499 {
4500 	struct hclge_rss_indirection_table_cmd *req;
4501 	struct hclge_desc desc;
4502 	int rss_cfg_tbl_num;
4503 	u8 rss_msb_oft;
4504 	u8 rss_msb_val;
4505 	int ret;
4506 	u16 qid;
4507 	int i;
4508 	u32 j;
4509 
4510 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4511 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4512 			  HCLGE_RSS_CFG_TBL_SIZE;
4513 
4514 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4515 		hclge_cmd_setup_basic_desc
4516 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4517 
4518 		req->start_table_index =
4519 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4520 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4521 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4522 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4523 			req->rss_qid_l[j] = qid & 0xff;
4524 			rss_msb_oft =
4525 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4526 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4527 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4528 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4529 		}
4530 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4531 		if (ret) {
4532 			dev_err(&hdev->pdev->dev,
4533 				"Configure rss indir table fail, status = %d\n",
4534 				ret);
4535 			return ret;
4536 		}
4537 	}
4538 	return 0;
4539 }
4540 
4541 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4542 				 u16 *tc_size, u16 *tc_offset)
4543 {
4544 	struct hclge_rss_tc_mode_cmd *req;
4545 	struct hclge_desc desc;
4546 	int ret;
4547 	int i;
4548 
4549 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4550 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4551 
4552 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4553 		u16 mode = 0;
4554 
4555 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4556 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4557 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4558 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4559 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4560 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4561 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4562 
4563 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4564 	}
4565 
4566 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4567 	if (ret)
4568 		dev_err(&hdev->pdev->dev,
4569 			"Configure rss tc mode fail, status = %d\n", ret);
4570 
4571 	return ret;
4572 }
4573 
4574 static void hclge_get_rss_type(struct hclge_vport *vport)
4575 {
4576 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4577 	    vport->rss_tuple_sets.ipv4_udp_en ||
4578 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4579 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4580 	    vport->rss_tuple_sets.ipv6_udp_en ||
4581 	    vport->rss_tuple_sets.ipv6_sctp_en)
4582 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4583 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4584 		 vport->rss_tuple_sets.ipv6_fragment_en)
4585 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4586 	else
4587 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4588 }
4589 
4590 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4591 {
4592 	struct hclge_rss_input_tuple_cmd *req;
4593 	struct hclge_desc desc;
4594 	int ret;
4595 
4596 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4597 
4598 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4599 
4600 	/* Get the tuple cfg from pf */
4601 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4602 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4603 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4604 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4605 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4606 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4607 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4608 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4609 	hclge_get_rss_type(&hdev->vport[0]);
4610 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4611 	if (ret)
4612 		dev_err(&hdev->pdev->dev,
4613 			"Configure rss input fail, status = %d\n", ret);
4614 	return ret;
4615 }
4616 
4617 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4618 			 u8 *key, u8 *hfunc)
4619 {
4620 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4621 	struct hclge_vport *vport = hclge_get_vport(handle);
4622 	int i;
4623 
4624 	/* Get hash algorithm */
4625 	if (hfunc) {
4626 		switch (vport->rss_algo) {
4627 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4628 			*hfunc = ETH_RSS_HASH_TOP;
4629 			break;
4630 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4631 			*hfunc = ETH_RSS_HASH_XOR;
4632 			break;
4633 		default:
4634 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4635 			break;
4636 		}
4637 	}
4638 
4639 	/* Get the RSS Key required by the user */
4640 	if (key)
4641 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4642 
4643 	/* Get indirect table */
4644 	if (indir)
4645 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4646 			indir[i] =  vport->rss_indirection_tbl[i];
4647 
4648 	return 0;
4649 }
4650 
4651 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4652 			 const  u8 *key, const  u8 hfunc)
4653 {
4654 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4655 	struct hclge_vport *vport = hclge_get_vport(handle);
4656 	struct hclge_dev *hdev = vport->back;
4657 	u8 hash_algo;
4658 	int ret, i;
4659 
4660 	/* Set the RSS Hash Key if specified by the user */
4661 	if (key) {
4662 		switch (hfunc) {
4663 		case ETH_RSS_HASH_TOP:
4664 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4665 			break;
4666 		case ETH_RSS_HASH_XOR:
4667 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4668 			break;
4669 		case ETH_RSS_HASH_NO_CHANGE:
4670 			hash_algo = vport->rss_algo;
4671 			break;
4672 		default:
4673 			return -EINVAL;
4674 		}
4675 
4676 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4677 		if (ret)
4678 			return ret;
4679 
4680 		/* Update the shadow RSS key with the user specified key */
4681 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4682 		vport->rss_algo = hash_algo;
4683 	}
4684 
4685 	/* Update the shadow RSS table with user specified qids */
4686 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4687 		vport->rss_indirection_tbl[i] = indir[i];
4688 
4689 	/* Update the hardware */
4690 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4691 }
4692 
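/* Translate the ethtool RXH_* flags in nfc->data into the device's RSS
 * tuple enable bits; SCTP flows additionally set HCLGE_V_TAG_BIT.
 */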
4693 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4694 {
4695 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4696 
4697 	if (nfc->data & RXH_L4_B_2_3)
4698 		hash_sets |= HCLGE_D_PORT_BIT;
4699 	else
4700 		hash_sets &= ~HCLGE_D_PORT_BIT;
4701 
4702 	if (nfc->data & RXH_IP_SRC)
4703 		hash_sets |= HCLGE_S_IP_BIT;
4704 	else
4705 		hash_sets &= ~HCLGE_S_IP_BIT;
4706 
4707 	if (nfc->data & RXH_IP_DST)
4708 		hash_sets |= HCLGE_D_IP_BIT;
4709 	else
4710 		hash_sets &= ~HCLGE_D_IP_BIT;
4711 
4712 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4713 		hash_sets |= HCLGE_V_TAG_BIT;
4714 
4715 	return hash_sets;
4716 }
4717 
4718 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4719 				    struct ethtool_rxnfc *nfc,
4720 				    struct hclge_rss_input_tuple_cmd *req)
4721 {
4722 	struct hclge_dev *hdev = vport->back;
4723 	u8 tuple_sets;
4724 
4725 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4726 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4727 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4728 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4729 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4730 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4731 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4732 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4733 
4734 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4735 	switch (nfc->flow_type) {
4736 	case TCP_V4_FLOW:
4737 		req->ipv4_tcp_en = tuple_sets;
4738 		break;
4739 	case TCP_V6_FLOW:
4740 		req->ipv6_tcp_en = tuple_sets;
4741 		break;
4742 	case UDP_V4_FLOW:
4743 		req->ipv4_udp_en = tuple_sets;
4744 		break;
4745 	case UDP_V6_FLOW:
4746 		req->ipv6_udp_en = tuple_sets;
4747 		break;
4748 	case SCTP_V4_FLOW:
4749 		req->ipv4_sctp_en = tuple_sets;
4750 		break;
4751 	case SCTP_V6_FLOW:
4752 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4753 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4754 			return -EINVAL;
4755 
4756 		req->ipv6_sctp_en = tuple_sets;
4757 		break;
4758 	case IPV4_FLOW:
4759 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4760 		break;
4761 	case IPV6_FLOW:
4762 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4763 		break;
4764 	default:
4765 		return -EINVAL;
4766 	}
4767 
4768 	return 0;
4769 }
4770 
4771 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4772 			       struct ethtool_rxnfc *nfc)
4773 {
4774 	struct hclge_vport *vport = hclge_get_vport(handle);
4775 	struct hclge_dev *hdev = vport->back;
4776 	struct hclge_rss_input_tuple_cmd *req;
4777 	struct hclge_desc desc;
4778 	int ret;
4779 
4780 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4781 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4782 		return -EINVAL;
4783 
4784 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4785 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4786 
4787 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4788 	if (ret) {
4789 		dev_err(&hdev->pdev->dev,
4790 			"failed to init rss tuple cmd, ret = %d\n", ret);
4791 		return ret;
4792 	}
4793 
4794 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4795 	if (ret) {
4796 		dev_err(&hdev->pdev->dev,
4797 			"Set rss tuple fail, status = %d\n", ret);
4798 		return ret;
4799 	}
4800 
4801 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4802 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4803 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4804 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4805 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4806 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4807 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4808 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4809 	hclge_get_rss_type(vport);
4810 	return 0;
4811 }
4812 
4813 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4814 				     u8 *tuple_sets)
4815 {
4816 	switch (flow_type) {
4817 	case TCP_V4_FLOW:
4818 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4819 		break;
4820 	case UDP_V4_FLOW:
4821 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4822 		break;
4823 	case TCP_V6_FLOW:
4824 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4825 		break;
4826 	case UDP_V6_FLOW:
4827 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4828 		break;
4829 	case SCTP_V4_FLOW:
4830 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4831 		break;
4832 	case SCTP_V6_FLOW:
4833 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4834 		break;
4835 	case IPV4_FLOW:
4836 	case IPV6_FLOW:
4837 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4838 		break;
4839 	default:
4840 		return -EINVAL;
4841 	}
4842 
4843 	return 0;
4844 }
4845 
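/* Translate the device's RSS tuple enable bits back into ethtool RXH_*
 * flags.
 */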
4846 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4847 {
4848 	u64 tuple_data = 0;
4849 
4850 	if (tuple_sets & HCLGE_D_PORT_BIT)
4851 		tuple_data |= RXH_L4_B_2_3;
4852 	if (tuple_sets & HCLGE_S_PORT_BIT)
4853 		tuple_data |= RXH_L4_B_0_1;
4854 	if (tuple_sets & HCLGE_D_IP_BIT)
4855 		tuple_data |= RXH_IP_DST;
4856 	if (tuple_sets & HCLGE_S_IP_BIT)
4857 		tuple_data |= RXH_IP_SRC;
4858 
4859 	return tuple_data;
4860 }
4861 
4862 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4863 			       struct ethtool_rxnfc *nfc)
4864 {
4865 	struct hclge_vport *vport = hclge_get_vport(handle);
4866 	u8 tuple_sets;
4867 	int ret;
4868 
4869 	nfc->data = 0;
4870 
4871 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4872 	if (ret || !tuple_sets)
4873 		return ret;
4874 
4875 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4876 
4877 	return 0;
4878 }
4879 
4880 static int hclge_get_tc_size(struct hnae3_handle *handle)
4881 {
4882 	struct hclge_vport *vport = hclge_get_vport(handle);
4883 	struct hclge_dev *hdev = vport->back;
4884 
4885 	return hdev->pf_rss_size_max;
4886 }
4887 
4888 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4889 {
4890 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4891 	struct hclge_vport *vport = hdev->vport;
4892 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4893 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4894 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4895 	struct hnae3_tc_info *tc_info;
4896 	u16 roundup_size;
4897 	u16 rss_size;
4898 	int i;
4899 
4900 	tc_info = &vport->nic.kinfo.tc_info;
4901 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4902 		rss_size = tc_info->tqp_count[i];
4903 		tc_valid[i] = 0;
4904 
4905 		if (!(hdev->hw_tc_map & BIT(i)))
4906 			continue;
4907 
4908 		/* the tc_size set to hardware is the log2 of rss_size rounded
4909 		 * up to a power of two; the actual queue size is limited by
4910 		 * the indirection table.
4911 		 */
4912 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4913 		    rss_size == 0) {
4914 			dev_err(&hdev->pdev->dev,
4915 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4916 				rss_size);
4917 			return -EINVAL;
4918 		}
4919 
4920 		roundup_size = roundup_pow_of_two(rss_size);
4921 		roundup_size = ilog2(roundup_size);
4922 
4923 		tc_valid[i] = 1;
4924 		tc_size[i] = roundup_size;
4925 		tc_offset[i] = tc_info->tqp_offset[i];
4926 	}
4927 
4928 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4929 }
4930 
4931 int hclge_rss_init_hw(struct hclge_dev *hdev)
4932 {
4933 	struct hclge_vport *vport = hdev->vport;
4934 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4935 	u8 *key = vport[0].rss_hash_key;
4936 	u8 hfunc = vport[0].rss_algo;
4937 	int ret;
4938 
4939 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4940 	if (ret)
4941 		return ret;
4942 
4943 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4944 	if (ret)
4945 		return ret;
4946 
4947 	ret = hclge_set_rss_input_tuple(hdev);
4948 	if (ret)
4949 		return ret;
4950 
4951 	return hclge_init_rss_tc_mode(hdev);
4952 }
4953 
4954 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4955 {
4956 	struct hclge_vport *vport = &hdev->vport[0];
4957 	int i;
4958 
4959 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4960 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4961 }
4962 
4963 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4964 {
4965 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4966 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4967 	struct hclge_vport *vport = &hdev->vport[0];
4968 	u16 *rss_ind_tbl;
4969 
4970 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4971 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4972 
4973 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4974 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4975 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4976 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4977 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4978 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4979 	vport->rss_tuple_sets.ipv6_sctp_en =
4980 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4981 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4982 		HCLGE_RSS_INPUT_TUPLE_SCTP;
4983 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4984 
4985 	vport->rss_algo = rss_algo;
4986 
4987 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4988 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
4989 	if (!rss_ind_tbl)
4990 		return -ENOMEM;
4991 
4992 	vport->rss_indirection_tbl = rss_ind_tbl;
4993 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4994 
4995 	hclge_rss_indir_init_cfg(hdev);
4996 
4997 	return 0;
4998 }
4999 
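/* Map (en == true) or unmap the rings in ring_chain to/from the vector
 * identified by vector_id, batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD
 * ring entries per command descriptor.
 */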
5000 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5001 				int vector_id, bool en,
5002 				struct hnae3_ring_chain_node *ring_chain)
5003 {
5004 	struct hclge_dev *hdev = vport->back;
5005 	struct hnae3_ring_chain_node *node;
5006 	struct hclge_desc desc;
5007 	struct hclge_ctrl_vector_chain_cmd *req =
5008 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5009 	enum hclge_cmd_status status;
5010 	enum hclge_opcode_type op;
5011 	u16 tqp_type_and_id;
5012 	int i;
5013 
5014 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5015 	hclge_cmd_setup_basic_desc(&desc, op, false);
5016 	req->int_vector_id_l = hnae3_get_field(vector_id,
5017 					       HCLGE_VECTOR_ID_L_M,
5018 					       HCLGE_VECTOR_ID_L_S);
5019 	req->int_vector_id_h = hnae3_get_field(vector_id,
5020 					       HCLGE_VECTOR_ID_H_M,
5021 					       HCLGE_VECTOR_ID_H_S);
5022 
5023 	i = 0;
5024 	for (node = ring_chain; node; node = node->next) {
5025 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5026 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5027 				HCLGE_INT_TYPE_S,
5028 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5029 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5030 				HCLGE_TQP_ID_S, node->tqp_index);
5031 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5032 				HCLGE_INT_GL_IDX_S,
5033 				hnae3_get_field(node->int_gl_idx,
5034 						HNAE3_RING_GL_IDX_M,
5035 						HNAE3_RING_GL_IDX_S));
5036 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5037 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5038 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5039 			req->vfid = vport->vport_id;
5040 
5041 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5042 			if (status) {
5043 				dev_err(&hdev->pdev->dev,
5044 					"Map TQP fail, status is %d.\n",
5045 					status);
5046 				return -EIO;
5047 			}
5048 			i = 0;
5049 
5050 			hclge_cmd_setup_basic_desc(&desc,
5051 						   op,
5052 						   false);
5053 			req->int_vector_id_l =
5054 				hnae3_get_field(vector_id,
5055 						HCLGE_VECTOR_ID_L_M,
5056 						HCLGE_VECTOR_ID_L_S);
5057 			req->int_vector_id_h =
5058 				hnae3_get_field(vector_id,
5059 						HCLGE_VECTOR_ID_H_M,
5060 						HCLGE_VECTOR_ID_H_S);
5061 		}
5062 	}
5063 
5064 	if (i > 0) {
5065 		req->int_cause_num = i;
5066 		req->vfid = vport->vport_id;
5067 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5068 		if (status) {
5069 			dev_err(&hdev->pdev->dev,
5070 				"Map TQP fail, status is %d.\n", status);
5071 			return -EIO;
5072 		}
5073 	}
5074 
5075 	return 0;
5076 }
5077 
5078 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5079 				    struct hnae3_ring_chain_node *ring_chain)
5080 {
5081 	struct hclge_vport *vport = hclge_get_vport(handle);
5082 	struct hclge_dev *hdev = vport->back;
5083 	int vector_id;
5084 
5085 	vector_id = hclge_get_vector_index(hdev, vector);
5086 	if (vector_id < 0) {
5087 		dev_err(&hdev->pdev->dev,
5088 			"failed to get vector index. vector=%d\n", vector);
5089 		return vector_id;
5090 	}
5091 
5092 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5093 }
5094 
5095 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5096 				       struct hnae3_ring_chain_node *ring_chain)
5097 {
5098 	struct hclge_vport *vport = hclge_get_vport(handle);
5099 	struct hclge_dev *hdev = vport->back;
5100 	int vector_id, ret;
5101 
5102 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5103 		return 0;
5104 
5105 	vector_id = hclge_get_vector_index(hdev, vector);
5106 	if (vector_id < 0) {
5107 		dev_err(&handle->pdev->dev,
5108 			"Get vector index fail. ret =%d\n", vector_id);
5109 		return vector_id;
5110 	}
5111 
5112 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5113 	if (ret)
5114 		dev_err(&handle->pdev->dev,
5115 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5116 			vector_id, ret);
5117 
5118 	return ret;
5119 }
5120 
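/* Configure unicast/multicast/broadcast promiscuous mode for the vport
 * given by vf_id. extend_promisc carries the per-direction RX/TX bits,
 * while promisc keeps compatibility with DEVICE_VERSION_V1/2.
 */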
5121 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5122 				      bool en_uc, bool en_mc, bool en_bc)
5123 {
5124 	struct hclge_vport *vport = &hdev->vport[vf_id];
5125 	struct hnae3_handle *handle = &vport->nic;
5126 	struct hclge_promisc_cfg_cmd *req;
5127 	struct hclge_desc desc;
5128 	bool uc_tx_en = en_uc;
5129 	u8 promisc_cfg = 0;
5130 	int ret;
5131 
5132 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5133 
5134 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5135 	req->vf_id = vf_id;
5136 
5137 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5138 		uc_tx_en = false;
5139 
5140 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5141 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5142 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5143 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5144 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5145 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5146 	req->extend_promisc = promisc_cfg;
5147 
5148 	/* to be compatible with DEVICE_VERSION_V1/2 */
5149 	promisc_cfg = 0;
5150 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5151 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5152 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5153 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5154 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5155 	req->promisc = promisc_cfg;
5156 
5157 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5158 	if (ret)
5159 		dev_err(&hdev->pdev->dev,
5160 			"failed to set vport %u promisc mode, ret = %d.\n",
5161 			vf_id, ret);
5162 
5163 	return ret;
5164 }
5165 
5166 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5167 				 bool en_mc_pmc, bool en_bc_pmc)
5168 {
5169 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5170 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5171 }
5172 
5173 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5174 				  bool en_mc_pmc)
5175 {
5176 	struct hclge_vport *vport = hclge_get_vport(handle);
5177 	struct hclge_dev *hdev = vport->back;
5178 	bool en_bc_pmc = true;
5179 
5180 	/* For devices whose version is below V2, the vlan filter is always
5181 	 * bypassed when broadcast promisc is enabled. So broadcast promisc
5182 	 * should stay disabled until the user enables promisc mode.
5183 	 */
5184 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5185 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5186 
5187 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5188 					    en_bc_pmc);
5189 }
5190 
5191 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5192 {
5193 	struct hclge_vport *vport = hclge_get_vport(handle);
5194 
5195 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5196 }
5197 
5198 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5199 {
5200 	if (hlist_empty(&hdev->fd_rule_list))
5201 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5202 }
5203 
5204 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5205 {
5206 	if (!test_bit(location, hdev->fd_bmap)) {
5207 		set_bit(location, hdev->fd_bmap);
5208 		hdev->hclge_fd_rule_num++;
5209 	}
5210 }
5211 
5212 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5213 {
5214 	if (test_bit(location, hdev->fd_bmap)) {
5215 		clear_bit(location, hdev->fd_bmap);
5216 		hdev->hclge_fd_rule_num--;
5217 	}
5218 }
5219 
5220 static void hclge_fd_free_node(struct hclge_dev *hdev,
5221 			       struct hclge_fd_rule *rule)
5222 {
5223 	hlist_del(&rule->rule_node);
5224 	kfree(rule);
5225 	hclge_sync_fd_state(hdev);
5226 }
5227 
5228 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5229 				      struct hclge_fd_rule *old_rule,
5230 				      struct hclge_fd_rule *new_rule,
5231 				      enum HCLGE_FD_NODE_STATE state)
5232 {
5233 	switch (state) {
5234 	case HCLGE_FD_TO_ADD:
5235 	case HCLGE_FD_ACTIVE:
5236 		/* 1) if the new state is TO_ADD, just replace the old rule
5237 		 * at the same location, no matter its state, because the
5238 		 * new rule will be configured to the hardware.
5239 		 * 2) if the new state is ACTIVE, it means the new rule has
5240 		 * already been configured to the hardware, so just replace
5241 		 * the old rule node at the same location.
5242 		 * 3) since no new node is added to the list, there is no
5243 		 * need to update the rule number or fd_bmap.
5244 		 */
5245 		new_rule->rule_node.next = old_rule->rule_node.next;
5246 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5247 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5248 		kfree(new_rule);
5249 		break;
5250 	case HCLGE_FD_DELETED:
5251 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5252 		hclge_fd_free_node(hdev, old_rule);
5253 		break;
5254 	case HCLGE_FD_TO_DEL:
5255 		/* if the new request is TO_DEL and the old rule exists:
5256 		 * 1) if the old rule's state is TO_DEL, nothing needs to be
5257 		 * done, because rules are deleted by location and the other
5258 		 * rule content is irrelevant.
5259 		 * 2) if the old rule's state is ACTIVE, change it to TO_DEL,
5260 		 * so the rule will be deleted when the periodic task is
5261 		 * scheduled.
5262 		 * 3) if the old rule's state is TO_ADD, the rule hasn't been
5263 		 * added to hardware yet, so just delete the rule node from
5264 		 * fd_rule_list directly.
5265 		 */
5266 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5267 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5268 			hclge_fd_free_node(hdev, old_rule);
5269 			return;
5270 		}
5271 		old_rule->state = HCLGE_FD_TO_DEL;
5272 		break;
5273 	}
5274 }
5275 
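/* Look up a rule by location in the sorted fd_rule_list. Return the rule if
 * found, or NULL if no rule with this location exists. *parent is set to the
 * last node whose location is smaller, so a new node can be inserted behind
 * it to keep the list in ascending order.
 */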
5276 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5277 						u16 location,
5278 						struct hclge_fd_rule **parent)
5279 {
5280 	struct hclge_fd_rule *rule;
5281 	struct hlist_node *node;
5282 
5283 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5284 		if (rule->location == location)
5285 			return rule;
5286 		else if (rule->location > location)
5287 			return NULL;
5288 		/* record the parent node, used to keep the nodes in
5289 		 * fd_rule_list in ascending order.
5290 		 */
5291 		*parent = rule;
5292 	}
5293 
5294 	return NULL;
5295 }
5296 
5297 /* insert the fd rule node in ascending order according to rule->location */
5298 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5299 				      struct hclge_fd_rule *rule,
5300 				      struct hclge_fd_rule *parent)
5301 {
5302 	INIT_HLIST_NODE(&rule->rule_node);
5303 
5304 	if (parent)
5305 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5306 	else
5307 		hlist_add_head(&rule->rule_node, hlist);
5308 }
5309 
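/* Program the user-def tuple configuration for the three supported layers
 * (OL2/OL3/OL4). Each layer is enabled only while at least one rule
 * references it (ref_cnt > 0), together with the offset to match at.
 */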
5310 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5311 				     struct hclge_fd_user_def_cfg *cfg)
5312 {
5313 	struct hclge_fd_user_def_cfg_cmd *req;
5314 	struct hclge_desc desc;
5315 	u16 data = 0;
5316 	int ret;
5317 
5318 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5319 
5320 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5321 
5322 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5323 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5324 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5325 	req->ol2_cfg = cpu_to_le16(data);
5326 
5327 	data = 0;
5328 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5329 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5330 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5331 	req->ol3_cfg = cpu_to_le16(data);
5332 
5333 	data = 0;
5334 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5335 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5336 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5337 	req->ol4_cfg = cpu_to_le16(data);
5338 
5339 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5340 	if (ret)
5341 		dev_err(&hdev->pdev->dev,
5342 			"failed to set fd user def data, ret = %d\n", ret);
5343 	return ret;
5344 }
5345 
5346 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5347 {
5348 	int ret;
5349 
5350 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5351 		return;
5352 
5353 	if (!locked)
5354 		spin_lock_bh(&hdev->fd_rule_lock);
5355 
5356 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5357 	if (ret)
5358 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5359 
5360 	if (!locked)
5361 		spin_unlock_bh(&hdev->fd_rule_lock);
5362 }
5363 
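/* Each layer supports only one user-def offset, shared by all rules using
 * that layer. A rule with a different offset is rejected unless the only
 * existing reference comes from the rule being replaced at the same
 * location with the same layer.
 */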
5364 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5365 					  struct hclge_fd_rule *rule)
5366 {
5367 	struct hlist_head *hlist = &hdev->fd_rule_list;
5368 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5369 	struct hclge_fd_user_def_info *info, *old_info;
5370 	struct hclge_fd_user_def_cfg *cfg;
5371 
5372 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5373 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5374 		return 0;
5375 
5376 	/* valid layer starts from 1, so subtract 1 to get the cfg index */
5377 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5378 	info = &rule->ep.user_def;
5379 
5380 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5381 		return 0;
5382 
5383 	if (cfg->ref_cnt > 1)
5384 		goto error;
5385 
5386 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5387 	if (fd_rule) {
5388 		old_info = &fd_rule->ep.user_def;
5389 		if (info->layer == old_info->layer)
5390 			return 0;
5391 	}
5392 
5393 error:
5394 	dev_err(&hdev->pdev->dev,
5395 		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5396 		info->layer + 1);
5397 	return -ENOSPC;
5398 }
5399 
5400 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5401 					 struct hclge_fd_rule *rule)
5402 {
5403 	struct hclge_fd_user_def_cfg *cfg;
5404 
5405 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5406 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5407 		return;
5408 
5409 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5410 	if (!cfg->ref_cnt) {
5411 		cfg->offset = rule->ep.user_def.offset;
5412 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5413 	}
5414 	cfg->ref_cnt++;
5415 }
5416 
5417 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5418 					 struct hclge_fd_rule *rule)
5419 {
5420 	struct hclge_fd_user_def_cfg *cfg;
5421 
5422 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5423 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5424 		return;
5425 
5426 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5427 	if (!cfg->ref_cnt)
5428 		return;
5429 
5430 	cfg->ref_cnt--;
5431 	if (!cfg->ref_cnt) {
5432 		cfg->offset = 0;
5433 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5434 	}
5435 }
5436 
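/* Update the software rule list for the rule at @location: if a rule already
 * exists there, adjust the user-def reference counts and merge the new state
 * into the existing node; otherwise insert a new node and, for the TO_ADD
 * state, schedule the periodic task to push the rule to hardware.
 */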
5437 static void hclge_update_fd_list(struct hclge_dev *hdev,
5438 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5439 				 struct hclge_fd_rule *new_rule)
5440 {
5441 	struct hlist_head *hlist = &hdev->fd_rule_list;
5442 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5443 
5444 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5445 	if (fd_rule) {
5446 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5447 		if (state == HCLGE_FD_ACTIVE)
5448 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5449 		hclge_sync_fd_user_def_cfg(hdev, true);
5450 
5451 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5452 		return;
5453 	}
5454 
5455 	/* it's unlikely to fail here, because the rule's existence has
5456 	 * already been checked before.
5457 	 */
5458 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5459 		dev_warn(&hdev->pdev->dev,
5460 			 "failed to delete fd rule %u, it does not exist\n",
5461 			 location);
5462 		return;
5463 	}
5464 
5465 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5466 	hclge_sync_fd_user_def_cfg(hdev, true);
5467 
5468 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5469 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5470 
5471 	if (state == HCLGE_FD_TO_ADD) {
5472 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5473 		hclge_task_schedule(hdev, 0);
5474 	}
5475 }
5476 
5477 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5478 {
5479 	struct hclge_get_fd_mode_cmd *req;
5480 	struct hclge_desc desc;
5481 	int ret;
5482 
5483 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5484 
5485 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5486 
5487 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5488 	if (ret) {
5489 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5490 		return ret;
5491 	}
5492 
5493 	*fd_mode = req->mode;
5494 
5495 	return ret;
5496 }
5497 
5498 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5499 				   u32 *stage1_entry_num,
5500 				   u32 *stage2_entry_num,
5501 				   u16 *stage1_counter_num,
5502 				   u16 *stage2_counter_num)
5503 {
5504 	struct hclge_get_fd_allocation_cmd *req;
5505 	struct hclge_desc desc;
5506 	int ret;
5507 
5508 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5509 
5510 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5511 
5512 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5513 	if (ret) {
5514 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5515 			ret);
5516 		return ret;
5517 	}
5518 
5519 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5520 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5521 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5522 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5523 
5524 	return ret;
5525 }
5526 
5527 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5528 				   enum HCLGE_FD_STAGE stage_num)
5529 {
5530 	struct hclge_set_fd_key_config_cmd *req;
5531 	struct hclge_fd_key_cfg *stage;
5532 	struct hclge_desc desc;
5533 	int ret;
5534 
5535 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5536 
5537 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5538 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5539 	req->stage = stage_num;
5540 	req->key_select = stage->key_sel;
5541 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5542 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5543 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5544 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5545 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5546 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5547 
5548 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5549 	if (ret)
5550 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5551 
5552 	return ret;
5553 }
5554 
5555 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5556 {
5557 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5558 
5559 	spin_lock_bh(&hdev->fd_rule_lock);
5560 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5561 	spin_unlock_bh(&hdev->fd_rule_lock);
5562 
5563 	hclge_fd_set_user_def_cmd(hdev, cfg);
5564 }
5565 
5566 static int hclge_init_fd_config(struct hclge_dev *hdev)
5567 {
5568 #define LOW_2_WORDS		0x03
5569 	struct hclge_fd_key_cfg *key_cfg;
5570 	int ret;
5571 
5572 	if (!hnae3_dev_fd_supported(hdev))
5573 		return 0;
5574 
5575 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5576 	if (ret)
5577 		return ret;
5578 
5579 	switch (hdev->fd_cfg.fd_mode) {
5580 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5581 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5582 		break;
5583 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5584 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5585 		break;
5586 	default:
5587 		dev_err(&hdev->pdev->dev,
5588 			"Unsupported flow director mode %u\n",
5589 			hdev->fd_cfg.fd_mode);
5590 		return -EOPNOTSUPP;
5591 	}
5592 
5593 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5594 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5595 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5596 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5597 	key_cfg->outer_sipv6_word_en = 0;
5598 	key_cfg->outer_dipv6_word_en = 0;
5599 
5600 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5601 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5602 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5603 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5604 
5605 	/* With the max 400-bit key, src/dst mac tuples are also supported */
5606 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5607 		key_cfg->tuple_active |=
5608 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5609 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5610 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5611 	}
5612 
5613 	/* roce_type is used to filter out roce frames
5614 	 * dst_vport is used to restrict the rule to a specific vport
5615 	 */
5616 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5617 
5618 	ret = hclge_get_fd_allocation(hdev,
5619 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5620 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5621 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5622 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5623 	if (ret)
5624 		return ret;
5625 
5626 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5627 }
5628 
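/* Write one TCAM entry. The key does not fit in a single descriptor, so it
 * is split across three chained descriptors; the entry is marked valid only
 * when writing the X part of an add operation.
 */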
5629 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5630 				int loc, u8 *key, bool is_add)
5631 {
5632 	struct hclge_fd_tcam_config_1_cmd *req1;
5633 	struct hclge_fd_tcam_config_2_cmd *req2;
5634 	struct hclge_fd_tcam_config_3_cmd *req3;
5635 	struct hclge_desc desc[3];
5636 	int ret;
5637 
5638 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5639 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5640 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5641 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5642 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5643 
5644 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5645 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5646 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5647 
5648 	req1->stage = stage;
5649 	req1->xy_sel = sel_x ? 1 : 0;
5650 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5651 	req1->index = cpu_to_le32(loc);
5652 	req1->entry_vld = sel_x ? is_add : 0;
5653 
5654 	if (key) {
5655 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5656 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5657 		       sizeof(req2->tcam_data));
5658 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5659 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5660 	}
5661 
5662 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5663 	if (ret)
5664 		dev_err(&hdev->pdev->dev,
5665 			"config tcam key fail, ret=%d\n",
5666 			ret);
5667 
5668 	return ret;
5669 }
5670 
5671 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5672 			      struct hclge_fd_ad_data *action)
5673 {
5674 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5675 	struct hclge_fd_ad_config_cmd *req;
5676 	struct hclge_desc desc;
5677 	u64 ad_data = 0;
5678 	int ret;
5679 
5680 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5681 
5682 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5683 	req->index = cpu_to_le32(loc);
5684 	req->stage = stage;
5685 
5686 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5687 		      action->write_rule_id_to_bd);
5688 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5689 			action->rule_id);
5690 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5691 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5692 			      action->override_tc);
5693 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5694 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5695 	}
5696 	ad_data <<= 32;
5697 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5698 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5699 		      action->forward_to_direct_queue);
5700 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5701 			action->queue_id);
5702 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5703 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5704 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5705 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5706 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5707 			action->counter_id);
5708 
5709 	req->ad_data = cpu_to_le64(ad_data);
5710 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5711 	if (ret)
5712 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5713 
5714 	return ret;
5715 }
5716 
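/* Convert one tuple of the rule into the key_x/key_y pair consumed by the
 * TCAM. Return true if the tuple occupies space in the key (filled from the
 * rule, or left as zero when the tuple is unused), so the caller advances
 * the key pointers; return false for an unsupported key option.
 */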
5717 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5718 				   struct hclge_fd_rule *rule)
5719 {
5720 	int offset, moffset, ip_offset;
5721 	enum HCLGE_FD_KEY_OPT key_opt;
5722 	u16 tmp_x_s, tmp_y_s;
5723 	u32 tmp_x_l, tmp_y_l;
5724 	u8 *p = (u8 *)rule;
5725 	int i;
5726 
5727 	if (rule->unused_tuple & BIT(tuple_bit))
5728 		return true;
5729 
5730 	key_opt = tuple_key_info[tuple_bit].key_opt;
5731 	offset = tuple_key_info[tuple_bit].offset;
5732 	moffset = tuple_key_info[tuple_bit].moffset;
5733 
5734 	switch (key_opt) {
5735 	case KEY_OPT_U8:
5736 		calc_x(*key_x, p[offset], p[moffset]);
5737 		calc_y(*key_y, p[offset], p[moffset]);
5738 
5739 		return true;
5740 	case KEY_OPT_LE16:
5741 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5742 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5743 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5744 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5745 
5746 		return true;
5747 	case KEY_OPT_LE32:
5748 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5749 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5750 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5751 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5752 
5753 		return true;
5754 	case KEY_OPT_MAC:
5755 		for (i = 0; i < ETH_ALEN; i++) {
5756 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5757 			       p[moffset + i]);
5758 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5759 			       p[moffset + i]);
5760 		}
5761 
5762 		return true;
5763 	case KEY_OPT_IP:
5764 		ip_offset = IPV4_INDEX * sizeof(u32);
5765 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5766 		       *(u32 *)(&p[moffset + ip_offset]));
5767 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5768 		       *(u32 *)(&p[moffset + ip_offset]));
5769 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5770 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5771 
5772 		return true;
5773 	default:
5774 		return false;
5775 	}
5776 }
5777 
5778 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5779 				 u8 vf_id, u8 network_port_id)
5780 {
5781 	u32 port_number = 0;
5782 
5783 	if (port_type == HOST_PORT) {
5784 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5785 				pf_id);
5786 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5787 				vf_id);
5788 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5789 	} else {
5790 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5791 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5792 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5793 	}
5794 
5795 	return port_number;
5796 }
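/* Pack the active meta data fields (e.g. roce_type and dst_vport) into a
 * 32-bit word from bit 0 upwards, then shift the result so it occupies the
 * most significant bits of the meta data region of the key.
 */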
5797 
5798 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5799 				       __le32 *key_x, __le32 *key_y,
5800 				       struct hclge_fd_rule *rule)
5801 {
5802 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5803 	u8 cur_pos = 0, tuple_size, shift_bits;
5804 	unsigned int i;
5805 
5806 	for (i = 0; i < MAX_META_DATA; i++) {
5807 		tuple_size = meta_data_key_info[i].key_length;
5808 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5809 
5810 		switch (tuple_bit) {
5811 		case BIT(ROCE_TYPE):
5812 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5813 			cur_pos += tuple_size;
5814 			break;
5815 		case BIT(DST_VPORT):
5816 			port_number = hclge_get_port_number(HOST_PORT, 0,
5817 							    rule->vf_id, 0);
5818 			hnae3_set_field(meta_data,
5819 					GENMASK(cur_pos + tuple_size, cur_pos),
5820 					cur_pos, port_number);
5821 			cur_pos += tuple_size;
5822 			break;
5823 		default:
5824 			break;
5825 		}
5826 	}
5827 
5828 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5829 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5830 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5831 
5832 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5833 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5834 }
5835 
5836 /* A complete key consists of a meta data key and a tuple key.
5837  * The meta data key is stored in the MSB region, the tuple key in the
5838  * LSB region, and unused bits are filled with 0.
5839  */
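/* For example (actual sizes depend on the configured fd mode), the key
 * buffer is laid out as:
 *   | tuple key (active tuples, from LSB) | zero padding | meta data key |
 * where the meta data always occupies the last MAX_META_DATA_LENGTH bits of
 * the max_key_length-bit key.
 */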
5840 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5841 			    struct hclge_fd_rule *rule)
5842 {
5843 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5844 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5845 	u8 *cur_key_x, *cur_key_y;
5846 	u8 meta_data_region;
5847 	u8 tuple_size;
5848 	int ret;
5849 	u32 i;
5850 
5851 	memset(key_x, 0, sizeof(key_x));
5852 	memset(key_y, 0, sizeof(key_y));
5853 	cur_key_x = key_x;
5854 	cur_key_y = key_y;
5855 
5856 	for (i = 0; i < MAX_TUPLE; i++) {
5857 		bool tuple_valid;
5858 
5859 		tuple_size = tuple_key_info[i].key_length / 8;
5860 		if (!(key_cfg->tuple_active & BIT(i)))
5861 			continue;
5862 
5863 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5864 						     cur_key_y, rule);
5865 		if (tuple_valid) {
5866 			cur_key_x += tuple_size;
5867 			cur_key_y += tuple_size;
5868 		}
5869 	}
5870 
5871 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5872 			MAX_META_DATA_LENGTH / 8;
5873 
5874 	hclge_fd_convert_meta_data(key_cfg,
5875 				   (__le32 *)(key_x + meta_data_region),
5876 				   (__le32 *)(key_y + meta_data_region),
5877 				   rule);
5878 
5879 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5880 				   true);
5881 	if (ret) {
5882 		dev_err(&hdev->pdev->dev,
5883 			"fd key_y config fail, loc=%u, ret=%d\n",
5884 			rule->location, ret);
5885 		return ret;
5886 	}
5887 
5888 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5889 				   true);
5890 	if (ret)
5891 		dev_err(&hdev->pdev->dev,
5892 			"fd key_x config fail, loc=%u, ret=%d\n",
5893 			rule->location, ret);
5894 	return ret;
5895 }
5896 
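/* Build the action data for a rule: drop the packet, override the TC (using
 * the TC's queue offset and log2 of its queue count), or forward to a
 * specific queue. The rule location is always written back as the rule id.
 */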
5897 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5898 			       struct hclge_fd_rule *rule)
5899 {
5900 	struct hclge_vport *vport = hdev->vport;
5901 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5902 	struct hclge_fd_ad_data ad_data;
5903 
5904 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5905 	ad_data.ad_id = rule->location;
5906 
5907 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5908 		ad_data.drop_packet = true;
5909 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5910 		ad_data.override_tc = true;
5911 		ad_data.queue_id =
5912 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5913 		ad_data.tc_size =
5914 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5915 	} else {
5916 		ad_data.forward_to_direct_queue = true;
5917 		ad_data.queue_id = rule->queue_id;
5918 	}
5919 
5920 	ad_data.use_counter = false;
5921 	ad_data.counter_id = 0;
5922 
5923 	ad_data.use_next_stage = false;
5924 	ad_data.next_input_key = 0;
5925 
5926 	ad_data.write_rule_id_to_bd = true;
5927 	ad_data.rule_id = rule->location;
5928 
5929 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5930 }
5931 
5932 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5933 				       u32 *unused_tuple)
5934 {
5935 	if (!spec || !unused_tuple)
5936 		return -EINVAL;
5937 
5938 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5939 
5940 	if (!spec->ip4src)
5941 		*unused_tuple |= BIT(INNER_SRC_IP);
5942 
5943 	if (!spec->ip4dst)
5944 		*unused_tuple |= BIT(INNER_DST_IP);
5945 
5946 	if (!spec->psrc)
5947 		*unused_tuple |= BIT(INNER_SRC_PORT);
5948 
5949 	if (!spec->pdst)
5950 		*unused_tuple |= BIT(INNER_DST_PORT);
5951 
5952 	if (!spec->tos)
5953 		*unused_tuple |= BIT(INNER_IP_TOS);
5954 
5955 	return 0;
5956 }
5957 
5958 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5959 				    u32 *unused_tuple)
5960 {
5961 	if (!spec || !unused_tuple)
5962 		return -EINVAL;
5963 
5964 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5965 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5966 
5967 	if (!spec->ip4src)
5968 		*unused_tuple |= BIT(INNER_SRC_IP);
5969 
5970 	if (!spec->ip4dst)
5971 		*unused_tuple |= BIT(INNER_DST_IP);
5972 
5973 	if (!spec->tos)
5974 		*unused_tuple |= BIT(INNER_IP_TOS);
5975 
5976 	if (!spec->proto)
5977 		*unused_tuple |= BIT(INNER_IP_PROTO);
5978 
5979 	if (spec->l4_4_bytes)
5980 		return -EOPNOTSUPP;
5981 
5982 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5983 		return -EOPNOTSUPP;
5984 
5985 	return 0;
5986 }
5987 
5988 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5989 				       u32 *unused_tuple)
5990 {
5991 	if (!spec || !unused_tuple)
5992 		return -EINVAL;
5993 
5994 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5995 
5996 	/* check whether the src/dst ip addresses are used */
5997 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5998 		*unused_tuple |= BIT(INNER_SRC_IP);
5999 
6000 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6001 		*unused_tuple |= BIT(INNER_DST_IP);
6002 
6003 	if (!spec->psrc)
6004 		*unused_tuple |= BIT(INNER_SRC_PORT);
6005 
6006 	if (!spec->pdst)
6007 		*unused_tuple |= BIT(INNER_DST_PORT);
6008 
6009 	if (!spec->tclass)
6010 		*unused_tuple |= BIT(INNER_IP_TOS);
6011 
6012 	return 0;
6013 }
6014 
6015 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6016 				    u32 *unused_tuple)
6017 {
6018 	if (!spec || !unused_tuple)
6019 		return -EINVAL;
6020 
6021 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6022 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6023 
6024 	/* check whether the src/dst ip addresses are used */
6025 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6026 		*unused_tuple |= BIT(INNER_SRC_IP);
6027 
6028 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6029 		*unused_tuple |= BIT(INNER_DST_IP);
6030 
6031 	if (!spec->l4_proto)
6032 		*unused_tuple |= BIT(INNER_IP_PROTO);
6033 
6034 	if (!spec->tclass)
6035 		*unused_tuple |= BIT(INNER_IP_TOS);
6036 
6037 	if (spec->l4_4_bytes)
6038 		return -EOPNOTSUPP;
6039 
6040 	return 0;
6041 }
6042 
6043 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6044 {
6045 	if (!spec || !unused_tuple)
6046 		return -EINVAL;
6047 
6048 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6049 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6050 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6051 
6052 	if (is_zero_ether_addr(spec->h_source))
6053 		*unused_tuple |= BIT(INNER_SRC_MAC);
6054 
6055 	if (is_zero_ether_addr(spec->h_dest))
6056 		*unused_tuple |= BIT(INNER_DST_MAC);
6057 
6058 	if (!spec->h_proto)
6059 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6060 
6061 	return 0;
6062 }
6063 
6064 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6065 				    struct ethtool_rx_flow_spec *fs,
6066 				    u32 *unused_tuple)
6067 {
6068 	if (fs->flow_type & FLOW_EXT) {
6069 		if (fs->h_ext.vlan_etype) {
6070 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6071 			return -EOPNOTSUPP;
6072 		}
6073 
6074 		if (!fs->h_ext.vlan_tci)
6075 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6076 
6077 		if (fs->m_ext.vlan_tci &&
6078 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6079 			dev_err(&hdev->pdev->dev,
6080 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6081 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6082 			return -EINVAL;
6083 		}
6084 	} else {
6085 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6086 	}
6087 
6088 	if (fs->flow_type & FLOW_MAC_EXT) {
6089 		if (hdev->fd_cfg.fd_mode !=
6090 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6091 			dev_err(&hdev->pdev->dev,
6092 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6093 			return -EOPNOTSUPP;
6094 		}
6095 
6096 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6097 			*unused_tuple |= BIT(INNER_DST_MAC);
6098 		else
6099 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6100 	}
6101 
6102 	return 0;
6103 }
6104 
6105 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6106 				       struct hclge_fd_user_def_info *info)
6107 {
6108 	switch (flow_type) {
6109 	case ETHER_FLOW:
6110 		info->layer = HCLGE_FD_USER_DEF_L2;
6111 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6112 		break;
6113 	case IP_USER_FLOW:
6114 	case IPV6_USER_FLOW:
6115 		info->layer = HCLGE_FD_USER_DEF_L3;
6116 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6117 		break;
6118 	case TCP_V4_FLOW:
6119 	case UDP_V4_FLOW:
6120 	case TCP_V6_FLOW:
6121 	case UDP_V6_FLOW:
6122 		info->layer = HCLGE_FD_USER_DEF_L4;
6123 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6124 		break;
6125 	default:
6126 		return -EOPNOTSUPP;
6127 	}
6128 
6129 	return 0;
6130 }
6131 
6132 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6133 {
6134 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6135 }
6136 
6137 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6138 					 struct ethtool_rx_flow_spec *fs,
6139 					 u32 *unused_tuple,
6140 					 struct hclge_fd_user_def_info *info)
6141 {
6142 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6143 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6144 	u16 data, offset, data_mask, offset_mask;
6145 	int ret;
6146 
6147 	info->layer = HCLGE_FD_USER_DEF_NONE;
6148 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6149 
6150 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6151 		return 0;
6152 
6153 	/* the user-def data from ethtool is a 64-bit value; bits 0~15 are
6154 	 * used for the data and bits 32~47 for the offset.
6155 	 */
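	/* e.g. a 64-bit user-def value of 0x00120000abcd yields offset 0x12
	 * and data 0xabcd below.
	 */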
6156 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6157 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6158 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6159 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6160 
6161 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6162 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6163 		return -EOPNOTSUPP;
6164 	}
6165 
6166 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6167 		dev_err(&hdev->pdev->dev,
6168 			"user-def offset[%u] should be no more than %u\n",
6169 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6170 		return -EINVAL;
6171 	}
6172 
6173 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6174 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6175 		return -EINVAL;
6176 	}
6177 
6178 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6179 	if (ret) {
6180 		dev_err(&hdev->pdev->dev,
6181 			"unsupported flow type for user-def bytes, ret = %d\n",
6182 			ret);
6183 		return ret;
6184 	}
6185 
6186 	info->data = data;
6187 	info->data_mask = data_mask;
6188 	info->offset = offset;
6189 
6190 	return 0;
6191 }
6192 
6193 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6194 			       struct ethtool_rx_flow_spec *fs,
6195 			       u32 *unused_tuple,
6196 			       struct hclge_fd_user_def_info *info)
6197 {
6198 	u32 flow_type;
6199 	int ret;
6200 
6201 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6202 		dev_err(&hdev->pdev->dev,
6203 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
6204 			fs->location,
6205 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6206 		return -EINVAL;
6207 	}
6208 
6209 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6210 	if (ret)
6211 		return ret;
6212 
6213 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6214 	switch (flow_type) {
6215 	case SCTP_V4_FLOW:
6216 	case TCP_V4_FLOW:
6217 	case UDP_V4_FLOW:
6218 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6219 						  unused_tuple);
6220 		break;
6221 	case IP_USER_FLOW:
6222 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6223 					       unused_tuple);
6224 		break;
6225 	case SCTP_V6_FLOW:
6226 	case TCP_V6_FLOW:
6227 	case UDP_V6_FLOW:
6228 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6229 						  unused_tuple);
6230 		break;
6231 	case IPV6_USER_FLOW:
6232 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6233 					       unused_tuple);
6234 		break;
6235 	case ETHER_FLOW:
6236 		if (hdev->fd_cfg.fd_mode !=
6237 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6238 			dev_err(&hdev->pdev->dev,
6239 				"ETHER_FLOW is not supported in current fd mode!\n");
6240 			return -EOPNOTSUPP;
6241 		}
6242 
6243 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6244 						 unused_tuple);
6245 		break;
6246 	default:
6247 		dev_err(&hdev->pdev->dev,
6248 			"unsupported protocol type, protocol type = %#x\n",
6249 			flow_type);
6250 		return -EOPNOTSUPP;
6251 	}
6252 
6253 	if (ret) {
6254 		dev_err(&hdev->pdev->dev,
6255 			"failed to check flow union tuple, ret = %d\n",
6256 			ret);
6257 		return ret;
6258 	}
6259 
6260 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6261 }
6262 
6263 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6264 				      struct ethtool_rx_flow_spec *fs,
6265 				      struct hclge_fd_rule *rule, u8 ip_proto)
6266 {
6267 	rule->tuples.src_ip[IPV4_INDEX] =
6268 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6269 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6270 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6271 
6272 	rule->tuples.dst_ip[IPV4_INDEX] =
6273 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6274 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6275 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6276 
6277 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6278 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6279 
6280 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6281 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6282 
6283 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6284 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6285 
6286 	rule->tuples.ether_proto = ETH_P_IP;
6287 	rule->tuples_mask.ether_proto = 0xFFFF;
6288 
6289 	rule->tuples.ip_proto = ip_proto;
6290 	rule->tuples_mask.ip_proto = 0xFF;
6291 }
6292 
6293 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6294 				   struct ethtool_rx_flow_spec *fs,
6295 				   struct hclge_fd_rule *rule)
6296 {
6297 	rule->tuples.src_ip[IPV4_INDEX] =
6298 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6299 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6300 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6301 
6302 	rule->tuples.dst_ip[IPV4_INDEX] =
6303 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6304 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6305 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6306 
6307 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6308 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6309 
6310 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6311 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6312 
6313 	rule->tuples.ether_proto = ETH_P_IP;
6314 	rule->tuples_mask.ether_proto = 0xFFFF;
6315 }
6316 
6317 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6318 				      struct ethtool_rx_flow_spec *fs,
6319 				      struct hclge_fd_rule *rule, u8 ip_proto)
6320 {
6321 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6322 			  IPV6_SIZE);
6323 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6324 			  IPV6_SIZE);
6325 
6326 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6327 			  IPV6_SIZE);
6328 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6329 			  IPV6_SIZE);
6330 
6331 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6332 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6333 
6334 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6335 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6336 
6337 	rule->tuples.ether_proto = ETH_P_IPV6;
6338 	rule->tuples_mask.ether_proto = 0xFFFF;
6339 
6340 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6341 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6342 
6343 	rule->tuples.ip_proto = ip_proto;
6344 	rule->tuples_mask.ip_proto = 0xFF;
6345 }
6346 
6347 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6348 				   struct ethtool_rx_flow_spec *fs,
6349 				   struct hclge_fd_rule *rule)
6350 {
6351 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6352 			  IPV6_SIZE);
6353 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6354 			  IPV6_SIZE);
6355 
6356 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6357 			  IPV6_SIZE);
6358 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6359 			  IPV6_SIZE);
6360 
6361 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6362 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6363 
6364 	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6365 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6366 
6367 	rule->tuples.ether_proto = ETH_P_IPV6;
6368 	rule->tuples_mask.ether_proto = 0xFFFF;
6369 }
6370 
6371 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6372 				     struct ethtool_rx_flow_spec *fs,
6373 				     struct hclge_fd_rule *rule)
6374 {
6375 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6376 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6377 
6378 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6379 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6380 
6381 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6382 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6383 }
6384 
6385 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6386 					struct hclge_fd_rule *rule)
6387 {
6388 	switch (info->layer) {
6389 	case HCLGE_FD_USER_DEF_L2:
6390 		rule->tuples.l2_user_def = info->data;
6391 		rule->tuples_mask.l2_user_def = info->data_mask;
6392 		break;
6393 	case HCLGE_FD_USER_DEF_L3:
6394 		rule->tuples.l3_user_def = info->data;
6395 		rule->tuples_mask.l3_user_def = info->data_mask;
6396 		break;
6397 	case HCLGE_FD_USER_DEF_L4:
6398 		rule->tuples.l4_user_def = (u32)info->data << 16;
6399 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6400 		break;
6401 	default:
6402 		break;
6403 	}
6404 
6405 	rule->ep.user_def = *info;
6406 }
6407 
6408 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6409 			      struct ethtool_rx_flow_spec *fs,
6410 			      struct hclge_fd_rule *rule,
6411 			      struct hclge_fd_user_def_info *info)
6412 {
6413 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6414 
6415 	switch (flow_type) {
6416 	case SCTP_V4_FLOW:
6417 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6418 		break;
6419 	case TCP_V4_FLOW:
6420 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6421 		break;
6422 	case UDP_V4_FLOW:
6423 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6424 		break;
6425 	case IP_USER_FLOW:
6426 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6427 		break;
6428 	case SCTP_V6_FLOW:
6429 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6430 		break;
6431 	case TCP_V6_FLOW:
6432 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6433 		break;
6434 	case UDP_V6_FLOW:
6435 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6436 		break;
6437 	case IPV6_USER_FLOW:
6438 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6439 		break;
6440 	case ETHER_FLOW:
6441 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6442 		break;
6443 	default:
6444 		return -EOPNOTSUPP;
6445 	}
6446 
6447 	if (fs->flow_type & FLOW_EXT) {
6448 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6449 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6450 		hclge_fd_get_user_def_tuple(info, rule);
6451 	}
6452 
6453 	if (fs->flow_type & FLOW_MAC_EXT) {
6454 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6455 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6456 	}
6457 
6458 	return 0;
6459 }
6460 
6461 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6462 				struct hclge_fd_rule *rule)
6463 {
6464 	int ret;
6465 
6466 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6467 	if (ret)
6468 		return ret;
6469 
6470 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6471 }
6472 
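/* Common path for adding a rule from ethtool (EP) or tc flower. Rules of
 * these two types cannot coexist, so the request is rejected while rules of
 * the other type are active; aRFS rules, however, are simply cleared before
 * the new rule is programmed.
 */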
6473 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6474 				     struct hclge_fd_rule *rule)
6475 {
6476 	int ret;
6477 
6478 	spin_lock_bh(&hdev->fd_rule_lock);
6479 
6480 	if (hdev->fd_active_type != rule->rule_type &&
6481 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6482 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6483 		dev_err(&hdev->pdev->dev,
6484 			"mode conflict (new type %d, active type %d), please delete the existing rules first\n",
6485 			rule->rule_type, hdev->fd_active_type);
6486 		spin_unlock_bh(&hdev->fd_rule_lock);
6487 		return -EINVAL;
6488 	}
6489 
6490 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6491 	if (ret)
6492 		goto out;
6493 
6494 	ret = hclge_clear_arfs_rules(hdev);
6495 	if (ret)
6496 		goto out;
6497 
6498 	ret = hclge_fd_config_rule(hdev, rule);
6499 	if (ret)
6500 		goto out;
6501 
6502 	rule->state = HCLGE_FD_ACTIVE;
6503 	hdev->fd_active_type = rule->rule_type;
6504 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6505 
6506 out:
6507 	spin_unlock_bh(&hdev->fd_rule_lock);
6508 	return ret;
6509 }
6510 
6511 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6512 {
6513 	struct hclge_vport *vport = hclge_get_vport(handle);
6514 	struct hclge_dev *hdev = vport->back;
6515 
6516 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6517 }
6518 
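/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet;
 * otherwise the cookie encodes an optional VF index (selecting the
 * destination vport) and a ring index, which must be within that vport's
 * TQP range.
 */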
6519 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6520 				      u16 *vport_id, u8 *action, u16 *queue_id)
6521 {
6522 	struct hclge_vport *vport = hdev->vport;
6523 
6524 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6525 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6526 	} else {
6527 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6528 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6529 		u16 tqps;
6530 
6531 		if (vf > hdev->num_req_vfs) {
6532 			dev_err(&hdev->pdev->dev,
6533 				"Error: vf id (%u) > max vf num (%u)\n",
6534 				vf, hdev->num_req_vfs);
6535 			return -EINVAL;
6536 		}
6537 
6538 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6539 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6540 
6541 		if (ring >= tqps) {
6542 			dev_err(&hdev->pdev->dev,
6543 				"Error: queue id (%u) > max queue id (%u)\n",
6544 				ring, tqps - 1);
6545 			return -EINVAL;
6546 		}
6547 
6548 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6549 		*queue_id = ring;
6550 	}
6551 
6552 	return 0;
6553 }
6554 
6555 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6556 			      struct ethtool_rxnfc *cmd)
6557 {
6558 	struct hclge_vport *vport = hclge_get_vport(handle);
6559 	struct hclge_dev *hdev = vport->back;
6560 	struct hclge_fd_user_def_info info;
6561 	u16 dst_vport_id = 0, q_index = 0;
6562 	struct ethtool_rx_flow_spec *fs;
6563 	struct hclge_fd_rule *rule;
6564 	u32 unused = 0;
6565 	u8 action;
6566 	int ret;
6567 
6568 	if (!hnae3_dev_fd_supported(hdev)) {
6569 		dev_err(&hdev->pdev->dev,
6570 			"flow director is not supported\n");
6571 		return -EOPNOTSUPP;
6572 	}
6573 
6574 	if (!hdev->fd_en) {
6575 		dev_err(&hdev->pdev->dev,
6576 			"please enable flow director first\n");
6577 		return -EOPNOTSUPP;
6578 	}
6579 
6580 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6581 
6582 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6583 	if (ret)
6584 		return ret;
6585 
6586 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6587 					 &action, &q_index);
6588 	if (ret)
6589 		return ret;
6590 
6591 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6592 	if (!rule)
6593 		return -ENOMEM;
6594 
6595 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6596 	if (ret) {
6597 		kfree(rule);
6598 		return ret;
6599 	}
6600 
6601 	rule->flow_type = fs->flow_type;
6602 	rule->location = fs->location;
6603 	rule->unused_tuple = unused;
6604 	rule->vf_id = dst_vport_id;
6605 	rule->queue_id = q_index;
6606 	rule->action = action;
6607 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6608 
6609 	ret = hclge_add_fd_entry_common(hdev, rule);
6610 	if (ret)
6611 		kfree(rule);
6612 
6613 	return ret;
6614 }
6615 
6616 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6617 			      struct ethtool_rxnfc *cmd)
6618 {
6619 	struct hclge_vport *vport = hclge_get_vport(handle);
6620 	struct hclge_dev *hdev = vport->back;
6621 	struct ethtool_rx_flow_spec *fs;
6622 	int ret;
6623 
6624 	if (!hnae3_dev_fd_supported(hdev))
6625 		return -EOPNOTSUPP;
6626 
6627 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6628 
6629 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6630 		return -EINVAL;
6631 
6632 	spin_lock_bh(&hdev->fd_rule_lock);
6633 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6634 	    !test_bit(fs->location, hdev->fd_bmap)) {
6635 		dev_err(&hdev->pdev->dev,
6636 			"Delete fail, rule %u does not exist\n", fs->location);
6637 		spin_unlock_bh(&hdev->fd_rule_lock);
6638 		return -ENOENT;
6639 	}
6640 
6641 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6642 				   NULL, false);
6643 	if (ret)
6644 		goto out;
6645 
6646 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6647 
6648 out:
6649 	spin_unlock_bh(&hdev->fd_rule_lock);
6650 	return ret;
6651 }
6652 
6653 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6654 					 bool clear_list)
6655 {
6656 	struct hclge_fd_rule *rule;
6657 	struct hlist_node *node;
6658 	u16 location;
6659 
6660 	if (!hnae3_dev_fd_supported(hdev))
6661 		return;
6662 
6663 	spin_lock_bh(&hdev->fd_rule_lock);
6664 
6665 	for_each_set_bit(location, hdev->fd_bmap,
6666 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6667 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6668 				     NULL, false);
6669 
6670 	if (clear_list) {
6671 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6672 					  rule_node) {
6673 			hlist_del(&rule->rule_node);
6674 			kfree(rule);
6675 		}
6676 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6677 		hdev->hclge_fd_rule_num = 0;
6678 		bitmap_zero(hdev->fd_bmap,
6679 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6680 	}
6681 
6682 	spin_unlock_bh(&hdev->fd_rule_lock);
6683 }
6684 
6685 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6686 {
6687 	hclge_clear_fd_rules_in_list(hdev, true);
6688 	hclge_fd_disable_user_def(hdev);
6689 }
6690 
6691 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6692 {
6693 	struct hclge_vport *vport = hclge_get_vport(handle);
6694 	struct hclge_dev *hdev = vport->back;
6695 	struct hclge_fd_rule *rule;
6696 	struct hlist_node *node;
6697 
6698 	/* Return 0 here, because the reset error handling checks this
6699 	 * return value. If an error is returned here, the reset process
6700 	 * will fail.
6701 	 */
6702 	if (!hnae3_dev_fd_supported(hdev))
6703 		return 0;
6704 
6705 	/* if fd is disabled, the rules should not be restored on reset */
6706 	if (!hdev->fd_en)
6707 		return 0;
6708 
6709 	spin_lock_bh(&hdev->fd_rule_lock);
6710 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6711 		if (rule->state == HCLGE_FD_ACTIVE)
6712 			rule->state = HCLGE_FD_TO_ADD;
6713 	}
6714 	spin_unlock_bh(&hdev->fd_rule_lock);
6715 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6716 
6717 	return 0;
6718 }
6719 
6720 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6721 				 struct ethtool_rxnfc *cmd)
6722 {
6723 	struct hclge_vport *vport = hclge_get_vport(handle);
6724 	struct hclge_dev *hdev = vport->back;
6725 
6726 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6727 		return -EOPNOTSUPP;
6728 
6729 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6730 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6731 
6732 	return 0;
6733 }
6734 
6735 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6736 				     struct ethtool_tcpip4_spec *spec,
6737 				     struct ethtool_tcpip4_spec *spec_mask)
6738 {
6739 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6740 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6741 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6742 
6743 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6744 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6745 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6746 
6747 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6748 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6749 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6750 
6751 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6752 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6753 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6754 
6755 	spec->tos = rule->tuples.ip_tos;
6756 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6757 			0 : rule->tuples_mask.ip_tos;
6758 }
6759 
6760 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6761 				  struct ethtool_usrip4_spec *spec,
6762 				  struct ethtool_usrip4_spec *spec_mask)
6763 {
6764 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6765 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6766 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6767 
6768 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6769 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6770 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6771 
6772 	spec->tos = rule->tuples.ip_tos;
6773 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6774 			0 : rule->tuples_mask.ip_tos;
6775 
6776 	spec->proto = rule->tuples.ip_proto;
6777 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6778 			0 : rule->tuples_mask.ip_proto;
6779 
6780 	spec->ip_ver = ETH_RX_NFC_IP4;
6781 }
6782 
6783 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6784 				     struct ethtool_tcpip6_spec *spec,
6785 				     struct ethtool_tcpip6_spec *spec_mask)
6786 {
6787 	cpu_to_be32_array(spec->ip6src,
6788 			  rule->tuples.src_ip, IPV6_SIZE);
6789 	cpu_to_be32_array(spec->ip6dst,
6790 			  rule->tuples.dst_ip, IPV6_SIZE);
6791 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6792 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6793 	else
6794 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6795 				  IPV6_SIZE);
6796 
6797 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6798 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6799 	else
6800 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6801 				  IPV6_SIZE);
6802 
6803 	spec->tclass = rule->tuples.ip_tos;
6804 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6805 			0 : rule->tuples_mask.ip_tos;
6806 
6807 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6808 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6809 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6810 
6811 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6812 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6813 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6814 }
6815 
6816 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6817 				  struct ethtool_usrip6_spec *spec,
6818 				  struct ethtool_usrip6_spec *spec_mask)
6819 {
6820 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6821 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6822 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6823 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6824 	else
6825 		cpu_to_be32_array(spec_mask->ip6src,
6826 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6827 
6828 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6829 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6830 	else
6831 		cpu_to_be32_array(spec_mask->ip6dst,
6832 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6833 
6834 	spec->tclass = rule->tuples.ip_tos;
6835 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6836 			0 : rule->tuples_mask.ip_tos;
6837 
6838 	spec->l4_proto = rule->tuples.ip_proto;
6839 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6840 			0 : rule->tuples_mask.ip_proto;
6841 }
6842 
6843 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6844 				    struct ethhdr *spec,
6845 				    struct ethhdr *spec_mask)
6846 {
6847 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6848 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6849 
6850 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6851 		eth_zero_addr(spec_mask->h_source);
6852 	else
6853 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6854 
6855 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6856 		eth_zero_addr(spec_mask->h_dest);
6857 	else
6858 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6859 
6860 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6861 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6862 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6863 }
6864 
6865 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6866 				       struct hclge_fd_rule *rule)
6867 {
6868 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6869 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6870 		fs->h_ext.data[0] = 0;
6871 		fs->h_ext.data[1] = 0;
6872 		fs->m_ext.data[0] = 0;
6873 		fs->m_ext.data[1] = 0;
6874 	} else {
6875 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6876 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6877 		fs->m_ext.data[0] =
6878 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6879 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6880 	}
6881 }
6882 
6883 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6884 				  struct hclge_fd_rule *rule)
6885 {
6886 	if (fs->flow_type & FLOW_EXT) {
6887 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6888 		fs->m_ext.vlan_tci =
6889 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6890 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6891 
6892 		hclge_fd_get_user_def_info(fs, rule);
6893 	}
6894 
6895 	if (fs->flow_type & FLOW_MAC_EXT) {
6896 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6897 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6898 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6899 		else
6900 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6901 					rule->tuples_mask.dst_mac);
6902 	}
6903 }
6904 
6905 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6906 				  struct ethtool_rxnfc *cmd)
6907 {
6908 	struct hclge_vport *vport = hclge_get_vport(handle);
6909 	struct hclge_fd_rule *rule = NULL;
6910 	struct hclge_dev *hdev = vport->back;
6911 	struct ethtool_rx_flow_spec *fs;
6912 	struct hlist_node *node2;
6913 
6914 	if (!hnae3_dev_fd_supported(hdev))
6915 		return -EOPNOTSUPP;
6916 
6917 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6918 
6919 	spin_lock_bh(&hdev->fd_rule_lock);
6920 
6921 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6922 		if (rule->location >= fs->location)
6923 			break;
6924 	}
6925 
6926 	if (!rule || fs->location != rule->location) {
6927 		spin_unlock_bh(&hdev->fd_rule_lock);
6928 
6929 		return -ENOENT;
6930 	}
6931 
6932 	fs->flow_type = rule->flow_type;
6933 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6934 	case SCTP_V4_FLOW:
6935 	case TCP_V4_FLOW:
6936 	case UDP_V4_FLOW:
6937 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6938 					 &fs->m_u.tcp_ip4_spec);
6939 		break;
6940 	case IP_USER_FLOW:
6941 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6942 				      &fs->m_u.usr_ip4_spec);
6943 		break;
6944 	case SCTP_V6_FLOW:
6945 	case TCP_V6_FLOW:
6946 	case UDP_V6_FLOW:
6947 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6948 					 &fs->m_u.tcp_ip6_spec);
6949 		break;
6950 	case IPV6_USER_FLOW:
6951 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6952 				      &fs->m_u.usr_ip6_spec);
6953 		break;
	/* The flow type of the fd rule has been checked before it was added
	 * to the rule list. As the other flow types have been handled above,
	 * it must be ETHER_FLOW for the default case.
	 */
6958 	default:
6959 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6960 					&fs->m_u.ether_spec);
6961 		break;
6962 	}
6963 
6964 	hclge_fd_get_ext_info(fs, rule);
6965 
6966 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6967 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6968 	} else {
6969 		u64 vf_id;
6970 
6971 		fs->ring_cookie = rule->queue_id;
6972 		vf_id = rule->vf_id;
6973 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6974 		fs->ring_cookie |= vf_id;
6975 	}
6976 
6977 	spin_unlock_bh(&hdev->fd_rule_lock);
6978 
6979 	return 0;
6980 }
6981 
6982 static int hclge_get_all_rules(struct hnae3_handle *handle,
6983 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6984 {
6985 	struct hclge_vport *vport = hclge_get_vport(handle);
6986 	struct hclge_dev *hdev = vport->back;
6987 	struct hclge_fd_rule *rule;
6988 	struct hlist_node *node2;
6989 	int cnt = 0;
6990 
6991 	if (!hnae3_dev_fd_supported(hdev))
6992 		return -EOPNOTSUPP;
6993 
6994 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6995 
6996 	spin_lock_bh(&hdev->fd_rule_lock);
6997 	hlist_for_each_entry_safe(rule, node2,
6998 				  &hdev->fd_rule_list, rule_node) {
6999 		if (cnt == cmd->rule_cnt) {
7000 			spin_unlock_bh(&hdev->fd_rule_lock);
7001 			return -EMSGSIZE;
7002 		}
7003 
7004 		if (rule->state == HCLGE_FD_TO_DEL)
7005 			continue;
7006 
7007 		rule_locs[cnt] = rule->location;
7008 		cnt++;
7009 	}
7010 
7011 	spin_unlock_bh(&hdev->fd_rule_lock);
7012 
7013 	cmd->rule_cnt = cnt;
7014 
7015 	return 0;
7016 }
7017 
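/* Extract the tuples from the dissected flow keys into host byte order.
 * An IPv4 address is kept in the last word of the IPv6-sized address array.
 */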
7018 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7019 				     struct hclge_fd_rule_tuples *tuples)
7020 {
7021 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7022 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7023 
7024 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7025 	tuples->ip_proto = fkeys->basic.ip_proto;
7026 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7027 
7028 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7029 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7030 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7031 	} else {
7032 		int i;
7033 
7034 		for (i = 0; i < IPV6_SIZE; i++) {
7035 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7036 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7037 		}
7038 	}
7039 }
7040 
/* traverse all rules, check whether an existing rule has the same tuples */
7042 static struct hclge_fd_rule *
7043 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7044 			  const struct hclge_fd_rule_tuples *tuples)
7045 {
7046 	struct hclge_fd_rule *rule = NULL;
7047 	struct hlist_node *node;
7048 
7049 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7050 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7051 			return rule;
7052 	}
7053 
7054 	return NULL;
7055 }
7056 
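/* aRFS rules match only ether proto, IP proto, src/dst IP and dst port:
 * the MAC, VLAN, TOS and source port tuples are marked unused and the
 * remaining masks are set to full match. The rule starts in TO_ADD state
 * so the service task programs it into hardware later.
 */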
7057 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7058 				     struct hclge_fd_rule *rule)
7059 {
7060 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7061 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7062 			     BIT(INNER_SRC_PORT);
7063 	rule->action = 0;
7064 	rule->vf_id = 0;
7065 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7066 	rule->state = HCLGE_FD_TO_ADD;
7067 	if (tuples->ether_proto == ETH_P_IP) {
7068 		if (tuples->ip_proto == IPPROTO_TCP)
7069 			rule->flow_type = TCP_V4_FLOW;
7070 		else
7071 			rule->flow_type = UDP_V4_FLOW;
7072 	} else {
7073 		if (tuples->ip_proto == IPPROTO_TCP)
7074 			rule->flow_type = TCP_V6_FLOW;
7075 		else
7076 			rule->flow_type = UDP_V6_FLOW;
7077 	}
7078 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7079 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7080 }
7081 
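/* Handle an aRFS steering request: refuse if user-configured fd rules are
 * active; otherwise reuse an existing rule with the same tuples (re-queueing
 * it as TO_ADD if the target queue changed) or allocate a free TCAM location
 * for a new rule. Returns the rule location on success.
 */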
7082 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7083 				      u16 flow_id, struct flow_keys *fkeys)
7084 {
7085 	struct hclge_vport *vport = hclge_get_vport(handle);
7086 	struct hclge_fd_rule_tuples new_tuples = {};
7087 	struct hclge_dev *hdev = vport->back;
7088 	struct hclge_fd_rule *rule;
7089 	u16 bit_id;
7090 
7091 	if (!hnae3_dev_fd_supported(hdev))
7092 		return -EOPNOTSUPP;
7093 
	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
7097 	spin_lock_bh(&hdev->fd_rule_lock);
7098 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7099 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7100 		spin_unlock_bh(&hdev->fd_rule_lock);
7101 		return -EOPNOTSUPP;
7102 	}
7103 
7104 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7105 
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
7111 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7112 	if (!rule) {
7113 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7114 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7115 			spin_unlock_bh(&hdev->fd_rule_lock);
7116 			return -ENOSPC;
7117 		}
7118 
7119 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7120 		if (!rule) {
7121 			spin_unlock_bh(&hdev->fd_rule_lock);
7122 			return -ENOMEM;
7123 		}
7124 
7125 		rule->location = bit_id;
7126 		rule->arfs.flow_id = flow_id;
7127 		rule->queue_id = queue_id;
7128 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7129 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7130 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7131 	} else if (rule->queue_id != queue_id) {
7132 		rule->queue_id = queue_id;
7133 		rule->state = HCLGE_FD_TO_ADD;
7134 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7135 		hclge_task_schedule(hdev, 0);
7136 	}
7137 	spin_unlock_bh(&hdev->fd_rule_lock);
7138 	return rule->location;
7139 }
7140 
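/* Walk the aRFS rules and mark entries that rps_may_expire_flow() reports
 * as stale as TO_DEL, so the fd sync task removes them from hardware.
 */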
7141 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7142 {
7143 #ifdef CONFIG_RFS_ACCEL
7144 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7145 	struct hclge_fd_rule *rule;
7146 	struct hlist_node *node;
7147 
7148 	spin_lock_bh(&hdev->fd_rule_lock);
7149 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7150 		spin_unlock_bh(&hdev->fd_rule_lock);
7151 		return;
7152 	}
7153 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7154 		if (rule->state != HCLGE_FD_ACTIVE)
7155 			continue;
7156 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7157 					rule->arfs.flow_id, rule->location)) {
7158 			rule->state = HCLGE_FD_TO_DEL;
7159 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7160 		}
7161 	}
7162 	spin_unlock_bh(&hdev->fd_rule_lock);
7163 #endif
7164 }
7165 
/* must be called with fd_rule_lock held */
7167 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7168 {
7169 #ifdef CONFIG_RFS_ACCEL
7170 	struct hclge_fd_rule *rule;
7171 	struct hlist_node *node;
7172 	int ret;
7173 
7174 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7175 		return 0;
7176 
7177 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7178 		switch (rule->state) {
7179 		case HCLGE_FD_TO_DEL:
7180 		case HCLGE_FD_ACTIVE:
7181 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7182 						   rule->location, NULL, false);
7183 			if (ret)
7184 				return ret;
7185 			fallthrough;
7186 		case HCLGE_FD_TO_ADD:
7187 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7188 			hlist_del(&rule->rule_node);
7189 			kfree(rule);
7190 			break;
7191 		default:
7192 			break;
7193 		}
7194 	}
7195 	hclge_sync_fd_state(hdev);
7196 
7197 #endif
7198 	return 0;
7199 }
7200 
7201 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7202 				    struct hclge_fd_rule *rule)
7203 {
7204 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7205 		struct flow_match_basic match;
7206 		u16 ethtype_key, ethtype_mask;
7207 
7208 		flow_rule_match_basic(flow, &match);
7209 		ethtype_key = ntohs(match.key->n_proto);
7210 		ethtype_mask = ntohs(match.mask->n_proto);
7211 
7212 		if (ethtype_key == ETH_P_ALL) {
7213 			ethtype_key = 0;
7214 			ethtype_mask = 0;
7215 		}
7216 		rule->tuples.ether_proto = ethtype_key;
7217 		rule->tuples_mask.ether_proto = ethtype_mask;
7218 		rule->tuples.ip_proto = match.key->ip_proto;
7219 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7220 	} else {
7221 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7222 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7223 	}
7224 }
7225 
7226 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7227 				  struct hclge_fd_rule *rule)
7228 {
7229 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7230 		struct flow_match_eth_addrs match;
7231 
7232 		flow_rule_match_eth_addrs(flow, &match);
7233 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7234 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7235 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7236 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7237 	} else {
7238 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7239 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7240 	}
7241 }
7242 
7243 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7244 				   struct hclge_fd_rule *rule)
7245 {
7246 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7247 		struct flow_match_vlan match;
7248 
7249 		flow_rule_match_vlan(flow, &match);
7250 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7251 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7252 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7253 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7254 	} else {
7255 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7256 	}
7257 }
7258 
7259 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7260 				 struct hclge_fd_rule *rule)
7261 {
7262 	u16 addr_type = 0;
7263 
7264 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7265 		struct flow_match_control match;
7266 
7267 		flow_rule_match_control(flow, &match);
7268 		addr_type = match.key->addr_type;
7269 	}
7270 
7271 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7272 		struct flow_match_ipv4_addrs match;
7273 
7274 		flow_rule_match_ipv4_addrs(flow, &match);
7275 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7276 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7277 						be32_to_cpu(match.mask->src);
7278 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7279 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7280 						be32_to_cpu(match.mask->dst);
7281 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7282 		struct flow_match_ipv6_addrs match;
7283 
7284 		flow_rule_match_ipv6_addrs(flow, &match);
7285 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7286 				  IPV6_SIZE);
7287 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7288 				  match.mask->src.s6_addr32, IPV6_SIZE);
7289 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7290 				  IPV6_SIZE);
7291 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7292 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7293 	} else {
7294 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7295 		rule->unused_tuple |= BIT(INNER_DST_IP);
7296 	}
7297 }
7298 
7299 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7300 				   struct hclge_fd_rule *rule)
7301 {
7302 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7303 		struct flow_match_ports match;
7304 
7305 		flow_rule_match_ports(flow, &match);
7306 
7307 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7308 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7309 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7310 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7311 	} else {
7312 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7313 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7314 	}
7315 }
7316 
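/* Translate a tc flower match into a flow director rule: reject dissector
 * keys the hardware cannot match, then let each helper copy its key/mask or
 * mark the corresponding tuple as unused.
 */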
7317 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7318 				  struct flow_cls_offload *cls_flower,
7319 				  struct hclge_fd_rule *rule)
7320 {
7321 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7322 	struct flow_dissector *dissector = flow->match.dissector;
7323 
7324 	if (dissector->used_keys &
7325 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7326 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7327 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7328 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7329 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7330 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7331 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7332 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7333 			dissector->used_keys);
7334 		return -EOPNOTSUPP;
7335 	}
7336 
7337 	hclge_get_cls_key_basic(flow, rule);
7338 	hclge_get_cls_key_mac(flow, rule);
7339 	hclge_get_cls_key_vlan(flow, rule);
7340 	hclge_get_cls_key_ip(flow, rule);
7341 	hclge_get_cls_key_port(flow, rule);
7342 
7343 	return 0;
7344 }
7345 
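/* Flower rules share the flow director TCAM: the tc prio selects the TCAM
 * location (prio - 1), so it must fall within the stage-1 rule range and
 * must not collide with an existing rule.
 */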
7346 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7347 				  struct flow_cls_offload *cls_flower, int tc)
7348 {
7349 	u32 prio = cls_flower->common.prio;
7350 
7351 	if (tc < 0 || tc > hdev->tc_max) {
7352 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7353 		return -EINVAL;
7354 	}
7355 
7356 	if (prio == 0 ||
7357 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7358 		dev_err(&hdev->pdev->dev,
7359 			"prio %u should be in range[1, %u]\n",
7360 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7361 		return -EINVAL;
7362 	}
7363 
7364 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7365 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7366 		return -EINVAL;
7367 	}
7368 	return 0;
7369 }
7370 
7371 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7372 				struct flow_cls_offload *cls_flower,
7373 				int tc)
7374 {
7375 	struct hclge_vport *vport = hclge_get_vport(handle);
7376 	struct hclge_dev *hdev = vport->back;
7377 	struct hclge_fd_rule *rule;
7378 	int ret;
7379 
7380 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7381 	if (ret) {
7382 		dev_err(&hdev->pdev->dev,
7383 			"failed to check cls flower params, ret = %d\n", ret);
7384 		return ret;
7385 	}
7386 
7387 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7388 	if (!rule)
7389 		return -ENOMEM;
7390 
7391 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7392 	if (ret) {
7393 		kfree(rule);
7394 		return ret;
7395 	}
7396 
7397 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7398 	rule->cls_flower.tc = tc;
7399 	rule->location = cls_flower->common.prio - 1;
7400 	rule->vf_id = 0;
7401 	rule->cls_flower.cookie = cls_flower->cookie;
7402 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7403 
7404 	ret = hclge_add_fd_entry_common(hdev, rule);
7405 	if (ret)
7406 		kfree(rule);
7407 
7408 	return ret;
7409 }
7410 
7411 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7412 						   unsigned long cookie)
7413 {
7414 	struct hclge_fd_rule *rule;
7415 	struct hlist_node *node;
7416 
7417 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7418 		if (rule->cls_flower.cookie == cookie)
7419 			return rule;
7420 	}
7421 
7422 	return NULL;
7423 }
7424 
7425 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7426 				struct flow_cls_offload *cls_flower)
7427 {
7428 	struct hclge_vport *vport = hclge_get_vport(handle);
7429 	struct hclge_dev *hdev = vport->back;
7430 	struct hclge_fd_rule *rule;
7431 	int ret;
7432 
7433 	spin_lock_bh(&hdev->fd_rule_lock);
7434 
7435 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7436 	if (!rule) {
7437 		spin_unlock_bh(&hdev->fd_rule_lock);
7438 		return -EINVAL;
7439 	}
7440 
7441 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7442 				   NULL, false);
7443 	if (ret) {
7444 		spin_unlock_bh(&hdev->fd_rule_lock);
7445 		return ret;
7446 	}
7447 
7448 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7449 	spin_unlock_bh(&hdev->fd_rule_lock);
7450 
7451 	return 0;
7452 }
7453 
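/* Service task worker: program TO_ADD rules into the TCAM and remove TO_DEL
 * ones. On failure the FD_TBL_CHANGED flag is set again so the remaining
 * work is retried on the next run.
 */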
7454 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7455 {
7456 	struct hclge_fd_rule *rule;
7457 	struct hlist_node *node;
7458 	int ret = 0;
7459 
7460 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7461 		return;
7462 
7463 	spin_lock_bh(&hdev->fd_rule_lock);
7464 
7465 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7466 		switch (rule->state) {
7467 		case HCLGE_FD_TO_ADD:
7468 			ret = hclge_fd_config_rule(hdev, rule);
7469 			if (ret)
7470 				goto out;
7471 			rule->state = HCLGE_FD_ACTIVE;
7472 			break;
7473 		case HCLGE_FD_TO_DEL:
7474 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7475 						   rule->location, NULL, false);
7476 			if (ret)
7477 				goto out;
7478 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7479 			hclge_fd_free_node(hdev, rule);
7480 			break;
7481 		default:
7482 			break;
7483 		}
7484 	}
7485 
7486 out:
7487 	if (ret)
7488 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7489 
7490 	spin_unlock_bh(&hdev->fd_rule_lock);
7491 }
7492 
7493 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7494 {
7495 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7496 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7497 
7498 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7499 	}
7500 
7501 	hclge_sync_fd_user_def_cfg(hdev, false);
7502 
7503 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7504 }
7505 
7506 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7507 {
7508 	struct hclge_vport *vport = hclge_get_vport(handle);
7509 	struct hclge_dev *hdev = vport->back;
7510 
7511 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7512 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7513 }
7514 
7515 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7516 {
7517 	struct hclge_vport *vport = hclge_get_vport(handle);
7518 	struct hclge_dev *hdev = vport->back;
7519 
7520 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7521 }
7522 
7523 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7524 {
7525 	struct hclge_vport *vport = hclge_get_vport(handle);
7526 	struct hclge_dev *hdev = vport->back;
7527 
7528 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7529 }
7530 
7531 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7532 {
7533 	struct hclge_vport *vport = hclge_get_vport(handle);
7534 	struct hclge_dev *hdev = vport->back;
7535 
7536 	return hdev->rst_stats.hw_reset_done_cnt;
7537 }
7538 
7539 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7540 {
7541 	struct hclge_vport *vport = hclge_get_vport(handle);
7542 	struct hclge_dev *hdev = vport->back;
7543 
7544 	hdev->fd_en = enable;
7545 
7546 	if (!enable)
7547 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7548 	else
7549 		hclge_restore_fd_entries(handle);
7550 
7551 	hclge_task_schedule(hdev, 0);
7552 }
7553 
7554 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7555 {
7556 	struct hclge_desc desc;
7557 	struct hclge_config_mac_mode_cmd *req =
7558 		(struct hclge_config_mac_mode_cmd *)desc.data;
7559 	u32 loop_en = 0;
7560 	int ret;
7561 
7562 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7563 
7564 	if (enable) {
7565 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7566 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7567 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7568 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7569 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7570 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7571 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7572 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7573 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7574 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7575 	}
7576 
7577 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7578 
7579 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7580 	if (ret)
7581 		dev_err(&hdev->pdev->dev,
7582 			"mac enable fail, ret =%d.\n", ret);
7583 }
7584 
7585 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7586 				     u8 switch_param, u8 param_mask)
7587 {
7588 	struct hclge_mac_vlan_switch_cmd *req;
7589 	struct hclge_desc desc;
7590 	u32 func_id;
7591 	int ret;
7592 
7593 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7594 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7595 
7596 	/* read current config parameter */
7597 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7598 				   true);
7599 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7600 	req->func_id = cpu_to_le32(func_id);
7601 
7602 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7603 	if (ret) {
7604 		dev_err(&hdev->pdev->dev,
7605 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7606 		return ret;
7607 	}
7608 
7609 	/* modify and write new config parameter */
7610 	hclge_cmd_reuse_desc(&desc, false);
7611 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7612 	req->param_mask = param_mask;
7613 
7614 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7615 	if (ret)
7616 		dev_err(&hdev->pdev->dev,
7617 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7618 	return ret;
7619 }
7620 
7621 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7622 				       int link_ret)
7623 {
7624 #define HCLGE_PHY_LINK_STATUS_NUM  200
7625 
7626 	struct phy_device *phydev = hdev->hw.mac.phydev;
7627 	int i = 0;
7628 	int ret;
7629 
7630 	do {
7631 		ret = phy_read_status(phydev);
7632 		if (ret) {
7633 			dev_err(&hdev->pdev->dev,
7634 				"phy update link status fail, ret = %d\n", ret);
7635 			return;
7636 		}
7637 
7638 		if (phydev->link == link_ret)
7639 			break;
7640 
7641 		msleep(HCLGE_LINK_STATUS_MS);
7642 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7643 }
7644 
7645 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7646 {
7647 #define HCLGE_MAC_LINK_STATUS_NUM  100
7648 
7649 	int link_status;
7650 	int i = 0;
7651 	int ret;
7652 
7653 	do {
7654 		ret = hclge_get_mac_link_status(hdev, &link_status);
7655 		if (ret)
7656 			return ret;
7657 		if (link_status == link_ret)
7658 			return 0;
7659 
7660 		msleep(HCLGE_LINK_STATUS_MS);
7661 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7662 	return -EBUSY;
7663 }
7664 
7665 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7666 					  bool is_phy)
7667 {
7668 	int link_ret;
7669 
7670 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7671 
7672 	if (is_phy)
7673 		hclge_phy_link_status_wait(hdev, link_ret);
7674 
7675 	return hclge_mac_link_status_wait(hdev, link_ret);
7676 }
7677 
7678 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7679 {
7680 	struct hclge_config_mac_mode_cmd *req;
7681 	struct hclge_desc desc;
7682 	u32 loop_en;
7683 	int ret;
7684 
7685 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
7687 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7688 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7689 	if (ret) {
7690 		dev_err(&hdev->pdev->dev,
7691 			"mac loopback get fail, ret =%d.\n", ret);
7692 		return ret;
7693 	}
7694 
	/* 2 Then set up the loopback flag */
7696 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7697 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7698 
7699 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7700 
	/* 3 Config MAC work mode with the loopback flag
	 * and its original configuration parameters
	 */
7704 	hclge_cmd_reuse_desc(&desc, false);
7705 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7706 	if (ret)
7707 		dev_err(&hdev->pdev->dev,
7708 			"mac loopback set fail, ret =%d.\n", ret);
7709 	return ret;
7710 }
7711 
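/* Enable or disable serdes (serial/parallel) or IMP-managed PHY loopback via
 * the common loopback command, then poll the result field until the firmware
 * reports completion or the retry limit is reached.
 */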
7712 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7713 				     enum hnae3_loop loop_mode)
7714 {
7715 #define HCLGE_COMMON_LB_RETRY_MS	10
7716 #define HCLGE_COMMON_LB_RETRY_NUM	100
7717 
7718 	struct hclge_common_lb_cmd *req;
7719 	struct hclge_desc desc;
7720 	int ret, i = 0;
7721 	u8 loop_mode_b;
7722 
7723 	req = (struct hclge_common_lb_cmd *)desc.data;
7724 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7725 
7726 	switch (loop_mode) {
7727 	case HNAE3_LOOP_SERIAL_SERDES:
7728 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7729 		break;
7730 	case HNAE3_LOOP_PARALLEL_SERDES:
7731 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7732 		break;
7733 	case HNAE3_LOOP_PHY:
7734 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7735 		break;
7736 	default:
7737 		dev_err(&hdev->pdev->dev,
7738 			"unsupported common loopback mode %d\n", loop_mode);
7739 		return -ENOTSUPP;
7740 	}
7741 
7742 	if (en) {
7743 		req->enable = loop_mode_b;
7744 		req->mask = loop_mode_b;
7745 	} else {
7746 		req->mask = loop_mode_b;
7747 	}
7748 
7749 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7750 	if (ret) {
7751 		dev_err(&hdev->pdev->dev,
7752 			"common loopback set fail, ret = %d\n", ret);
7753 		return ret;
7754 	}
7755 
7756 	do {
7757 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7758 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7759 					   true);
7760 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7761 		if (ret) {
7762 			dev_err(&hdev->pdev->dev,
				"common loopback get fail, ret = %d\n", ret);
7764 			return ret;
7765 		}
7766 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7767 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7768 
7769 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7770 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7771 		return -EBUSY;
7772 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7773 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7774 		return -EIO;
7775 	}
7776 	return ret;
7777 }
7778 
7779 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7780 				     enum hnae3_loop loop_mode)
7781 {
7782 	int ret;
7783 
7784 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7785 	if (ret)
7786 		return ret;
7787 
7788 	hclge_cfg_mac_mode(hdev, en);
7789 
7790 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7791 	if (ret)
7792 		dev_err(&hdev->pdev->dev,
7793 			"serdes loopback config mac mode timeout\n");
7794 
7795 	return ret;
7796 }
7797 
7798 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7799 				     struct phy_device *phydev)
7800 {
7801 	int ret;
7802 
7803 	if (!phydev->suspended) {
7804 		ret = phy_suspend(phydev);
7805 		if (ret)
7806 			return ret;
7807 	}
7808 
7809 	ret = phy_resume(phydev);
7810 	if (ret)
7811 		return ret;
7812 
7813 	return phy_loopback(phydev, true);
7814 }
7815 
7816 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7817 				      struct phy_device *phydev)
7818 {
7819 	int ret;
7820 
7821 	ret = phy_loopback(phydev, false);
7822 	if (ret)
7823 		return ret;
7824 
7825 	return phy_suspend(phydev);
7826 }
7827 
7828 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7829 {
7830 	struct phy_device *phydev = hdev->hw.mac.phydev;
7831 	int ret;
7832 
7833 	if (!phydev) {
7834 		if (hnae3_dev_phy_imp_supported(hdev))
7835 			return hclge_set_common_loopback(hdev, en,
7836 							 HNAE3_LOOP_PHY);
7837 		return -ENOTSUPP;
7838 	}
7839 
7840 	if (en)
7841 		ret = hclge_enable_phy_loopback(hdev, phydev);
7842 	else
7843 		ret = hclge_disable_phy_loopback(hdev, phydev);
7844 	if (ret) {
7845 		dev_err(&hdev->pdev->dev,
7846 			"set phy loopback fail, ret = %d\n", ret);
7847 		return ret;
7848 	}
7849 
7850 	hclge_cfg_mac_mode(hdev, en);
7851 
7852 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7853 	if (ret)
7854 		dev_err(&hdev->pdev->dev,
7855 			"phy loopback config mac mode timeout\n");
7856 
7857 	return ret;
7858 }
7859 
7860 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7861 				     u16 stream_id, bool enable)
7862 {
7863 	struct hclge_desc desc;
7864 	struct hclge_cfg_com_tqp_queue_cmd *req =
7865 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7866 
7867 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7868 	req->tqp_id = cpu_to_le16(tqp_id);
7869 	req->stream_id = cpu_to_le16(stream_id);
7870 	if (enable)
7871 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7872 
7873 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7874 }
7875 
7876 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7877 {
7878 	struct hclge_vport *vport = hclge_get_vport(handle);
7879 	struct hclge_dev *hdev = vport->back;
7880 	int ret;
7881 	u16 i;
7882 
7883 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7884 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7885 		if (ret)
7886 			return ret;
7887 	}
7888 	return 0;
7889 }
7890 
7891 static int hclge_set_loopback(struct hnae3_handle *handle,
7892 			      enum hnae3_loop loop_mode, bool en)
7893 {
7894 	struct hclge_vport *vport = hclge_get_vport(handle);
7895 	struct hclge_dev *hdev = vport->back;
7896 	int ret;
7897 
7898 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7899 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7900 	 * the same, the packets are looped back in the SSU. If SSU loopback
7901 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7902 	 */
7903 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7904 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7905 
7906 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7907 						HCLGE_SWITCH_ALW_LPBK_MASK);
7908 		if (ret)
7909 			return ret;
7910 	}
7911 
7912 	switch (loop_mode) {
7913 	case HNAE3_LOOP_APP:
7914 		ret = hclge_set_app_loopback(hdev, en);
7915 		break;
7916 	case HNAE3_LOOP_SERIAL_SERDES:
7917 	case HNAE3_LOOP_PARALLEL_SERDES:
7918 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7919 		break;
7920 	case HNAE3_LOOP_PHY:
7921 		ret = hclge_set_phy_loopback(hdev, en);
7922 		break;
7923 	default:
7924 		ret = -ENOTSUPP;
7925 		dev_err(&hdev->pdev->dev,
7926 			"loop_mode %d is not supported\n", loop_mode);
7927 		break;
7928 	}
7929 
7930 	if (ret)
7931 		return ret;
7932 
7933 	ret = hclge_tqp_enable(handle, en);
7934 	if (ret)
7935 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7936 			en ? "enable" : "disable", ret);
7937 
7938 	return ret;
7939 }
7940 
7941 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7942 {
7943 	int ret;
7944 
7945 	ret = hclge_set_app_loopback(hdev, false);
7946 	if (ret)
7947 		return ret;
7948 
7949 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7950 	if (ret)
7951 		return ret;
7952 
7953 	return hclge_cfg_common_loopback(hdev, false,
7954 					 HNAE3_LOOP_PARALLEL_SERDES);
7955 }
7956 
7957 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7958 {
7959 	struct hclge_vport *vport = hclge_get_vport(handle);
7960 	struct hnae3_knic_private_info *kinfo;
7961 	struct hnae3_queue *queue;
7962 	struct hclge_tqp *tqp;
7963 	int i;
7964 
7965 	kinfo = &vport->nic.kinfo;
7966 	for (i = 0; i < kinfo->num_tqps; i++) {
7967 		queue = handle->kinfo.tqp[i];
7968 		tqp = container_of(queue, struct hclge_tqp, q);
7969 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7970 	}
7971 }
7972 
7973 static void hclge_flush_link_update(struct hclge_dev *hdev)
7974 {
7975 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7976 
7977 	unsigned long last = hdev->serv_processed_cnt;
7978 	int i = 0;
7979 
7980 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7981 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7982 	       last == hdev->serv_processed_cnt)
7983 		usleep_range(1, 1);
7984 }
7985 
7986 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7987 {
7988 	struct hclge_vport *vport = hclge_get_vport(handle);
7989 	struct hclge_dev *hdev = vport->back;
7990 
7991 	if (enable) {
7992 		hclge_task_schedule(hdev, 0);
7993 	} else {
7994 		/* Set the DOWN flag here to disable link updating */
7995 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7996 
7997 		/* flush memory to make sure DOWN is seen by service task */
7998 		smp_mb__before_atomic();
7999 		hclge_flush_link_update(hdev);
8000 	}
8001 }
8002 
8003 static int hclge_ae_start(struct hnae3_handle *handle)
8004 {
8005 	struct hclge_vport *vport = hclge_get_vport(handle);
8006 	struct hclge_dev *hdev = vport->back;
8007 
8008 	/* mac enable */
8009 	hclge_cfg_mac_mode(hdev, true);
8010 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8011 	hdev->hw.mac.link = 0;
8012 
8013 	/* reset tqp stats */
8014 	hclge_reset_tqp_stats(handle);
8015 
8016 	hclge_mac_start_phy(hdev);
8017 
8018 	return 0;
8019 }
8020 
8021 static void hclge_ae_stop(struct hnae3_handle *handle)
8022 {
8023 	struct hclge_vport *vport = hclge_get_vport(handle);
8024 	struct hclge_dev *hdev = vport->back;
8025 
8026 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8027 	spin_lock_bh(&hdev->fd_rule_lock);
8028 	hclge_clear_arfs_rules(hdev);
8029 	spin_unlock_bh(&hdev->fd_rule_lock);
8030 
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the phy here.
	 */
8034 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8035 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8036 		hclge_mac_stop_phy(hdev);
8037 		hclge_update_link_status(hdev);
8038 		return;
8039 	}
8040 
8041 	hclge_reset_tqp(handle);
8042 
8043 	hclge_config_mac_tnl_int(hdev, false);
8044 
8045 	/* Mac disable */
8046 	hclge_cfg_mac_mode(hdev, false);
8047 
8048 	hclge_mac_stop_phy(hdev);
8049 
8050 	/* reset tqp stats */
8051 	hclge_reset_tqp_stats(handle);
8052 	hclge_update_link_status(hdev);
8053 }
8054 
8055 int hclge_vport_start(struct hclge_vport *vport)
8056 {
8057 	struct hclge_dev *hdev = vport->back;
8058 
8059 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8060 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8061 	vport->last_active_jiffies = jiffies;
8062 
8063 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8064 		if (vport->vport_id) {
8065 			hclge_restore_mac_table_common(vport);
8066 			hclge_restore_vport_vlan_table(vport);
8067 		} else {
8068 			hclge_restore_hw_table(hdev);
8069 		}
8070 	}
8071 
8072 	clear_bit(vport->vport_id, hdev->vport_config_block);
8073 
8074 	return 0;
8075 }
8076 
8077 void hclge_vport_stop(struct hclge_vport *vport)
8078 {
8079 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8080 }
8081 
8082 static int hclge_client_start(struct hnae3_handle *handle)
8083 {
8084 	struct hclge_vport *vport = hclge_get_vport(handle);
8085 
8086 	return hclge_vport_start(vport);
8087 }
8088 
8089 static void hclge_client_stop(struct hnae3_handle *handle)
8090 {
8091 	struct hclge_vport *vport = hclge_get_vport(handle);
8092 
8093 	hclge_vport_stop(vport);
8094 }
8095 
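/* Convert a MAC/VLAN table command completion (cmdq status plus the
 * per-operation response code) into an errno; the same response code has
 * different meanings for add, remove and lookup.
 */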
8096 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8097 					 u16 cmdq_resp, u8  resp_code,
8098 					 enum hclge_mac_vlan_tbl_opcode op)
8099 {
8100 	struct hclge_dev *hdev = vport->back;
8101 
8102 	if (cmdq_resp) {
8103 		dev_err(&hdev->pdev->dev,
8104 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8105 			cmdq_resp);
8106 		return -EIO;
8107 	}
8108 
8109 	if (op == HCLGE_MAC_VLAN_ADD) {
8110 		if (!resp_code || resp_code == 1)
8111 			return 0;
8112 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8113 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8114 			return -ENOSPC;
8115 
8116 		dev_err(&hdev->pdev->dev,
8117 			"add mac addr failed for undefined, code=%u.\n",
8118 			resp_code);
8119 		return -EIO;
8120 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8121 		if (!resp_code) {
8122 			return 0;
8123 		} else if (resp_code == 1) {
8124 			dev_dbg(&hdev->pdev->dev,
8125 				"remove mac addr failed for miss.\n");
8126 			return -ENOENT;
8127 		}
8128 
8129 		dev_err(&hdev->pdev->dev,
8130 			"remove mac addr failed for undefined, code=%u.\n",
8131 			resp_code);
8132 		return -EIO;
8133 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8134 		if (!resp_code) {
8135 			return 0;
8136 		} else if (resp_code == 1) {
8137 			dev_dbg(&hdev->pdev->dev,
8138 				"lookup mac addr failed for miss.\n");
8139 			return -ENOENT;
8140 		}
8141 
8142 		dev_err(&hdev->pdev->dev,
8143 			"lookup mac addr failed for undefined, code=%u.\n",
8144 			resp_code);
8145 		return -EIO;
8146 	}
8147 
8148 	dev_err(&hdev->pdev->dev,
8149 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8150 
8151 	return -EINVAL;
8152 }
8153 
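/* A MAC/VLAN table entry carries a per-function bitmap: desc[1] covers
 * function ids 0-191 and desc[2] the remainder. Set or clear the bit for
 * vfid in the right descriptor word.
 */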
8154 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8155 {
8156 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8157 
8158 	unsigned int word_num;
8159 	unsigned int bit_num;
8160 
8161 	if (vfid > 255 || vfid < 0)
8162 		return -EIO;
8163 
8164 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8165 		word_num = vfid / 32;
8166 		bit_num  = vfid % 32;
8167 		if (clr)
8168 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8169 		else
8170 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8171 	} else {
8172 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8173 		bit_num  = vfid % 32;
8174 		if (clr)
8175 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8176 		else
8177 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8178 	}
8179 
8180 	return 0;
8181 }
8182 
8183 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8184 {
8185 #define HCLGE_DESC_NUMBER 3
8186 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8187 	int i, j;
8188 
8189 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8190 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8191 			if (desc[i].data[j])
8192 				return false;
8193 
8194 	return true;
8195 }
8196 
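/* Pack the MAC address into the table entry (bytes 0-3 into mac_addr_hi32,
 * bytes 4-5 into mac_addr_lo16) and flag the entry as multicast when
 * requested.
 */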
8197 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8198 				   const u8 *addr, bool is_mc)
8199 {
8200 	const unsigned char *mac_addr = addr;
8201 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8202 		       (mac_addr[0]) | (mac_addr[1] << 8);
8203 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8204 
8205 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8206 	if (is_mc) {
8207 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8208 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8209 	}
8210 
8211 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8212 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8213 }
8214 
8215 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8216 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8217 {
8218 	struct hclge_dev *hdev = vport->back;
8219 	struct hclge_desc desc;
8220 	u8 resp_code;
8221 	u16 retval;
8222 	int ret;
8223 
8224 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8225 
8226 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8227 
8228 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8229 	if (ret) {
8230 		dev_err(&hdev->pdev->dev,
8231 			"del mac addr failed for cmd_send, ret =%d.\n",
8232 			ret);
8233 		return ret;
8234 	}
8235 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8236 	retval = le16_to_cpu(desc.retval);
8237 
8238 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8239 					     HCLGE_MAC_VLAN_REMOVE);
8240 }
8241 
8242 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8243 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8244 				     struct hclge_desc *desc,
8245 				     bool is_mc)
8246 {
8247 	struct hclge_dev *hdev = vport->back;
8248 	u8 resp_code;
8249 	u16 retval;
8250 	int ret;
8251 
8252 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8253 	if (is_mc) {
8254 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8255 		memcpy(desc[0].data,
8256 		       req,
8257 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8258 		hclge_cmd_setup_basic_desc(&desc[1],
8259 					   HCLGE_OPC_MAC_VLAN_ADD,
8260 					   true);
8261 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8262 		hclge_cmd_setup_basic_desc(&desc[2],
8263 					   HCLGE_OPC_MAC_VLAN_ADD,
8264 					   true);
8265 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8266 	} else {
8267 		memcpy(desc[0].data,
8268 		       req,
8269 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8270 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8271 	}
8272 	if (ret) {
8273 		dev_err(&hdev->pdev->dev,
8274 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8275 			ret);
8276 		return ret;
8277 	}
8278 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8279 	retval = le16_to_cpu(desc[0].retval);
8280 
8281 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8282 					     HCLGE_MAC_VLAN_LKUP);
8283 }
8284 
8285 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8286 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8287 				  struct hclge_desc *mc_desc)
8288 {
8289 	struct hclge_dev *hdev = vport->back;
8290 	int cfg_status;
8291 	u8 resp_code;
8292 	u16 retval;
8293 	int ret;
8294 
8295 	if (!mc_desc) {
8296 		struct hclge_desc desc;
8297 
8298 		hclge_cmd_setup_basic_desc(&desc,
8299 					   HCLGE_OPC_MAC_VLAN_ADD,
8300 					   false);
8301 		memcpy(desc.data, req,
8302 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8303 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8304 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8305 		retval = le16_to_cpu(desc.retval);
8306 
8307 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8308 							   resp_code,
8309 							   HCLGE_MAC_VLAN_ADD);
8310 	} else {
8311 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8312 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8313 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8314 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8315 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8316 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8317 		memcpy(mc_desc[0].data, req,
8318 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8319 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8320 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8321 		retval = le16_to_cpu(mc_desc[0].retval);
8322 
8323 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8324 							   resp_code,
8325 							   HCLGE_MAC_VLAN_ADD);
8326 	}
8327 
8328 	if (ret) {
8329 		dev_err(&hdev->pdev->dev,
8330 			"add mac addr failed for cmd_send, ret =%d.\n",
8331 			ret);
8332 		return ret;
8333 	}
8334 
8335 	return cfg_status;
8336 }
8337 
8338 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8339 			       u16 *allocated_size)
8340 {
8341 	struct hclge_umv_spc_alc_cmd *req;
8342 	struct hclge_desc desc;
8343 	int ret;
8344 
8345 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8346 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8347 
8348 	req->space_size = cpu_to_le32(space_size);
8349 
8350 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8351 	if (ret) {
8352 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8353 			ret);
8354 		return ret;
8355 	}
8356 
8357 	*allocated_size = le32_to_cpu(desc.data[1]);
8358 
8359 	return 0;
8360 }
8361 
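/* Request the wanted unicast MAC (UMV) table space from firmware and split
 * what was granted: each vport gets a private quota of
 * allocated / (num_alloc_vport + 1) entries, and one extra quota plus the
 * remainder forms the shared pool.
 */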
8362 static int hclge_init_umv_space(struct hclge_dev *hdev)
8363 {
8364 	u16 allocated_size = 0;
8365 	int ret;
8366 
8367 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8368 	if (ret)
8369 		return ret;
8370 
8371 	if (allocated_size < hdev->wanted_umv_size)
8372 		dev_warn(&hdev->pdev->dev,
8373 			 "failed to alloc umv space, want %u, get %u\n",
8374 			 hdev->wanted_umv_size, allocated_size);
8375 
8376 	hdev->max_umv_size = allocated_size;
8377 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8378 	hdev->share_umv_size = hdev->priv_umv_size +
8379 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8380 
8381 	return 0;
8382 }
8383 
8384 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8385 {
8386 	struct hclge_vport *vport;
8387 	int i;
8388 
8389 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8390 		vport = &hdev->vport[i];
8391 		vport->used_umv_num = 0;
8392 	}
8393 
8394 	mutex_lock(&hdev->vport_lock);
8395 	hdev->share_umv_size = hdev->priv_umv_size +
8396 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8397 	mutex_unlock(&hdev->vport_lock);
8398 }
8399 
8400 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8401 {
8402 	struct hclge_dev *hdev = vport->back;
8403 	bool is_full;
8404 
8405 	if (need_lock)
8406 		mutex_lock(&hdev->vport_lock);
8407 
8408 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8409 		   hdev->share_umv_size == 0);
8410 
8411 	if (need_lock)
8412 		mutex_unlock(&hdev->vport_lock);
8413 
8414 	return is_full;
8415 }
8416 
8417 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8418 {
8419 	struct hclge_dev *hdev = vport->back;
8420 
8421 	if (is_free) {
8422 		if (vport->used_umv_num > hdev->priv_umv_size)
8423 			hdev->share_umv_size++;
8424 
8425 		if (vport->used_umv_num > 0)
8426 			vport->used_umv_num--;
8427 	} else {
8428 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8429 		    hdev->share_umv_size > 0)
8430 			hdev->share_umv_size--;
8431 		vport->used_umv_num++;
8432 	}
8433 }
8434 
8435 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8436 						  const u8 *mac_addr)
8437 {
8438 	struct hclge_mac_node *mac_node, *tmp;
8439 
8440 	list_for_each_entry_safe(mac_node, tmp, list, node)
8441 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8442 			return mac_node;
8443 
8444 	return NULL;
8445 }
8446 
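/* Merge a new request into an existing MAC node: TO_ADD cancels a pending
 * TO_DEL (the address stays ACTIVE), TO_DEL frees a node that was only
 * pending TO_ADD or otherwise marks it TO_DEL, and ACTIVE (from
 * tmp_add_list) completes a pending TO_ADD.
 */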
8447 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8448 				  enum HCLGE_MAC_NODE_STATE state)
8449 {
8450 	switch (state) {
8451 	/* from set_rx_mode or tmp_add_list */
8452 	case HCLGE_MAC_TO_ADD:
8453 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8454 			mac_node->state = HCLGE_MAC_ACTIVE;
8455 		break;
8456 	/* only from set_rx_mode */
8457 	case HCLGE_MAC_TO_DEL:
8458 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8459 			list_del(&mac_node->node);
8460 			kfree(mac_node);
8461 		} else {
8462 			mac_node->state = HCLGE_MAC_TO_DEL;
8463 		}
8464 		break;
8465 	/* only from tmp_add_list, the mac_node->state won't be
8466 	 * ACTIVE.
8467 	 */
8468 	case HCLGE_MAC_ACTIVE:
8469 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8470 			mac_node->state = HCLGE_MAC_ACTIVE;
8471 
8472 		break;
8473 	}
8474 }
8475 
8476 int hclge_update_mac_list(struct hclge_vport *vport,
8477 			  enum HCLGE_MAC_NODE_STATE state,
8478 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8479 			  const unsigned char *addr)
8480 {
8481 	struct hclge_dev *hdev = vport->back;
8482 	struct hclge_mac_node *mac_node;
8483 	struct list_head *list;
8484 
8485 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8486 		&vport->uc_mac_list : &vport->mc_mac_list;
8487 
8488 	spin_lock_bh(&vport->mac_list_lock);
8489 
	/* if the mac addr is already in the mac list, there is no need to add
	 * a new one; just check its state and convert it to a new state,
	 * remove it, or do nothing.
	 */
8494 	mac_node = hclge_find_mac_node(list, addr);
8495 	if (mac_node) {
8496 		hclge_update_mac_node(mac_node, state);
8497 		spin_unlock_bh(&vport->mac_list_lock);
8498 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8499 		return 0;
8500 	}
8501 
	/* if this address was never added, there is nothing to delete */
8503 	if (state == HCLGE_MAC_TO_DEL) {
8504 		spin_unlock_bh(&vport->mac_list_lock);
8505 		dev_err(&hdev->pdev->dev,
8506 			"failed to delete address %pM from mac list\n",
8507 			addr);
8508 		return -ENOENT;
8509 	}
8510 
8511 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8512 	if (!mac_node) {
8513 		spin_unlock_bh(&vport->mac_list_lock);
8514 		return -ENOMEM;
8515 	}
8516 
8517 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8518 
8519 	mac_node->state = state;
8520 	ether_addr_copy(mac_node->mac_addr, addr);
8521 	list_add_tail(&mac_node->node, list);
8522 
8523 	spin_unlock_bh(&vport->mac_list_lock);
8524 
8525 	return 0;
8526 }
8527 
8528 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8529 			     const unsigned char *addr)
8530 {
8531 	struct hclge_vport *vport = hclge_get_vport(handle);
8532 
8533 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8534 				     addr);
8535 }
8536 
8537 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8538 			     const unsigned char *addr)
8539 {
8540 	struct hclge_dev *hdev = vport->back;
8541 	struct hclge_mac_vlan_tbl_entry_cmd req;
8542 	struct hclge_desc desc;
8543 	u16 egress_port = 0;
8544 	int ret;
8545 
8546 	/* mac addr check */
8547 	if (is_zero_ether_addr(addr) ||
8548 	    is_broadcast_ether_addr(addr) ||
8549 	    is_multicast_ether_addr(addr)) {
8550 		dev_err(&hdev->pdev->dev,
8551 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8552 			 addr, is_zero_ether_addr(addr),
8553 			 is_broadcast_ether_addr(addr),
8554 			 is_multicast_ether_addr(addr));
8555 		return -EINVAL;
8556 	}
8557 
8558 	memset(&req, 0, sizeof(req));
8559 
8560 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8561 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8562 
8563 	req.egress_port = cpu_to_le16(egress_port);
8564 
8565 	hclge_prepare_mac_addr(&req, addr, false);
8566 
	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
	 */
8571 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8572 	if (ret == -ENOENT) {
8573 		mutex_lock(&hdev->vport_lock);
8574 		if (!hclge_is_umv_space_full(vport, false)) {
8575 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8576 			if (!ret)
8577 				hclge_update_umv_space(vport, false);
8578 			mutex_unlock(&hdev->vport_lock);
8579 			return ret;
8580 		}
8581 		mutex_unlock(&hdev->vport_lock);
8582 
8583 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8584 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8585 				hdev->priv_umv_size);
8586 
8587 		return -ENOSPC;
8588 	}
8589 
8590 	/* check if we just hit the duplicate */
8591 	if (!ret) {
8592 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8593 			 vport->vport_id, addr);
8594 		return 0;
8595 	}
8596 
8597 	dev_err(&hdev->pdev->dev,
8598 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8599 		addr);
8600 
8601 	return ret;
8602 }
8603 
8604 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8605 			    const unsigned char *addr)
8606 {
8607 	struct hclge_vport *vport = hclge_get_vport(handle);
8608 
8609 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8610 				     addr);
8611 }
8612 
8613 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8614 			    const unsigned char *addr)
8615 {
8616 	struct hclge_dev *hdev = vport->back;
8617 	struct hclge_mac_vlan_tbl_entry_cmd req;
8618 	int ret;
8619 
8620 	/* mac addr check */
8621 	if (is_zero_ether_addr(addr) ||
8622 	    is_broadcast_ether_addr(addr) ||
8623 	    is_multicast_ether_addr(addr)) {
8624 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8625 			addr);
8626 		return -EINVAL;
8627 	}
8628 
8629 	memset(&req, 0, sizeof(req));
8630 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8631 	hclge_prepare_mac_addr(&req, addr, false);
8632 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8633 	if (!ret) {
8634 		mutex_lock(&hdev->vport_lock);
8635 		hclge_update_umv_space(vport, true);
8636 		mutex_unlock(&hdev->vport_lock);
8637 	} else if (ret == -ENOENT) {
8638 		ret = 0;
8639 	}
8640 
8641 	return ret;
8642 }
8643 
8644 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8645 			     const unsigned char *addr)
8646 {
8647 	struct hclge_vport *vport = hclge_get_vport(handle);
8648 
8649 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8650 				     addr);
8651 }
8652 
8653 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8654 			     const unsigned char *addr)
8655 {
8656 	struct hclge_dev *hdev = vport->back;
8657 	struct hclge_mac_vlan_tbl_entry_cmd req;
8658 	struct hclge_desc desc[3];
8659 	int status;
8660 
8661 	/* mac addr check */
8662 	if (!is_multicast_ether_addr(addr)) {
8663 		dev_err(&hdev->pdev->dev,
8664 			"Add mc mac err! invalid mac:%pM.\n",
8665 			 addr);
8666 		return -EINVAL;
8667 	}
8668 	memset(&req, 0, sizeof(req));
8669 	hclge_prepare_mac_addr(&req, addr, true);
8670 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8671 	if (status) {
		/* This mac addr does not exist, add a new entry for it */
8673 		memset(desc[0].data, 0, sizeof(desc[0].data));
8674 		memset(desc[1].data, 0, sizeof(desc[0].data));
8675 		memset(desc[2].data, 0, sizeof(desc[0].data));
8676 	}
8677 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8678 	if (status)
8679 		return status;
8680 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	/* if the table has already overflowed, do not print the error each time */
8682 	if (status == -ENOSPC &&
8683 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8684 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8685 
8686 	return status;
8687 }
8688 
8689 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8690 			    const unsigned char *addr)
8691 {
8692 	struct hclge_vport *vport = hclge_get_vport(handle);
8693 
8694 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8695 				     addr);
8696 }
8697 
8698 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8699 			    const unsigned char *addr)
8700 {
8701 	struct hclge_dev *hdev = vport->back;
8702 	struct hclge_mac_vlan_tbl_entry_cmd req;
8703 	enum hclge_cmd_status status;
8704 	struct hclge_desc desc[3];
8705 
8706 	/* mac addr check */
8707 	if (!is_multicast_ether_addr(addr)) {
8708 		dev_dbg(&hdev->pdev->dev,
8709 			"Remove mc mac err! invalid mac:%pM.\n",
8710 			 addr);
8711 		return -EINVAL;
8712 	}
8713 
8714 	memset(&req, 0, sizeof(req));
8715 	hclge_prepare_mac_addr(&req, addr, true);
8716 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8717 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
8719 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8720 		if (status)
8721 			return status;
8722 
8723 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
8725 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8726 		else
			/* Not all the vfids are zero, just update the vfid bitmap */
8728 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8729 	} else if (status == -ENOENT) {
8730 		status = 0;
8731 	}
8732 
8733 	return status;
8734 }
8735 
8736 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8737 				      struct list_head *list,
8738 				      int (*sync)(struct hclge_vport *,
8739 						  const unsigned char *))
8740 {
8741 	struct hclge_mac_node *mac_node, *tmp;
8742 	int ret;
8743 
8744 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8745 		ret = sync(vport, mac_node->mac_addr);
8746 		if (!ret) {
8747 			mac_node->state = HCLGE_MAC_ACTIVE;
8748 		} else {
8749 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8750 				&vport->state);
8751 			break;
8752 		}
8753 	}
8754 }
8755 
8756 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8757 					struct list_head *list,
8758 					int (*unsync)(struct hclge_vport *,
8759 						      const unsigned char *))
8760 {
8761 	struct hclge_mac_node *mac_node, *tmp;
8762 	int ret;
8763 
8764 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8765 		ret = unsync(vport, mac_node->mac_addr);
8766 		if (!ret || ret == -ENOENT) {
8767 			list_del(&mac_node->node);
8768 			kfree(mac_node);
8769 		} else {
8770 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8771 				&vport->state);
8772 			break;
8773 		}
8774 	}
8775 }
8776 
8777 static bool hclge_sync_from_add_list(struct list_head *add_list,
8778 				     struct list_head *mac_list)
8779 {
8780 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8781 	bool all_added = true;
8782 
8783 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8784 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8785 			all_added = false;
8786 
8787 		/* If the mac address from tmp_add_list is not in the
8788 		 * uc/mc_mac_list, a TO_DEL request was received during the
8789 		 * time window of adding the mac address into the mac table.
8790 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
8791 		 * will be removed next time. Otherwise it must be TO_ADD,
8792 		 * which means this address hasn't been added into the mac
8793 		 * table yet, so just remove the mac node.
8794 		 */
8795 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8796 		if (new_node) {
8797 			hclge_update_mac_node(new_node, mac_node->state);
8798 			list_del(&mac_node->node);
8799 			kfree(mac_node);
8800 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8801 			mac_node->state = HCLGE_MAC_TO_DEL;
8802 			list_del(&mac_node->node);
8803 			list_add_tail(&mac_node->node, mac_list);
8804 		} else {
8805 			list_del(&mac_node->node);
8806 			kfree(mac_node);
8807 		}
8808 	}
8809 
8810 	return all_added;
8811 }
8812 
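/* Move the entries left on the temporary delete list back into the vport
 * mac list so they can be handled again later; if the same address has
 * been re-added meanwhile, reuse the existing node instead.
 */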
8813 static void hclge_sync_from_del_list(struct list_head *del_list,
8814 				     struct list_head *mac_list)
8815 {
8816 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8817 
8818 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8819 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8820 		if (new_node) {
8821 			/* If the mac addr exists in the mac list, a TO_ADD
8822 			 * request was received during the time window of
8823 			 * configuring the mac address. Since the mac node
8824 			 * state is TO_ADD and the address is still in the
8825 			 * hardware (because the delete failed), just change
8826 			 * the mac node state to ACTIVE.
8827 			 */
8828 			new_node->state = HCLGE_MAC_ACTIVE;
8829 			list_del(&mac_node->node);
8830 			kfree(mac_node);
8831 		} else {
8832 			list_del(&mac_node->node);
8833 			list_add_tail(&mac_node->node, mac_list);
8834 		}
8835 	}
8836 }
8837 
8838 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8839 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8840 					bool is_all_added)
8841 {
8842 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8843 		if (is_all_added)
8844 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8845 		else
8846 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8847 	} else {
8848 		if (is_all_added)
8849 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8850 		else
8851 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8852 	}
8853 }
8854 
8855 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8856 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8857 {
8858 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8859 	struct list_head tmp_add_list, tmp_del_list;
8860 	struct list_head *list;
8861 	bool all_added;
8862 
8863 	INIT_LIST_HEAD(&tmp_add_list);
8864 	INIT_LIST_HEAD(&tmp_del_list);
8865 
8866 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
8867 	 * they can be added/deleted outside the spin lock
8868 	 */
8869 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8870 		&vport->uc_mac_list : &vport->mc_mac_list;
8871 
8872 	spin_lock_bh(&vport->mac_list_lock);
8873 
8874 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8875 		switch (mac_node->state) {
8876 		case HCLGE_MAC_TO_DEL:
8877 			list_del(&mac_node->node);
8878 			list_add_tail(&mac_node->node, &tmp_del_list);
8879 			break;
8880 		case HCLGE_MAC_TO_ADD:
8881 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8882 			if (!new_node)
8883 				goto stop_traverse;
8884 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8885 			new_node->state = mac_node->state;
8886 			list_add_tail(&new_node->node, &tmp_add_list);
8887 			break;
8888 		default:
8889 			break;
8890 		}
8891 	}
8892 
8893 stop_traverse:
8894 	spin_unlock_bh(&vport->mac_list_lock);
8895 
8896 	/* delete first, in order to get max mac table space for adding */
8897 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8898 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8899 					    hclge_rm_uc_addr_common);
8900 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8901 					  hclge_add_uc_addr_common);
8902 	} else {
8903 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8904 					    hclge_rm_mc_addr_common);
8905 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8906 					  hclge_add_mc_addr_common);
8907 	}
8908 
8909 	/* if adding/deleting some mac addresses failed, move them back to
8910 	 * the mac_list and retry next time.
8911 	 */
8912 	spin_lock_bh(&vport->mac_list_lock);
8913 
8914 	hclge_sync_from_del_list(&tmp_del_list, list);
8915 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8916 
8917 	spin_unlock_bh(&vport->mac_list_lock);
8918 
8919 	hclge_update_overflow_flags(vport, mac_type, all_added);
8920 }
8921 
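/* Skip syncing while the vport config is blocked (e.g. during VF reset);
 * otherwise sync once per MAC_TBL_CHANGE event.
 */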
8922 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8923 {
8924 	struct hclge_dev *hdev = vport->back;
8925 
8926 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8927 		return false;
8928 
8929 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8930 		return true;
8931 
8932 	return false;
8933 }
8934 
8935 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8936 {
8937 	int i;
8938 
8939 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8940 		struct hclge_vport *vport = &hdev->vport[i];
8941 
8942 		if (!hclge_need_sync_mac_table(vport))
8943 			continue;
8944 
8945 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8946 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8947 	}
8948 }
8949 
8950 static void hclge_build_del_list(struct list_head *list,
8951 				 bool is_del_list,
8952 				 struct list_head *tmp_del_list)
8953 {
8954 	struct hclge_mac_node *mac_cfg, *tmp;
8955 
8956 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8957 		switch (mac_cfg->state) {
8958 		case HCLGE_MAC_TO_DEL:
8959 		case HCLGE_MAC_ACTIVE:
8960 			list_del(&mac_cfg->node);
8961 			list_add_tail(&mac_cfg->node, tmp_del_list);
8962 			break;
8963 		case HCLGE_MAC_TO_ADD:
8964 			if (is_del_list) {
8965 				list_del(&mac_cfg->node);
8966 				kfree(mac_cfg);
8967 			}
8968 			break;
8969 		}
8970 	}
8971 }
8972 
8973 static void hclge_unsync_del_list(struct hclge_vport *vport,
8974 				  int (*unsync)(struct hclge_vport *vport,
8975 						const unsigned char *addr),
8976 				  bool is_del_list,
8977 				  struct list_head *tmp_del_list)
8978 {
8979 	struct hclge_mac_node *mac_cfg, *tmp;
8980 	int ret;
8981 
8982 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8983 		ret = unsync(vport, mac_cfg->mac_addr);
8984 		if (!ret || ret == -ENOENT) {
8985 			/* clear all mac addrs from hardware, but keep them
8986 			 * in the mac list so they can be restored after the
8987 			 * vf reset finishes.
8988 			 */
8989 			if (!is_del_list &&
8990 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8991 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8992 			} else {
8993 				list_del(&mac_cfg->node);
8994 				kfree(mac_cfg);
8995 			}
8996 		} else if (is_del_list) {
8997 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8998 		}
8999 	}
9000 }
9001 
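/* Remove all mac addresses of this vport from hardware. When is_del_list
 * is true the entries are dropped from the vport mac list as well;
 * otherwise they are kept (ACTIVE entries become TO_ADD) so they can be
 * restored once the VF reset completes.
 */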
9002 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9003 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9004 {
9005 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9006 	struct hclge_dev *hdev = vport->back;
9007 	struct list_head tmp_del_list, *list;
9008 
9009 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9010 		list = &vport->uc_mac_list;
9011 		unsync = hclge_rm_uc_addr_common;
9012 	} else {
9013 		list = &vport->mc_mac_list;
9014 		unsync = hclge_rm_mc_addr_common;
9015 	}
9016 
9017 	INIT_LIST_HEAD(&tmp_del_list);
9018 
9019 	if (!is_del_list)
9020 		set_bit(vport->vport_id, hdev->vport_config_block);
9021 
9022 	spin_lock_bh(&vport->mac_list_lock);
9023 
9024 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9025 
9026 	spin_unlock_bh(&vport->mac_list_lock);
9027 
9028 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9029 
9030 	spin_lock_bh(&vport->mac_list_lock);
9031 
9032 	hclge_sync_from_del_list(&tmp_del_list, list);
9033 
9034 	spin_unlock_bh(&vport->mac_list_lock);
9035 }
9036 
9037 /* remove all mac addresses when uninitializing */
9038 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9039 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9040 {
9041 	struct hclge_mac_node *mac_node, *tmp;
9042 	struct hclge_dev *hdev = vport->back;
9043 	struct list_head tmp_del_list, *list;
9044 
9045 	INIT_LIST_HEAD(&tmp_del_list);
9046 
9047 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9048 		&vport->uc_mac_list : &vport->mc_mac_list;
9049 
9050 	spin_lock_bh(&vport->mac_list_lock);
9051 
9052 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9053 		switch (mac_node->state) {
9054 		case HCLGE_MAC_TO_DEL:
9055 		case HCLGE_MAC_ACTIVE:
9056 			list_del(&mac_node->node);
9057 			list_add_tail(&mac_node->node, &tmp_del_list);
9058 			break;
9059 		case HCLGE_MAC_TO_ADD:
9060 			list_del(&mac_node->node);
9061 			kfree(mac_node);
9062 			break;
9063 		}
9064 	}
9065 
9066 	spin_unlock_bh(&vport->mac_list_lock);
9067 
9068 	if (mac_type == HCLGE_MAC_ADDR_UC)
9069 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9070 					    hclge_rm_uc_addr_common);
9071 	else
9072 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9073 					    hclge_rm_mc_addr_common);
9074 
9075 	if (!list_empty(&tmp_del_list))
9076 		dev_warn(&hdev->pdev->dev,
9077 			 "failed to completely uninit %s mac list for vport %u\n",
9078 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9079 			 vport->vport_id);
9080 
9081 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9082 		list_del(&mac_node->node);
9083 		kfree(mac_node);
9084 	}
9085 }
9086 
9087 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9088 {
9089 	struct hclge_vport *vport;
9090 	int i;
9091 
9092 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9093 		vport = &hdev->vport[i];
9094 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9095 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9096 	}
9097 }
9098 
9099 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9100 					      u16 cmdq_resp, u8 resp_code)
9101 {
9102 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9103 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9104 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9105 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9106 
9107 	int return_status;
9108 
9109 	if (cmdq_resp) {
9110 		dev_err(&hdev->pdev->dev,
9111 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9112 			cmdq_resp);
9113 		return -EIO;
9114 	}
9115 
9116 	switch (resp_code) {
9117 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9118 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9119 		return_status = 0;
9120 		break;
9121 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9122 		dev_err(&hdev->pdev->dev,
9123 			"add mac ethertype failed for manager table overflow.\n");
9124 		return_status = -EIO;
9125 		break;
9126 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9127 		dev_err(&hdev->pdev->dev,
9128 			"add mac ethertype failed for key conflict.\n");
9129 		return_status = -EIO;
9130 		break;
9131 	default:
9132 		dev_err(&hdev->pdev->dev,
9133 			"add mac ethertype failed for undefined, code=%u.\n",
9134 			resp_code);
9135 		return_status = -EIO;
9136 	}
9137 
9138 	return return_status;
9139 }
9140 
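/* Return true if the mac address is already in use, either present in the
 * mac vlan table or assigned to another VF.
 */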
9141 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9142 				     u8 *mac_addr)
9143 {
9144 	struct hclge_mac_vlan_tbl_entry_cmd req;
9145 	struct hclge_dev *hdev = vport->back;
9146 	struct hclge_desc desc;
9147 	u16 egress_port = 0;
9148 	int i;
9149 
9150 	if (is_zero_ether_addr(mac_addr))
9151 		return false;
9152 
9153 	memset(&req, 0, sizeof(req));
9154 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9155 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9156 	req.egress_port = cpu_to_le16(egress_port);
9157 	hclge_prepare_mac_addr(&req, mac_addr, false);
9158 
9159 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9160 		return true;
9161 
9162 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9163 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9164 		if (i != vf_idx &&
9165 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9166 			return true;
9167 
9168 	return false;
9169 }
9170 
9171 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9172 			    u8 *mac_addr)
9173 {
9174 	struct hclge_vport *vport = hclge_get_vport(handle);
9175 	struct hclge_dev *hdev = vport->back;
9176 
9177 	vport = hclge_get_vf_vport(hdev, vf);
9178 	if (!vport)
9179 		return -EINVAL;
9180 
9181 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9182 		dev_info(&hdev->pdev->dev,
9183 			 "Specified MAC(=%pM) is the same as before, no change committed!\n",
9184 			 mac_addr);
9185 		return 0;
9186 	}
9187 
9188 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9189 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9190 			mac_addr);
9191 		return -EEXIST;
9192 	}
9193 
9194 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9195 
9196 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9197 		dev_info(&hdev->pdev->dev,
9198 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9199 			 vf, mac_addr);
9200 		return hclge_inform_reset_assert_to_vf(vport);
9201 	}
9202 
9203 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9204 		 vf, mac_addr);
9205 	return 0;
9206 }
9207 
9208 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9209 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9210 {
9211 	struct hclge_desc desc;
9212 	u8 resp_code;
9213 	u16 retval;
9214 	int ret;
9215 
9216 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9217 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9218 
9219 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9220 	if (ret) {
9221 		dev_err(&hdev->pdev->dev,
9222 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9223 			ret);
9224 		return ret;
9225 	}
9226 
9227 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9228 	retval = le16_to_cpu(desc.retval);
9229 
9230 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9231 }
9232 
9233 static int init_mgr_tbl(struct hclge_dev *hdev)
9234 {
9235 	int ret;
9236 	int i;
9237 
9238 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9239 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9240 		if (ret) {
9241 			dev_err(&hdev->pdev->dev,
9242 				"add mac ethertype failed, ret =%d.\n",
9243 				ret);
9244 			return ret;
9245 		}
9246 	}
9247 
9248 	return 0;
9249 }
9250 
9251 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9252 {
9253 	struct hclge_vport *vport = hclge_get_vport(handle);
9254 	struct hclge_dev *hdev = vport->back;
9255 
9256 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9257 }
9258 
9259 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9260 				       const u8 *old_addr, const u8 *new_addr)
9261 {
9262 	struct list_head *list = &vport->uc_mac_list;
9263 	struct hclge_mac_node *old_node, *new_node;
9264 
9265 	new_node = hclge_find_mac_node(list, new_addr);
9266 	if (!new_node) {
9267 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9268 		if (!new_node)
9269 			return -ENOMEM;
9270 
9271 		new_node->state = HCLGE_MAC_TO_ADD;
9272 		ether_addr_copy(new_node->mac_addr, new_addr);
9273 		list_add(&new_node->node, list);
9274 	} else {
9275 		if (new_node->state == HCLGE_MAC_TO_DEL)
9276 			new_node->state = HCLGE_MAC_ACTIVE;
9277 
9278 		/* make sure the new addr is at the head of the list, so the
9279 		 * dev addr is not left out of the mac table due to the umv
9280 		 * space limitation after a global/imp reset, which clears
9281 		 * the mac table in hardware.
9282 		 */
9283 		list_move(&new_node->node, list);
9284 	}
9285 
9286 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9287 		old_node = hclge_find_mac_node(list, old_addr);
9288 		if (old_node) {
9289 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9290 				list_del(&old_node->node);
9291 				kfree(old_node);
9292 			} else {
9293 				old_node->state = HCLGE_MAC_TO_DEL;
9294 			}
9295 		}
9296 	}
9297 
9298 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9299 
9300 	return 0;
9301 }
9302 
9303 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9304 			      bool is_first)
9305 {
9306 	const unsigned char *new_addr = (const unsigned char *)p;
9307 	struct hclge_vport *vport = hclge_get_vport(handle);
9308 	struct hclge_dev *hdev = vport->back;
9309 	unsigned char *old_addr = NULL;
9310 	int ret;
9311 
9312 	/* mac addr check */
9313 	if (is_zero_ether_addr(new_addr) ||
9314 	    is_broadcast_ether_addr(new_addr) ||
9315 	    is_multicast_ether_addr(new_addr)) {
9316 		dev_err(&hdev->pdev->dev,
9317 			"change uc mac err! invalid mac: %pM.\n",
9318 			 new_addr);
9319 		return -EINVAL;
9320 	}
9321 
9322 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9323 	if (ret) {
9324 		dev_err(&hdev->pdev->dev,
9325 			"failed to configure mac pause address, ret = %d\n",
9326 			ret);
9327 		return ret;
9328 	}
9329 
9330 	if (!is_first)
9331 		old_addr = hdev->hw.mac.mac_addr;
9332 
9333 	spin_lock_bh(&vport->mac_list_lock);
9334 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9335 	if (ret) {
9336 		dev_err(&hdev->pdev->dev,
9337 			"failed to change the mac addr:%pM, ret = %d\n",
9338 			new_addr, ret);
9339 		spin_unlock_bh(&vport->mac_list_lock);
9340 
9341 		if (!is_first)
9342 			hclge_pause_addr_cfg(hdev, old_addr);
9343 
9344 		return ret;
9345 	}
9346 	/* we must update the dev addr under the spin lock to prevent it
9347 	 * from being removed by the set_rx_mode path.
9348 	 */
9349 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9350 	spin_unlock_bh(&vport->mac_list_lock);
9351 
9352 	hclge_task_schedule(hdev, 0);
9353 
9354 	return 0;
9355 }
9356 
9357 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9358 {
9359 	struct mii_ioctl_data *data = if_mii(ifr);
9360 
9361 	if (!hnae3_dev_phy_imp_supported(hdev))
9362 		return -EOPNOTSUPP;
9363 
9364 	switch (cmd) {
9365 	case SIOCGMIIPHY:
9366 		data->phy_id = hdev->hw.mac.phy_addr;
9367 		/* this command reads phy id and register at the same time */
9368 		fallthrough;
9369 	case SIOCGMIIREG:
9370 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9371 		return 0;
9372 
9373 	case SIOCSMIIREG:
9374 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9375 	default:
9376 		return -EOPNOTSUPP;
9377 	}
9378 }
9379 
9380 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9381 			  int cmd)
9382 {
9383 	struct hclge_vport *vport = hclge_get_vport(handle);
9384 	struct hclge_dev *hdev = vport->back;
9385 
9386 	if (!hdev->hw.mac.phydev)
9387 		return hclge_mii_ioctl(hdev, ifr, cmd);
9388 
9389 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9390 }
9391 
9392 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9393 					     bool bypass_en)
9394 {
9395 	struct hclge_port_vlan_filter_bypass_cmd *req;
9396 	struct hclge_desc desc;
9397 	int ret;
9398 
9399 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9400 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9401 	req->vf_id = vf_id;
9402 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9403 		      bypass_en ? 1 : 0);
9404 
9405 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9406 	if (ret)
9407 		dev_err(&hdev->pdev->dev,
9408 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9409 			vf_id, ret);
9410 
9411 	return ret;
9412 }
9413 
9414 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9415 				      u8 fe_type, bool filter_en, u8 vf_id)
9416 {
9417 	struct hclge_vlan_filter_ctrl_cmd *req;
9418 	struct hclge_desc desc;
9419 	int ret;
9420 
9421 	/* read current vlan filter parameter */
9422 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9423 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9424 	req->vlan_type = vlan_type;
9425 	req->vf_id = vf_id;
9426 
9427 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9428 	if (ret) {
9429 		dev_err(&hdev->pdev->dev,
9430 			"failed to get vlan filter config, ret = %d.\n", ret);
9431 		return ret;
9432 	}
9433 
9434 	/* modify and write new config parameter */
9435 	hclge_cmd_reuse_desc(&desc, false);
9436 	req->vlan_fe = filter_en ?
9437 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9438 
9439 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9440 	if (ret)
9441 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9442 			ret);
9443 
9444 	return ret;
9445 }
9446 
9447 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9448 {
9449 	struct hclge_dev *hdev = vport->back;
9450 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9451 	int ret;
9452 
9453 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9454 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9455 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9456 						  enable, vport->vport_id);
9457 
9458 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9459 					 HCLGE_FILTER_FE_EGRESS, enable,
9460 					 vport->vport_id);
9461 	if (ret)
9462 		return ret;
9463 
9464 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
9465 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9466 							!enable);
9467 	else if (!vport->vport_id)
9468 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9469 						 HCLGE_FILTER_FE_INGRESS,
9470 						 enable, 0);
9471 
9472 	return ret;
9473 }
9474 
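/* Decide whether the vlan filter should actually be enabled for this vport,
 * based on the port based vlan state, promiscuous/trusted settings and the
 * vlans recorded in the vport vlan list.
 */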
9475 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9476 {
9477 	struct hnae3_handle *handle = &vport->nic;
9478 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9479 	struct hclge_dev *hdev = vport->back;
9480 
9481 	if (vport->vport_id) {
9482 		if (vport->port_base_vlan_cfg.state !=
9483 			HNAE3_PORT_BASE_VLAN_DISABLE)
9484 			return true;
9485 
9486 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9487 			return false;
9488 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9489 		return false;
9490 	}
9491 
9492 	if (!vport->req_vlan_fltr_en)
9493 		return false;
9494 
9495 	/* for compatibility with older devices, always enable vlan filter */
9496 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9497 		return true;
9498 
9499 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9500 		if (vlan->vlan_id != 0)
9501 			return true;
9502 
9503 	return false;
9504 }
9505 
9506 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9507 {
9508 	struct hclge_dev *hdev = vport->back;
9509 	bool need_en;
9510 	int ret;
9511 
9512 	mutex_lock(&hdev->vport_lock);
9513 
9514 	vport->req_vlan_fltr_en = request_en;
9515 
9516 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9517 	if (need_en == vport->cur_vlan_fltr_en) {
9518 		mutex_unlock(&hdev->vport_lock);
9519 		return 0;
9520 	}
9521 
9522 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9523 	if (ret) {
9524 		mutex_unlock(&hdev->vport_lock);
9525 		return ret;
9526 	}
9527 
9528 	vport->cur_vlan_fltr_en = need_en;
9529 
9530 	mutex_unlock(&hdev->vport_lock);
9531 
9532 	return 0;
9533 }
9534 
9535 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9536 {
9537 	struct hclge_vport *vport = hclge_get_vport(handle);
9538 
9539 	return hclge_enable_vport_vlan_filter(vport, enable);
9540 }
9541 
9542 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9543 					bool is_kill, u16 vlan,
9544 					struct hclge_desc *desc)
9545 {
9546 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9547 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9548 	u8 vf_byte_val;
9549 	u8 vf_byte_off;
9550 	int ret;
9551 
9552 	hclge_cmd_setup_basic_desc(&desc[0],
9553 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9554 	hclge_cmd_setup_basic_desc(&desc[1],
9555 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9556 
9557 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9558 
9559 	vf_byte_off = vfid / 8;
9560 	vf_byte_val = 1 << (vfid % 8);
9561 
9562 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9563 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9564 
9565 	req0->vlan_id  = cpu_to_le16(vlan);
9566 	req0->vlan_cfg = is_kill;
9567 
9568 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9569 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9570 	else
9571 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9572 
9573 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9574 	if (ret) {
9575 		dev_err(&hdev->pdev->dev,
9576 			"Send vf vlan command fail, ret =%d.\n",
9577 			ret);
9578 		return ret;
9579 	}
9580 
9581 	return 0;
9582 }
9583 
9584 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9585 					  bool is_kill, struct hclge_desc *desc)
9586 {
9587 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9588 
9589 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9590 
9591 	if (!is_kill) {
9592 #define HCLGE_VF_VLAN_NO_ENTRY	2
9593 		if (!req->resp_code || req->resp_code == 1)
9594 			return 0;
9595 
9596 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9597 			set_bit(vfid, hdev->vf_vlan_full);
9598 			dev_warn(&hdev->pdev->dev,
9599 				 "vf vlan table is full, vf vlan filter is disabled\n");
9600 			return 0;
9601 		}
9602 
9603 		dev_err(&hdev->pdev->dev,
9604 			"Add vf vlan filter fail, ret =%u.\n",
9605 			req->resp_code);
9606 	} else {
9607 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9608 		if (!req->resp_code)
9609 			return 0;
9610 
9611 		/* vf vlan filter is disabled when the vf vlan table is full,
9612 		 * so new vlan ids will not be added into the vf vlan table.
9613 		 * Just return 0 without warning, to avoid flooding the log
9614 		 * when unloading.
9615 		 */
9616 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9617 			return 0;
9618 
9619 		dev_err(&hdev->pdev->dev,
9620 			"Kill vf vlan filter fail, ret =%u.\n",
9621 			req->resp_code);
9622 	}
9623 
9624 	return -EIO;
9625 }
9626 
9627 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9628 				    bool is_kill, u16 vlan)
9629 {
9630 	struct hclge_vport *vport = &hdev->vport[vfid];
9631 	struct hclge_desc desc[2];
9632 	int ret;
9633 
9634 	/* if the vf vlan table is full, firmware will disable the vf vlan
9635 	 * filter, so it is neither possible nor necessary to add new vlan ids.
9636 	 * If spoof check is enabled and the vf vlan table is full, adding a
9637 	 * new vlan must fail, since tx packets with that vlan id are dropped.
9638 	 */
9639 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9640 		if (vport->vf_info.spoofchk && vlan) {
9641 			dev_err(&hdev->pdev->dev,
9642 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9643 			return -EPERM;
9644 		}
9645 		return 0;
9646 	}
9647 
9648 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9649 	if (ret)
9650 		return ret;
9651 
9652 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9653 }
9654 
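/* The port vlan filter is programmed as a bitmap: vlan_offset selects a
 * block of HCLGE_VLAN_ID_OFFSET_STEP vlan ids, and the byte/bit written to
 * vlan_offset_bitmap selects the vlan id within that block.
 */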
9655 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9656 				      u16 vlan_id, bool is_kill)
9657 {
9658 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9659 	struct hclge_desc desc;
9660 	u8 vlan_offset_byte_val;
9661 	u8 vlan_offset_byte;
9662 	u8 vlan_offset_160;
9663 	int ret;
9664 
9665 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9666 
9667 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9668 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9669 			   HCLGE_VLAN_BYTE_SIZE;
9670 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9671 
9672 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9673 	req->vlan_offset = vlan_offset_160;
9674 	req->vlan_cfg = is_kill;
9675 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9676 
9677 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9678 	if (ret)
9679 		dev_err(&hdev->pdev->dev,
9680 			"port vlan command, send fail, ret =%d.\n", ret);
9681 	return ret;
9682 }
9683 
9684 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9685 				    u16 vport_id, u16 vlan_id,
9686 				    bool is_kill)
9687 {
9688 	u16 vport_idx, vport_num = 0;
9689 	int ret;
9690 
9691 	if (is_kill && !vlan_id)
9692 		return 0;
9693 
9694 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9695 	if (ret) {
9696 		dev_err(&hdev->pdev->dev,
9697 			"Set %u vport vlan filter config fail, ret =%d.\n",
9698 			vport_id, ret);
9699 		return ret;
9700 	}
9701 
9702 	/* vlan 0 may be added twice when 8021q module is enabled */
9703 	if (!is_kill && !vlan_id &&
9704 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9705 		return 0;
9706 
9707 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9708 		dev_err(&hdev->pdev->dev,
9709 			"Add port vlan failed, vport %u is already in vlan %u\n",
9710 			vport_id, vlan_id);
9711 		return -EINVAL;
9712 	}
9713 
9714 	if (is_kill &&
9715 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9716 		dev_err(&hdev->pdev->dev,
9717 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9718 			vport_id, vlan_id);
9719 		return -EINVAL;
9720 	}
9721 
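	/* the port vlan table only needs updating when the first vport joins
	 * this vlan or the last vport leaves it
	 */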
9722 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9723 		vport_num++;
9724 
9725 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9726 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9727 						 is_kill);
9728 
9729 	return ret;
9730 }
9731 
9732 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9733 {
9734 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9735 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9736 	struct hclge_dev *hdev = vport->back;
9737 	struct hclge_desc desc;
9738 	u16 bmap_index;
9739 	int status;
9740 
9741 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9742 
9743 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9744 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9745 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9746 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9747 		      vcfg->accept_tag1 ? 1 : 0);
9748 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9749 		      vcfg->accept_untag1 ? 1 : 0);
9750 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9751 		      vcfg->accept_tag2 ? 1 : 0);
9752 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9753 		      vcfg->accept_untag2 ? 1 : 0);
9754 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9755 		      vcfg->insert_tag1_en ? 1 : 0);
9756 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9757 		      vcfg->insert_tag2_en ? 1 : 0);
9758 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9759 		      vcfg->tag_shift_mode_en ? 1 : 0);
9760 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9761 
9762 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9763 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9764 			HCLGE_VF_NUM_PER_BYTE;
9765 	req->vf_bitmap[bmap_index] =
9766 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9767 
9768 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9769 	if (status)
9770 		dev_err(&hdev->pdev->dev,
9771 			"Send port txvlan cfg command fail, ret =%d\n",
9772 			status);
9773 
9774 	return status;
9775 }
9776 
9777 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9778 {
9779 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9780 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9781 	struct hclge_dev *hdev = vport->back;
9782 	struct hclge_desc desc;
9783 	u16 bmap_index;
9784 	int status;
9785 
9786 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9787 
9788 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9789 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9790 		      vcfg->strip_tag1_en ? 1 : 0);
9791 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9792 		      vcfg->strip_tag2_en ? 1 : 0);
9793 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9794 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9795 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9796 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9797 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9798 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9799 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9800 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9801 
9802 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9803 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9804 			HCLGE_VF_NUM_PER_BYTE;
9805 	req->vf_bitmap[bmap_index] =
9806 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9807 
9808 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9809 	if (status)
9810 		dev_err(&hdev->pdev->dev,
9811 			"Send port rxvlan cfg command fail, ret =%d\n",
9812 			status);
9813 
9814 	return status;
9815 }
9816 
9817 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9818 				  u16 port_base_vlan_state,
9819 				  u16 vlan_tag, u8 qos)
9820 {
9821 	int ret;
9822 
9823 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9824 		vport->txvlan_cfg.accept_tag1 = true;
9825 		vport->txvlan_cfg.insert_tag1_en = false;
9826 		vport->txvlan_cfg.default_tag1 = 0;
9827 	} else {
9828 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9829 
9830 		vport->txvlan_cfg.accept_tag1 =
9831 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9832 		vport->txvlan_cfg.insert_tag1_en = true;
9833 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9834 						 vlan_tag;
9835 	}
9836 
9837 	vport->txvlan_cfg.accept_untag1 = true;
9838 
9839 	/* accept_tag2 and accept_untag2 are not supported on
9840 	 * pdev revision 0x20; newer revisions support them, but
9841 	 * these two fields cannot be configured by the user.
9842 	 */
9843 	vport->txvlan_cfg.accept_tag2 = true;
9844 	vport->txvlan_cfg.accept_untag2 = true;
9845 	vport->txvlan_cfg.insert_tag2_en = false;
9846 	vport->txvlan_cfg.default_tag2 = 0;
9847 	vport->txvlan_cfg.tag_shift_mode_en = true;
9848 
9849 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9850 		vport->rxvlan_cfg.strip_tag1_en = false;
9851 		vport->rxvlan_cfg.strip_tag2_en =
9852 				vport->rxvlan_cfg.rx_vlan_offload_en;
9853 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9854 	} else {
9855 		vport->rxvlan_cfg.strip_tag1_en =
9856 				vport->rxvlan_cfg.rx_vlan_offload_en;
9857 		vport->rxvlan_cfg.strip_tag2_en = true;
9858 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9859 	}
9860 
9861 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9862 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9863 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9864 
9865 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9866 	if (ret)
9867 		return ret;
9868 
9869 	return hclge_set_vlan_rx_offload_cfg(vport);
9870 }
9871 
9872 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9873 {
9874 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9875 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9876 	struct hclge_desc desc;
9877 	int status;
9878 
9879 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9880 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9881 	rx_req->ot_fst_vlan_type =
9882 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9883 	rx_req->ot_sec_vlan_type =
9884 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9885 	rx_req->in_fst_vlan_type =
9886 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9887 	rx_req->in_sec_vlan_type =
9888 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9889 
9890 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9891 	if (status) {
9892 		dev_err(&hdev->pdev->dev,
9893 			"Send rxvlan protocol type command fail, ret =%d\n",
9894 			status);
9895 		return status;
9896 	}
9897 
9898 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9899 
9900 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9901 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9902 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9903 
9904 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9905 	if (status)
9906 		dev_err(&hdev->pdev->dev,
9907 			"Send txvlan protocol type command fail, ret =%d\n",
9908 			status);
9909 
9910 	return status;
9911 }
9912 
9913 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9914 {
9915 #define HCLGE_DEF_VLAN_TYPE		0x8100
9916 
9917 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9918 	struct hclge_vport *vport;
9919 	int ret;
9920 	int i;
9921 
9922 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9923 		/* for revision 0x21, vf vlan filter is per function */
9924 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9925 			vport = &hdev->vport[i];
9926 			ret = hclge_set_vlan_filter_ctrl(hdev,
9927 							 HCLGE_FILTER_TYPE_VF,
9928 							 HCLGE_FILTER_FE_EGRESS,
9929 							 true,
9930 							 vport->vport_id);
9931 			if (ret)
9932 				return ret;
9933 			vport->cur_vlan_fltr_en = true;
9934 		}
9935 
9936 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9937 						 HCLGE_FILTER_FE_INGRESS, true,
9938 						 0);
9939 		if (ret)
9940 			return ret;
9941 	} else {
9942 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9943 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9944 						 true, 0);
9945 		if (ret)
9946 			return ret;
9947 	}
9948 
9949 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9950 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9951 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9952 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9953 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9954 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9955 
9956 	ret = hclge_set_vlan_protocol_type(hdev);
9957 	if (ret)
9958 		return ret;
9959 
9960 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9961 		u16 vlan_tag;
9962 		u8 qos;
9963 
9964 		vport = &hdev->vport[i];
9965 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9966 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
9967 
9968 		ret = hclge_vlan_offload_cfg(vport,
9969 					     vport->port_base_vlan_cfg.state,
9970 					     vlan_tag, qos);
9971 		if (ret)
9972 			return ret;
9973 	}
9974 
9975 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9976 }
9977 
9978 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9979 				       bool writen_to_tbl)
9980 {
9981 	struct hclge_vport_vlan_cfg *vlan;
9982 
9983 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9984 	if (!vlan)
9985 		return;
9986 
9987 	vlan->hd_tbl_status = writen_to_tbl;
9988 	vlan->vlan_id = vlan_id;
9989 
9990 	list_add_tail(&vlan->node, &vport->vlan_list);
9991 }
9992 
9993 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9994 {
9995 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9996 	struct hclge_dev *hdev = vport->back;
9997 	int ret;
9998 
9999 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10000 		if (!vlan->hd_tbl_status) {
10001 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10002 						       vport->vport_id,
10003 						       vlan->vlan_id, false);
10004 			if (ret) {
10005 				dev_err(&hdev->pdev->dev,
10006 					"restore vport vlan list failed, ret=%d\n",
10007 					ret);
10008 				return ret;
10009 			}
10010 		}
10011 		vlan->hd_tbl_status = true;
10012 	}
10013 
10014 	return 0;
10015 }
10016 
10017 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10018 				      bool is_write_tbl)
10019 {
10020 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10021 	struct hclge_dev *hdev = vport->back;
10022 
10023 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10024 		if (vlan->vlan_id == vlan_id) {
10025 			if (is_write_tbl && vlan->hd_tbl_status)
10026 				hclge_set_vlan_filter_hw(hdev,
10027 							 htons(ETH_P_8021Q),
10028 							 vport->vport_id,
10029 							 vlan_id,
10030 							 true);
10031 
10032 			list_del(&vlan->node);
10033 			kfree(vlan);
10034 			break;
10035 		}
10036 	}
10037 }
10038 
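/* Remove all vlans of this vport from the hardware vlan filter; the entries
 * are also dropped from the vport vlan list when is_del_list is true,
 * otherwise they are kept so they can be restored later.
 */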
10039 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10040 {
10041 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10042 	struct hclge_dev *hdev = vport->back;
10043 
10044 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10045 		if (vlan->hd_tbl_status)
10046 			hclge_set_vlan_filter_hw(hdev,
10047 						 htons(ETH_P_8021Q),
10048 						 vport->vport_id,
10049 						 vlan->vlan_id,
10050 						 true);
10051 
10052 		vlan->hd_tbl_status = false;
10053 		if (is_del_list) {
10054 			list_del(&vlan->node);
10055 			kfree(vlan);
10056 		}
10057 	}
10058 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10059 }
10060 
10061 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10062 {
10063 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10064 	struct hclge_vport *vport;
10065 	int i;
10066 
10067 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10068 		vport = &hdev->vport[i];
10069 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10070 			list_del(&vlan->node);
10071 			kfree(vlan);
10072 		}
10073 	}
10074 }
10075 
10076 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10077 {
10078 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10079 	struct hclge_dev *hdev = vport->back;
10080 	u16 vlan_proto;
10081 	u16 vlan_id;
10082 	u16 state;
10083 	int ret;
10084 
10085 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10086 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10087 	state = vport->port_base_vlan_cfg.state;
10088 
10089 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10090 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10091 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10092 					 vport->vport_id, vlan_id,
10093 					 false);
10094 		return;
10095 	}
10096 
10097 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10098 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10099 					       vport->vport_id,
10100 					       vlan->vlan_id, false);
10101 		if (ret)
10102 			break;
10103 		vlan->hd_tbl_status = true;
10104 	}
10105 }
10106 
10107 /* For global reset and imp reset, hardware will clear the mac table,
10108  * so we change the mac address state from ACTIVE to TO_ADD; these entries
10109  * can then be restored in the service task after the reset completes.
10110  * Mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
10111  * after reset, so just remove those mac nodes from the mac_list.
10112  */
10113 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10114 {
10115 	struct hclge_mac_node *mac_node, *tmp;
10116 
10117 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10118 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10119 			mac_node->state = HCLGE_MAC_TO_ADD;
10120 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10121 			list_del(&mac_node->node);
10122 			kfree(mac_node);
10123 		}
10124 	}
10125 }
10126 
10127 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10128 {
10129 	spin_lock_bh(&vport->mac_list_lock);
10130 
10131 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10132 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10133 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10134 
10135 	spin_unlock_bh(&vport->mac_list_lock);
10136 }
10137 
10138 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10139 {
10140 	struct hclge_vport *vport = &hdev->vport[0];
10141 	struct hnae3_handle *handle = &vport->nic;
10142 
10143 	hclge_restore_mac_table_common(vport);
10144 	hclge_restore_vport_vlan_table(vport);
10145 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10146 	hclge_restore_fd_entries(handle);
10147 }
10148 
10149 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10150 {
10151 	struct hclge_vport *vport = hclge_get_vport(handle);
10152 
10153 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10154 		vport->rxvlan_cfg.strip_tag1_en = false;
10155 		vport->rxvlan_cfg.strip_tag2_en = enable;
10156 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10157 	} else {
10158 		vport->rxvlan_cfg.strip_tag1_en = enable;
10159 		vport->rxvlan_cfg.strip_tag2_en = true;
10160 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10161 	}
10162 
10163 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10164 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10165 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10166 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10167 
10168 	return hclge_set_vlan_rx_offload_cfg(vport);
10169 }
10170 
10171 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10172 {
10173 	struct hclge_dev *hdev = vport->back;
10174 
10175 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10176 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10177 }
10178 
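/* Switch the hardware vlan filter between port based vlan mode and normal
 * mode: entering port based vlan mode replaces the per-vport entries with
 * the port vlan, while leaving it restores the entries recorded in the
 * vport vlan list.
 */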
10179 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10180 					    u16 port_base_vlan_state,
10181 					    struct hclge_vlan_info *new_info,
10182 					    struct hclge_vlan_info *old_info)
10183 {
10184 	struct hclge_dev *hdev = vport->back;
10185 	int ret;
10186 
10187 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10188 		hclge_rm_vport_all_vlan_table(vport, false);
10189 		/* force clear VLAN 0 */
10190 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10191 		if (ret)
10192 			return ret;
10193 		return hclge_set_vlan_filter_hw(hdev,
10194 						 htons(new_info->vlan_proto),
10195 						 vport->vport_id,
10196 						 new_info->vlan_tag,
10197 						 false);
10198 	}
10199 
10200 	/* force add VLAN 0 */
10201 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10202 	if (ret)
10203 		return ret;
10204 
10205 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10206 				       vport->vport_id, old_info->vlan_tag,
10207 				       true);
10208 	if (ret)
10209 		return ret;
10210 
10211 	return hclge_add_vport_all_vlan_table(vport);
10212 }
10213 
10214 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10215 					  const struct hclge_vlan_info *old_cfg)
10216 {
10217 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10218 		return true;
10219 
10220 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10221 		return true;
10222 
10223 	return false;
10224 }
10225 
10226 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10227 				    struct hclge_vlan_info *vlan_info)
10228 {
10229 	struct hnae3_handle *nic = &vport->nic;
10230 	struct hclge_vlan_info *old_vlan_info;
10231 	struct hclge_dev *hdev = vport->back;
10232 	int ret;
10233 
10234 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10235 
10236 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10237 				     vlan_info->qos);
10238 	if (ret)
10239 		return ret;
10240 
10241 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10242 		goto out;
10243 
10244 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10245 		/* add new VLAN tag */
10246 		ret = hclge_set_vlan_filter_hw(hdev,
10247 					       htons(vlan_info->vlan_proto),
10248 					       vport->vport_id,
10249 					       vlan_info->vlan_tag,
10250 					       false);
10251 		if (ret)
10252 			return ret;
10253 
10254 		/* remove old VLAN tag */
10255 		if (old_vlan_info->vlan_tag == 0)
10256 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10257 						       true, 0);
10258 		else
10259 			ret = hclge_set_vlan_filter_hw(hdev,
10260 						       htons(ETH_P_8021Q),
10261 						       vport->vport_id,
10262 						       old_vlan_info->vlan_tag,
10263 						       true);
10264 		if (ret) {
10265 			dev_err(&hdev->pdev->dev,
10266 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10267 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10268 			return ret;
10269 		}
10270 
10271 		goto out;
10272 	}
10273 
10274 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10275 					       old_vlan_info);
10276 	if (ret)
10277 		return ret;
10278 
10279 out:
10280 	vport->port_base_vlan_cfg.state = state;
10281 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10282 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10283 	else
10284 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10285 
10286 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10287 	hclge_set_vport_vlan_fltr_change(vport);
10288 
10289 	return 0;
10290 }
10291 
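/* Work out how the port based vlan configuration changes for the requested
 * vlan/qos pair: enable, disable, modify or no change.
 */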
10292 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10293 					  enum hnae3_port_base_vlan_state state,
10294 					  u16 vlan, u8 qos)
10295 {
10296 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10297 		if (!vlan && !qos)
10298 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10299 
10300 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10301 	}
10302 
10303 	if (!vlan && !qos)
10304 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10305 
10306 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10307 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10308 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10309 
10310 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10311 }
10312 
10313 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10314 				    u16 vlan, u8 qos, __be16 proto)
10315 {
10316 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10317 	struct hclge_vport *vport = hclge_get_vport(handle);
10318 	struct hclge_dev *hdev = vport->back;
10319 	struct hclge_vlan_info vlan_info;
10320 	u16 state;
10321 	int ret;
10322 
10323 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10324 		return -EOPNOTSUPP;
10325 
10326 	vport = hclge_get_vf_vport(hdev, vfid);
10327 	if (!vport)
10328 		return -EINVAL;
10329 
10330 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10331 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10332 		return -EINVAL;
10333 	if (proto != htons(ETH_P_8021Q))
10334 		return -EPROTONOSUPPORT;
10335 
10336 	state = hclge_get_port_base_vlan_state(vport,
10337 					       vport->port_base_vlan_cfg.state,
10338 					       vlan, qos);
10339 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10340 		return 0;
10341 
10342 	vlan_info.vlan_tag = vlan;
10343 	vlan_info.qos = qos;
10344 	vlan_info.vlan_proto = ntohs(proto);
10345 
10346 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10347 	if (ret) {
10348 		dev_err(&hdev->pdev->dev,
10349 			"failed to update port base vlan for vf %d, ret = %d\n",
10350 			vfid, ret);
10351 		return ret;
10352 	}
10353 
10354 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10355 	 * VLAN state.
10356 	 */
10357 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10358 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10359 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10360 						  vport->vport_id, state,
10361 						  &vlan_info);
10362 
10363 	return 0;
10364 }
10365 
10366 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10367 {
10368 	struct hclge_vlan_info *vlan_info;
10369 	struct hclge_vport *vport;
10370 	int ret;
10371 	int vf;
10372 
10373 	/* clear port base vlan for all vf */
10374 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10375 		vport = &hdev->vport[vf];
10376 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10377 
10378 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10379 					       vport->vport_id,
10380 					       vlan_info->vlan_tag, true);
10381 		if (ret)
10382 			dev_err(&hdev->pdev->dev,
10383 				"failed to clear vf vlan for vf%d, ret = %d\n",
10384 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10385 	}
10386 }
10387 
10388 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10389 			  u16 vlan_id, bool is_kill)
10390 {
10391 	struct hclge_vport *vport = hclge_get_vport(handle);
10392 	struct hclge_dev *hdev = vport->back;
10393 	bool writen_to_tbl = false;
10394 	int ret = 0;
10395 
10396 	/* When the device is resetting or the reset has failed, firmware is
10397 	 * unable to handle the mailbox. Just record the vlan id, and remove
10398 	 * it after the reset has finished.
10399 	 */
10400 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10401 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10402 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10403 		return -EBUSY;
10404 	}
10405 
10406 	/* when port based vlan is enabled, we use the port based vlan as the
10407 	 * vlan filter entry. In this case, we don't update the vlan filter
10408 	 * table when the user adds or removes a vlan; we just update the
10409 	 * vport vlan list. The vlan ids in the vlan list are not written
10410 	 * into the vlan filter table until port based vlan is disabled.
10411 	 */
10412 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10413 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10414 					       vlan_id, is_kill);
10415 		writen_to_tbl = true;
10416 	}
10417 
10418 	if (!ret) {
10419 		if (is_kill)
10420 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10421 		else
10422 			hclge_add_vport_vlan_table(vport, vlan_id,
10423 						   writen_to_tbl);
10424 	} else if (is_kill) {
10425 		/* when removing the hw vlan filter failed, record the vlan id
10426 		 * and try to remove it from hw later, to stay consistent
10427 		 * with the stack
10428 		 */
10429 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10430 	}
10431 
10432 	hclge_set_vport_vlan_fltr_change(vport);
10433 
10434 	return ret;
10435 }
10436 
10437 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10438 {
10439 	struct hclge_vport *vport;
10440 	int ret;
10441 	u16 i;
10442 
10443 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10444 		vport = &hdev->vport[i];
10445 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10446 					&vport->state))
10447 			continue;
10448 
10449 		ret = hclge_enable_vport_vlan_filter(vport,
10450 						     vport->req_vlan_fltr_en);
10451 		if (ret) {
10452 			dev_err(&hdev->pdev->dev,
10453 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10454 				vport->vport_id, ret);
10455 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10456 				&vport->state);
10457 			return;
10458 		}
10459 	}
10460 }
10461 
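/* Retry the vlan deletions that failed earlier (recorded in
 * vlan_del_fail_bmap), bounded by HCLGE_MAX_SYNC_COUNT per run, then sync
 * the per-vport vlan filter enable state.
 */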
10462 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10463 {
10464 #define HCLGE_MAX_SYNC_COUNT	60
10465 
10466 	int i, ret, sync_cnt = 0;
10467 	u16 vlan_id;
10468 
10469 	/* start from vport 1 for PF is always alive */
10470 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10471 		struct hclge_vport *vport = &hdev->vport[i];
10472 
10473 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10474 					 VLAN_N_VID);
10475 		while (vlan_id != VLAN_N_VID) {
10476 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10477 						       vport->vport_id, vlan_id,
10478 						       true);
10479 			if (ret && ret != -EINVAL)
10480 				return;
10481 
10482 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10483 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10484 			hclge_set_vport_vlan_fltr_change(vport);
10485 
10486 			sync_cnt++;
10487 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10488 				return;
10489 
10490 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10491 						 VLAN_N_VID);
10492 		}
10493 	}
10494 
10495 	hclge_sync_vlan_fltr_state(hdev);
10496 }
10497 
10498 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10499 {
10500 	struct hclge_config_max_frm_size_cmd *req;
10501 	struct hclge_desc desc;
10502 
10503 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10504 
10505 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10506 	req->max_frm_size = cpu_to_le16(new_mps);
10507 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10508 
10509 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10510 }
10511 
10512 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10513 {
10514 	struct hclge_vport *vport = hclge_get_vport(handle);
10515 
10516 	return hclge_set_vport_mtu(vport, new_mtu);
10517 }
10518 
10519 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10520 {
10521 	struct hclge_dev *hdev = vport->back;
10522 	int i, max_frm_size, ret;
10523 
	/* HW supports 2 layers of vlan */
10525 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10526 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10527 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10528 		return -EINVAL;
10529 
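	/* never let the mps drop below the default frame size */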
10530 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10531 	mutex_lock(&hdev->vport_lock);
10532 	/* VF's mps must fit within hdev->mps */
10533 	if (vport->vport_id && max_frm_size > hdev->mps) {
10534 		mutex_unlock(&hdev->vport_lock);
10535 		return -EINVAL;
10536 	} else if (vport->vport_id) {
10537 		vport->mps = max_frm_size;
10538 		mutex_unlock(&hdev->vport_lock);
10539 		return 0;
10540 	}
10541 
	/* PF's mps must not be less than any VF's mps */
10543 	for (i = 1; i < hdev->num_alloc_vport; i++)
10544 		if (max_frm_size < hdev->vport[i].mps) {
10545 			mutex_unlock(&hdev->vport_lock);
10546 			return -EINVAL;
10547 		}
10548 
10549 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10550 
10551 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10552 	if (ret) {
10553 		dev_err(&hdev->pdev->dev,
10554 			"Change mtu fail, ret =%d\n", ret);
10555 		goto out;
10556 	}
10557 
10558 	hdev->mps = max_frm_size;
10559 	vport->mps = max_frm_size;
10560 
10561 	ret = hclge_buffer_alloc(hdev);
10562 	if (ret)
10563 		dev_err(&hdev->pdev->dev,
10564 			"Allocate buffer fail, ret =%d\n", ret);
10565 
10566 out:
10567 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10568 	mutex_unlock(&hdev->vport_lock);
10569 	return ret;
10570 }
10571 
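/* Send the command that asserts (enable == true) or deasserts the soft
 * reset of a single tqp, identified by its global queue id.
 */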
10572 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10573 				    bool enable)
10574 {
10575 	struct hclge_reset_tqp_queue_cmd *req;
10576 	struct hclge_desc desc;
10577 	int ret;
10578 
10579 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10580 
10581 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10582 	req->tqp_id = cpu_to_le16(queue_id);
10583 	if (enable)
10584 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10585 
10586 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10587 	if (ret) {
10588 		dev_err(&hdev->pdev->dev,
10589 			"Send tqp reset cmd error, status =%d\n", ret);
10590 		return ret;
10591 	}
10592 
10593 	return 0;
10594 }
10595 
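/* Query the reset state of the given tqp; returns the HCLGE_TQP_RESET_B
 * bit from the ready_to_reset field on success, or a negative errno if
 * the query command fails.
 */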
10596 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10597 {
10598 	struct hclge_reset_tqp_queue_cmd *req;
10599 	struct hclge_desc desc;
10600 	int ret;
10601 
10602 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10603 
10604 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10605 	req->tqp_id = cpu_to_le16(queue_id);
10606 
10607 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10608 	if (ret) {
10609 		dev_err(&hdev->pdev->dev,
10610 			"Get reset status error, status =%d\n", ret);
10611 		return ret;
10612 	}
10613 
10614 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10615 }
10616 
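/* Convert a queue id that is local to the handle into the global tqp
 * index used by the hardware commands.
 */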
10617 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10618 {
10619 	struct hnae3_queue *queue;
10620 	struct hclge_tqp *tqp;
10621 
10622 	queue = handle->kinfo.tqp[queue_id];
10623 	tqp = container_of(queue, struct hclge_tqp, q);
10624 
10625 	return tqp->index;
10626 }
10627 
10628 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10629 {
10630 	struct hclge_vport *vport = hclge_get_vport(handle);
10631 	struct hclge_dev *hdev = vport->back;
10632 	u16 reset_try_times = 0;
10633 	int reset_status;
10634 	u16 queue_gid;
10635 	int ret;
10636 	u16 i;
10637 
10638 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10639 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10640 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10641 		if (ret) {
10642 			dev_err(&hdev->pdev->dev,
10643 				"failed to send reset tqp cmd, ret = %d\n",
10644 				ret);
10645 			return ret;
10646 		}
10647 
10648 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10649 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10650 			if (reset_status)
10651 				break;
10652 
10653 			/* Wait for tqp hw reset */
10654 			usleep_range(1000, 1200);
10655 		}
10656 
10657 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10658 			dev_err(&hdev->pdev->dev,
10659 				"wait for tqp hw reset timeout\n");
10660 			return -ETIME;
10661 		}
10662 
10663 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10664 		if (ret) {
10665 			dev_err(&hdev->pdev->dev,
10666 				"failed to deassert soft reset, ret = %d\n",
10667 				ret);
10668 			return ret;
10669 		}
10670 		reset_try_times = 0;
10671 	}
10672 	return 0;
10673 }
10674 
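/* Reset the RCB for all tqps of this handle in one command. If the
 * firmware reports the command as unsupported, fall back to resetting
 * each tqp individually with hclge_reset_tqp_cmd().
 */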
10675 static int hclge_reset_rcb(struct hnae3_handle *handle)
10676 {
10677 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10678 #define HCLGE_RESET_RCB_SUCCESS		1U
10679 
10680 	struct hclge_vport *vport = hclge_get_vport(handle);
10681 	struct hclge_dev *hdev = vport->back;
10682 	struct hclge_reset_cmd *req;
10683 	struct hclge_desc desc;
10684 	u8 return_status;
10685 	u16 queue_gid;
10686 	int ret;
10687 
10688 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10689 
10690 	req = (struct hclge_reset_cmd *)desc.data;
10691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10692 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10693 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10694 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10695 
10696 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10697 	if (ret) {
10698 		dev_err(&hdev->pdev->dev,
10699 			"failed to send rcb reset cmd, ret = %d\n", ret);
10700 		return ret;
10701 	}
10702 
10703 	return_status = req->fun_reset_rcb_return_status;
10704 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10705 		return 0;
10706 
10707 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10708 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10709 			return_status);
10710 		return -EIO;
10711 	}
10712 
10713 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10714 	 * again to reset all tqps
10715 	 */
10716 	return hclge_reset_tqp_cmd(handle);
10717 }
10718 
10719 int hclge_reset_tqp(struct hnae3_handle *handle)
10720 {
10721 	struct hclge_vport *vport = hclge_get_vport(handle);
10722 	struct hclge_dev *hdev = vport->back;
10723 	int ret;
10724 
10725 	/* only need to disable PF's tqp */
10726 	if (!vport->vport_id) {
10727 		ret = hclge_tqp_enable(handle, false);
10728 		if (ret) {
10729 			dev_err(&hdev->pdev->dev,
10730 				"failed to disable tqp, ret = %d\n", ret);
10731 			return ret;
10732 		}
10733 	}
10734 
10735 	return hclge_reset_rcb(handle);
10736 }
10737 
10738 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10739 {
10740 	struct hclge_vport *vport = hclge_get_vport(handle);
10741 	struct hclge_dev *hdev = vport->back;
10742 
10743 	return hdev->fw_version;
10744 }
10745 
10746 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10747 {
10748 	struct phy_device *phydev = hdev->hw.mac.phydev;
10749 
10750 	if (!phydev)
10751 		return;
10752 
10753 	phy_set_asym_pause(phydev, rx_en, tx_en);
10754 }
10755 
10756 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10757 {
10758 	int ret;
10759 
10760 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10761 		return 0;
10762 
10763 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10764 	if (ret)
10765 		dev_err(&hdev->pdev->dev,
10766 			"configure pauseparam error, ret = %d.\n", ret);
10767 
10768 	return ret;
10769 }
10770 
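/* Resolve the pause configuration from the local and remote PHY
 * advertisements and apply it to the MAC; half duplex links get pause
 * disabled in both directions.
 */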
10771 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10772 {
10773 	struct phy_device *phydev = hdev->hw.mac.phydev;
10774 	u16 remote_advertising = 0;
10775 	u16 local_advertising;
10776 	u32 rx_pause, tx_pause;
10777 	u8 flowctl;
10778 
10779 	if (!phydev->link || !phydev->autoneg)
10780 		return 0;
10781 
10782 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10783 
10784 	if (phydev->pause)
10785 		remote_advertising = LPA_PAUSE_CAP;
10786 
10787 	if (phydev->asym_pause)
10788 		remote_advertising |= LPA_PAUSE_ASYM;
10789 
10790 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10791 					   remote_advertising);
10792 	tx_pause = flowctl & FLOW_CTRL_TX;
10793 	rx_pause = flowctl & FLOW_CTRL_RX;
10794 
10795 	if (phydev->duplex == HCLGE_MAC_HALF) {
10796 		tx_pause = 0;
10797 		rx_pause = 0;
10798 	}
10799 
10800 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10801 }
10802 
10803 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10804 				 u32 *rx_en, u32 *tx_en)
10805 {
10806 	struct hclge_vport *vport = hclge_get_vport(handle);
10807 	struct hclge_dev *hdev = vport->back;
10808 	u8 media_type = hdev->hw.mac.media_type;
10809 
10810 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10811 		    hclge_get_autoneg(handle) : 0;
10812 
10813 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10814 		*rx_en = 0;
10815 		*tx_en = 0;
10816 		return;
10817 	}
10818 
10819 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10820 		*rx_en = 1;
10821 		*tx_en = 0;
10822 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10823 		*tx_en = 1;
10824 		*rx_en = 0;
10825 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10826 		*rx_en = 1;
10827 		*tx_en = 1;
10828 	} else {
10829 		*rx_en = 0;
10830 		*tx_en = 0;
10831 	}
10832 }
10833 
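/* Remember the flow control mode requested by the user so it can be
 * restored later, and make it the current fc_mode.
 */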
10834 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10835 					 u32 rx_en, u32 tx_en)
10836 {
10837 	if (rx_en && tx_en)
10838 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10839 	else if (rx_en && !tx_en)
10840 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10841 	else if (!rx_en && tx_en)
10842 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10843 	else
10844 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10845 
10846 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10847 }
10848 
10849 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10850 				u32 rx_en, u32 tx_en)
10851 {
10852 	struct hclge_vport *vport = hclge_get_vport(handle);
10853 	struct hclge_dev *hdev = vport->back;
10854 	struct phy_device *phydev = hdev->hw.mac.phydev;
10855 	u32 fc_autoneg;
10856 
10857 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10858 		fc_autoneg = hclge_get_autoneg(handle);
10859 		if (auto_neg != fc_autoneg) {
10860 			dev_info(&hdev->pdev->dev,
10861 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10862 			return -EOPNOTSUPP;
10863 		}
10864 	}
10865 
10866 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10867 		dev_info(&hdev->pdev->dev,
10868 			 "Priority flow control enabled. Cannot set link flow control.\n");
10869 		return -EOPNOTSUPP;
10870 	}
10871 
10872 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10873 
10874 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10875 
10876 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10877 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10878 
10879 	if (phydev)
10880 		return phy_start_aneg(phydev);
10881 
10882 	return -EOPNOTSUPP;
10883 }
10884 
10885 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10886 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10887 {
10888 	struct hclge_vport *vport = hclge_get_vport(handle);
10889 	struct hclge_dev *hdev = vport->back;
10890 
10891 	if (speed)
10892 		*speed = hdev->hw.mac.speed;
10893 	if (duplex)
10894 		*duplex = hdev->hw.mac.duplex;
10895 	if (auto_neg)
10896 		*auto_neg = hdev->hw.mac.autoneg;
10897 }
10898 
10899 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10900 				 u8 *module_type)
10901 {
10902 	struct hclge_vport *vport = hclge_get_vport(handle);
10903 	struct hclge_dev *hdev = vport->back;
10904 
	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port information
	 * before returning the media type to ensure it is up to date.
10908 	 */
10909 	hclge_update_port_info(hdev);
10910 
10911 	if (media_type)
10912 		*media_type = hdev->hw.mac.media_type;
10913 
10914 	if (module_type)
10915 		*module_type = hdev->hw.mac.module_type;
10916 }
10917 
10918 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10919 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10920 {
10921 	struct hclge_vport *vport = hclge_get_vport(handle);
10922 	struct hclge_dev *hdev = vport->back;
10923 	struct phy_device *phydev = hdev->hw.mac.phydev;
10924 	int mdix_ctrl, mdix, is_resolved;
10925 	unsigned int retval;
10926 
10927 	if (!phydev) {
10928 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10929 		*tp_mdix = ETH_TP_MDI_INVALID;
10930 		return;
10931 	}
10932 
10933 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10934 
10935 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10936 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10937 				    HCLGE_PHY_MDIX_CTRL_S);
10938 
10939 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10940 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10941 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10942 
10943 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10944 
10945 	switch (mdix_ctrl) {
10946 	case 0x0:
10947 		*tp_mdix_ctrl = ETH_TP_MDI;
10948 		break;
10949 	case 0x1:
10950 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10951 		break;
10952 	case 0x3:
10953 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10954 		break;
10955 	default:
10956 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10957 		break;
10958 	}
10959 
10960 	if (!is_resolved)
10961 		*tp_mdix = ETH_TP_MDI_INVALID;
10962 	else if (mdix)
10963 		*tp_mdix = ETH_TP_MDI_X;
10964 	else
10965 		*tp_mdix = ETH_TP_MDI;
10966 }
10967 
10968 static void hclge_info_show(struct hclge_dev *hdev)
10969 {
10970 	struct device *dev = &hdev->pdev->dev;
10971 
10972 	dev_info(dev, "PF info begin:\n");
10973 
10974 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10975 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10976 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10977 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10978 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10979 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10980 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10981 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10982 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10983 	dev_info(dev, "This is %s PF\n",
10984 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10985 	dev_info(dev, "DCB %s\n",
10986 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10987 	dev_info(dev, "MQPRIO %s\n",
10988 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10989 
10990 	dev_info(dev, "PF info end.\n");
10991 }
10992 
10993 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10994 					  struct hclge_vport *vport)
10995 {
10996 	struct hnae3_client *client = vport->nic.client;
10997 	struct hclge_dev *hdev = ae_dev->priv;
10998 	int rst_cnt = hdev->rst_stats.reset_cnt;
10999 	int ret;
11000 
11001 	ret = client->ops->init_instance(&vport->nic);
11002 	if (ret)
11003 		return ret;
11004 
11005 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11006 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11007 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11008 		ret = -EBUSY;
11009 		goto init_nic_err;
11010 	}
11011 
11012 	/* Enable nic hw error interrupts */
11013 	ret = hclge_config_nic_hw_error(hdev, true);
11014 	if (ret) {
11015 		dev_err(&ae_dev->pdev->dev,
11016 			"fail(%d) to enable hw error interrupts\n", ret);
11017 		goto init_nic_err;
11018 	}
11019 
11020 	hnae3_set_client_init_flag(client, ae_dev, 1);
11021 
11022 	if (netif_msg_drv(&hdev->vport->nic))
11023 		hclge_info_show(hdev);
11024 
11025 	return ret;
11026 
11027 init_nic_err:
11028 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11029 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11030 		msleep(HCLGE_WAIT_RESET_DONE);
11031 
11032 	client->ops->uninit_instance(&vport->nic, 0);
11033 
11034 	return ret;
11035 }
11036 
11037 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11038 					   struct hclge_vport *vport)
11039 {
11040 	struct hclge_dev *hdev = ae_dev->priv;
11041 	struct hnae3_client *client;
11042 	int rst_cnt;
11043 	int ret;
11044 
11045 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11046 	    !hdev->nic_client)
11047 		return 0;
11048 
11049 	client = hdev->roce_client;
11050 	ret = hclge_init_roce_base_info(vport);
11051 	if (ret)
11052 		return ret;
11053 
11054 	rst_cnt = hdev->rst_stats.reset_cnt;
11055 	ret = client->ops->init_instance(&vport->roce);
11056 	if (ret)
11057 		return ret;
11058 
11059 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11060 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11061 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11062 		ret = -EBUSY;
11063 		goto init_roce_err;
11064 	}
11065 
11066 	/* Enable roce ras interrupts */
11067 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11068 	if (ret) {
11069 		dev_err(&ae_dev->pdev->dev,
11070 			"fail(%d) to enable roce ras interrupts\n", ret);
11071 		goto init_roce_err;
11072 	}
11073 
11074 	hnae3_set_client_init_flag(client, ae_dev, 1);
11075 
11076 	return 0;
11077 
11078 init_roce_err:
11079 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11080 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11081 		msleep(HCLGE_WAIT_RESET_DONE);
11082 
11083 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11084 
11085 	return ret;
11086 }
11087 
11088 static int hclge_init_client_instance(struct hnae3_client *client,
11089 				      struct hnae3_ae_dev *ae_dev)
11090 {
11091 	struct hclge_dev *hdev = ae_dev->priv;
11092 	struct hclge_vport *vport = &hdev->vport[0];
11093 	int ret;
11094 
11095 	switch (client->type) {
11096 	case HNAE3_CLIENT_KNIC:
11097 		hdev->nic_client = client;
11098 		vport->nic.client = client;
11099 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11100 		if (ret)
11101 			goto clear_nic;
11102 
11103 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11104 		if (ret)
11105 			goto clear_roce;
11106 
11107 		break;
11108 	case HNAE3_CLIENT_ROCE:
11109 		if (hnae3_dev_roce_supported(hdev)) {
11110 			hdev->roce_client = client;
11111 			vport->roce.client = client;
11112 		}
11113 
11114 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11115 		if (ret)
11116 			goto clear_roce;
11117 
11118 		break;
11119 	default:
11120 		return -EINVAL;
11121 	}
11122 
11123 	return 0;
11124 
11125 clear_nic:
11126 	hdev->nic_client = NULL;
11127 	vport->nic.client = NULL;
11128 	return ret;
11129 clear_roce:
11130 	hdev->roce_client = NULL;
11131 	vport->roce.client = NULL;
11132 	return ret;
11133 }
11134 
11135 static void hclge_uninit_client_instance(struct hnae3_client *client,
11136 					 struct hnae3_ae_dev *ae_dev)
11137 {
11138 	struct hclge_dev *hdev = ae_dev->priv;
11139 	struct hclge_vport *vport = &hdev->vport[0];
11140 
11141 	if (hdev->roce_client) {
11142 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11143 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11144 			msleep(HCLGE_WAIT_RESET_DONE);
11145 
11146 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11147 		hdev->roce_client = NULL;
11148 		vport->roce.client = NULL;
11149 	}
11150 	if (client->type == HNAE3_CLIENT_ROCE)
11151 		return;
11152 	if (hdev->nic_client && client->ops->uninit_instance) {
11153 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11154 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11155 			msleep(HCLGE_WAIT_RESET_DONE);
11156 
11157 		client->ops->uninit_instance(&vport->nic, 0);
11158 		hdev->nic_client = NULL;
11159 		vport->nic.client = NULL;
11160 	}
11161 }
11162 
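/* Map the optional device memory BAR; devices without it simply skip
 * the mapping.
 */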
11163 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11164 {
11165 #define HCLGE_MEM_BAR		4
11166 
11167 	struct pci_dev *pdev = hdev->pdev;
11168 	struct hclge_hw *hw = &hdev->hw;
11169 
	/* if the device does not have device memory, return directly */
11171 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11172 		return 0;
11173 
11174 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11175 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11176 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11177 	if (!hw->mem_base) {
11178 		dev_err(&pdev->dev, "failed to map device memory\n");
11179 		return -EFAULT;
11180 	}
11181 
11182 	return 0;
11183 }
11184 
11185 static int hclge_pci_init(struct hclge_dev *hdev)
11186 {
11187 	struct pci_dev *pdev = hdev->pdev;
11188 	struct hclge_hw *hw;
11189 	int ret;
11190 
11191 	ret = pci_enable_device(pdev);
11192 	if (ret) {
11193 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11194 		return ret;
11195 	}
11196 
11197 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11198 	if (ret) {
11199 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11200 		if (ret) {
11201 			dev_err(&pdev->dev,
11202 				"can't set consistent PCI DMA");
11203 			goto err_disable_device;
11204 		}
11205 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11206 	}
11207 
11208 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11209 	if (ret) {
11210 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11211 		goto err_disable_device;
11212 	}
11213 
11214 	pci_set_master(pdev);
11215 	hw = &hdev->hw;
11216 	hw->io_base = pcim_iomap(pdev, 2, 0);
11217 	if (!hw->io_base) {
11218 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11219 		ret = -ENOMEM;
11220 		goto err_clr_master;
11221 	}
11222 
11223 	ret = hclge_dev_mem_map(hdev);
11224 	if (ret)
11225 		goto err_unmap_io_base;
11226 
11227 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11228 
11229 	return 0;
11230 
11231 err_unmap_io_base:
11232 	pcim_iounmap(pdev, hdev->hw.io_base);
11233 err_clr_master:
11234 	pci_clear_master(pdev);
11235 	pci_release_regions(pdev);
11236 err_disable_device:
11237 	pci_disable_device(pdev);
11238 
11239 	return ret;
11240 }
11241 
11242 static void hclge_pci_uninit(struct hclge_dev *hdev)
11243 {
11244 	struct pci_dev *pdev = hdev->pdev;
11245 
11246 	if (hdev->hw.mem_base)
11247 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11248 
11249 	pcim_iounmap(pdev, hdev->hw.io_base);
11250 	pci_free_irq_vectors(pdev);
11251 	pci_clear_master(pdev);
11252 	pci_release_mem_regions(pdev);
11253 	pci_disable_device(pdev);
11254 }
11255 
11256 static void hclge_state_init(struct hclge_dev *hdev)
11257 {
11258 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11259 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11260 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11261 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11262 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11263 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11264 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11265 }
11266 
11267 static void hclge_state_uninit(struct hclge_dev *hdev)
11268 {
11269 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11270 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11271 
11272 	if (hdev->reset_timer.function)
11273 		del_timer_sync(&hdev->reset_timer);
11274 	if (hdev->service_task.work.func)
11275 		cancel_delayed_work_sync(&hdev->service_task);
11276 }
11277 
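/* Prepare the device for a reset requested by the stack (e.g. FLR).
 * Preparation is retried a few times if it fails or another reset is
 * already pending; the misc vector is disabled until the reset is done.
 */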
11278 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11279 					enum hnae3_reset_type rst_type)
11280 {
11281 #define HCLGE_RESET_RETRY_WAIT_MS	500
11282 #define HCLGE_RESET_RETRY_CNT	5
11283 
11284 	struct hclge_dev *hdev = ae_dev->priv;
11285 	int retry_cnt = 0;
11286 	int ret;
11287 
11288 retry:
11289 	down(&hdev->reset_sem);
11290 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11291 	hdev->reset_type = rst_type;
11292 	ret = hclge_reset_prepare(hdev);
11293 	if (ret || hdev->reset_pending) {
11294 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11295 			ret);
11296 		if (hdev->reset_pending ||
11297 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11298 			dev_err(&hdev->pdev->dev,
11299 				"reset_pending:0x%lx, retry_cnt:%d\n",
11300 				hdev->reset_pending, retry_cnt);
11301 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11302 			up(&hdev->reset_sem);
11303 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11304 			goto retry;
11305 		}
11306 	}
11307 
11308 	/* disable misc vector before reset done */
11309 	hclge_enable_vector(&hdev->misc_vector, false);
11310 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11311 
11312 	if (hdev->reset_type == HNAE3_FLR_RESET)
11313 		hdev->rst_stats.flr_rst_cnt++;
11314 }
11315 
11316 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11317 {
11318 	struct hclge_dev *hdev = ae_dev->priv;
11319 	int ret;
11320 
11321 	hclge_enable_vector(&hdev->misc_vector, true);
11322 
11323 	ret = hclge_reset_rebuild(hdev);
11324 	if (ret)
11325 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11326 
11327 	hdev->reset_type = HNAE3_NONE_RESET;
11328 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11329 	up(&hdev->reset_sem);
11330 }
11331 
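/* Clear the FUNC_RST_ING state of every allocated vport. */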
11332 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11333 {
11334 	u16 i;
11335 
11336 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11337 		struct hclge_vport *vport = &hdev->vport[i];
11338 		int ret;
11339 
		/* Send cmd to clear VF's FUNC_RST_ING */
11341 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11342 		if (ret)
11343 			dev_warn(&hdev->pdev->dev,
11344 				 "clear vf(%u) rst failed %d!\n",
11345 				 vport->vport_id, ret);
11346 	}
11347 }
11348 
11349 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11350 {
11351 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11352 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11353 }
11354 
11355 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11356 {
11357 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11358 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11359 }
11360 
11361 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11362 {
11363 	struct pci_dev *pdev = ae_dev->pdev;
11364 	struct hclge_dev *hdev;
11365 	int ret;
11366 
11367 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11368 	if (!hdev)
11369 		return -ENOMEM;
11370 
11371 	hdev->pdev = pdev;
11372 	hdev->ae_dev = ae_dev;
11373 	hdev->reset_type = HNAE3_NONE_RESET;
11374 	hdev->reset_level = HNAE3_FUNC_RESET;
11375 	ae_dev->priv = hdev;
11376 
	/* HW supports 2 layers of vlan */
11378 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11379 
11380 	mutex_init(&hdev->vport_lock);
11381 	spin_lock_init(&hdev->fd_rule_lock);
11382 	sema_init(&hdev->reset_sem, 1);
11383 
11384 	ret = hclge_pci_init(hdev);
11385 	if (ret)
11386 		goto out;
11387 
	/* Firmware command queue initialization */
11389 	ret = hclge_cmd_queue_init(hdev);
11390 	if (ret)
11391 		goto err_pci_uninit;
11392 
	/* Firmware command initialization */
11394 	ret = hclge_cmd_init(hdev);
11395 	if (ret)
11396 		goto err_cmd_uninit;
11397 
11398 	ret = hclge_get_cap(hdev);
11399 	if (ret)
11400 		goto err_cmd_uninit;
11401 
11402 	ret = hclge_query_dev_specs(hdev);
11403 	if (ret) {
11404 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11405 			ret);
11406 		goto err_cmd_uninit;
11407 	}
11408 
11409 	ret = hclge_configure(hdev);
11410 	if (ret) {
11411 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11412 		goto err_cmd_uninit;
11413 	}
11414 
11415 	ret = hclge_init_msi(hdev);
11416 	if (ret) {
11417 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11418 		goto err_cmd_uninit;
11419 	}
11420 
11421 	ret = hclge_misc_irq_init(hdev);
11422 	if (ret)
11423 		goto err_msi_uninit;
11424 
11425 	ret = hclge_alloc_tqps(hdev);
11426 	if (ret) {
11427 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11428 		goto err_msi_irq_uninit;
11429 	}
11430 
11431 	ret = hclge_alloc_vport(hdev);
11432 	if (ret)
11433 		goto err_msi_irq_uninit;
11434 
11435 	ret = hclge_map_tqp(hdev);
11436 	if (ret)
11437 		goto err_msi_irq_uninit;
11438 
11439 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11440 	    !hnae3_dev_phy_imp_supported(hdev)) {
11441 		ret = hclge_mac_mdio_config(hdev);
11442 		if (ret)
11443 			goto err_msi_irq_uninit;
11444 	}
11445 
11446 	ret = hclge_init_umv_space(hdev);
11447 	if (ret)
11448 		goto err_mdiobus_unreg;
11449 
11450 	ret = hclge_mac_init(hdev);
11451 	if (ret) {
11452 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11453 		goto err_mdiobus_unreg;
11454 	}
11455 
11456 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11457 	if (ret) {
11458 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11459 		goto err_mdiobus_unreg;
11460 	}
11461 
11462 	ret = hclge_config_gro(hdev, true);
11463 	if (ret)
11464 		goto err_mdiobus_unreg;
11465 
11466 	ret = hclge_init_vlan_config(hdev);
11467 	if (ret) {
11468 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11469 		goto err_mdiobus_unreg;
11470 	}
11471 
11472 	ret = hclge_tm_schd_init(hdev);
11473 	if (ret) {
11474 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11475 		goto err_mdiobus_unreg;
11476 	}
11477 
11478 	ret = hclge_rss_init_cfg(hdev);
11479 	if (ret) {
11480 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11481 		goto err_mdiobus_unreg;
11482 	}
11483 
11484 	ret = hclge_rss_init_hw(hdev);
11485 	if (ret) {
11486 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11487 		goto err_mdiobus_unreg;
11488 	}
11489 
11490 	ret = init_mgr_tbl(hdev);
11491 	if (ret) {
11492 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11493 		goto err_mdiobus_unreg;
11494 	}
11495 
11496 	ret = hclge_init_fd_config(hdev);
11497 	if (ret) {
11498 		dev_err(&pdev->dev,
11499 			"fd table init fail, ret=%d\n", ret);
11500 		goto err_mdiobus_unreg;
11501 	}
11502 
11503 	INIT_KFIFO(hdev->mac_tnl_log);
11504 
11505 	hclge_dcb_ops_set(hdev);
11506 
11507 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11508 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11509 
	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify callback.
11512 	 */
11513 	hclge_misc_affinity_setup(hdev);
11514 
11515 	hclge_clear_all_event_cause(hdev);
11516 	hclge_clear_resetting_state(hdev);
11517 
	/* Log and clear the hw errors that have already occurred */
11519 	hclge_handle_all_hns_hw_errors(ae_dev);
11520 
	/* request a delayed reset for error recovery, because an immediate
	 * global reset on a PF would affect the pending initialization of other PFs
11523 	 */
11524 	if (ae_dev->hw_err_reset_req) {
11525 		enum hnae3_reset_type reset_level;
11526 
11527 		reset_level = hclge_get_reset_level(ae_dev,
11528 						    &ae_dev->hw_err_reset_req);
11529 		hclge_set_def_reset_request(ae_dev, reset_level);
11530 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11531 	}
11532 
11533 	hclge_init_rxd_adv_layout(hdev);
11534 
11535 	/* Enable MISC vector(vector0) */
11536 	hclge_enable_vector(&hdev->misc_vector, true);
11537 
11538 	hclge_state_init(hdev);
11539 	hdev->last_reset_time = jiffies;
11540 
11541 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11542 		 HCLGE_DRIVER_NAME);
11543 
11544 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11545 
11546 	return 0;
11547 
11548 err_mdiobus_unreg:
11549 	if (hdev->hw.mac.phydev)
11550 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11551 err_msi_irq_uninit:
11552 	hclge_misc_irq_uninit(hdev);
11553 err_msi_uninit:
11554 	pci_free_irq_vectors(pdev);
11555 err_cmd_uninit:
11556 	hclge_cmd_uninit(hdev);
11557 err_pci_uninit:
11558 	pcim_iounmap(pdev, hdev->hw.io_base);
11559 	pci_clear_master(pdev);
11560 	pci_release_regions(pdev);
11561 	pci_disable_device(pdev);
11562 out:
11563 	mutex_destroy(&hdev->vport_lock);
11564 	return ret;
11565 }
11566 
11567 static void hclge_stats_clear(struct hclge_dev *hdev)
11568 {
11569 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11570 }
11571 
11572 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11573 {
11574 	return hclge_config_switch_param(hdev, vf, enable,
11575 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11576 }
11577 
11578 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11579 {
11580 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11581 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11582 					  enable, vf);
11583 }
11584 
11585 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11586 {
11587 	int ret;
11588 
11589 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11590 	if (ret) {
11591 		dev_err(&hdev->pdev->dev,
11592 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11593 			vf, enable ? "on" : "off", ret);
11594 		return ret;
11595 	}
11596 
11597 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11598 	if (ret)
11599 		dev_err(&hdev->pdev->dev,
11600 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11601 			vf, enable ? "on" : "off", ret);
11602 
11603 	return ret;
11604 }
11605 
11606 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11607 				 bool enable)
11608 {
11609 	struct hclge_vport *vport = hclge_get_vport(handle);
11610 	struct hclge_dev *hdev = vport->back;
11611 	u32 new_spoofchk = enable ? 1 : 0;
11612 	int ret;
11613 
11614 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11615 		return -EOPNOTSUPP;
11616 
11617 	vport = hclge_get_vf_vport(hdev, vf);
11618 	if (!vport)
11619 		return -EINVAL;
11620 
11621 	if (vport->vf_info.spoofchk == new_spoofchk)
11622 		return 0;
11623 
11624 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11625 		dev_warn(&hdev->pdev->dev,
11626 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11627 			 vf);
11628 	else if (enable && hclge_is_umv_space_full(vport, true))
11629 		dev_warn(&hdev->pdev->dev,
11630 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11631 			 vf);
11632 
11633 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11634 	if (ret)
11635 		return ret;
11636 
11637 	vport->vf_info.spoofchk = new_spoofchk;
11638 	return 0;
11639 }
11640 
11641 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11642 {
11643 	struct hclge_vport *vport = hdev->vport;
11644 	int ret;
11645 	int i;
11646 
11647 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11648 		return 0;
11649 
11650 	/* resume the vf spoof check state after reset */
11651 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11652 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11653 					       vport->vf_info.spoofchk);
11654 		if (ret)
11655 			return ret;
11656 
11657 		vport++;
11658 	}
11659 
11660 	return 0;
11661 }
11662 
11663 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11664 {
11665 	struct hclge_vport *vport = hclge_get_vport(handle);
11666 	struct hclge_dev *hdev = vport->back;
11667 	u32 new_trusted = enable ? 1 : 0;
11668 
11669 	vport = hclge_get_vf_vport(hdev, vf);
11670 	if (!vport)
11671 		return -EINVAL;
11672 
11673 	if (vport->vf_info.trusted == new_trusted)
11674 		return 0;
11675 
11676 	vport->vf_info.trusted = new_trusted;
11677 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11678 	hclge_task_schedule(hdev, 0);
11679 
11680 	return 0;
11681 }
11682 
11683 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11684 {
11685 	int ret;
11686 	int vf;
11687 
11688 	/* reset vf rate to default value */
11689 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11690 		struct hclge_vport *vport = &hdev->vport[vf];
11691 
11692 		vport->vf_info.max_tx_rate = 0;
11693 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11694 		if (ret)
11695 			dev_err(&hdev->pdev->dev,
11696 				"vf%d failed to reset to default, ret=%d\n",
11697 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11698 	}
11699 }
11700 
11701 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11702 				     int min_tx_rate, int max_tx_rate)
11703 {
11704 	if (min_tx_rate != 0 ||
11705 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11706 		dev_err(&hdev->pdev->dev,
11707 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11708 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11709 		return -EINVAL;
11710 	}
11711 
11712 	return 0;
11713 }
11714 
11715 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11716 			     int min_tx_rate, int max_tx_rate, bool force)
11717 {
11718 	struct hclge_vport *vport = hclge_get_vport(handle);
11719 	struct hclge_dev *hdev = vport->back;
11720 	int ret;
11721 
11722 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11723 	if (ret)
11724 		return ret;
11725 
11726 	vport = hclge_get_vf_vport(hdev, vf);
11727 	if (!vport)
11728 		return -EINVAL;
11729 
11730 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11731 		return 0;
11732 
11733 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11734 	if (ret)
11735 		return ret;
11736 
11737 	vport->vf_info.max_tx_rate = max_tx_rate;
11738 
11739 	return 0;
11740 }
11741 
11742 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11743 {
11744 	struct hnae3_handle *handle = &hdev->vport->nic;
11745 	struct hclge_vport *vport;
11746 	int ret;
11747 	int vf;
11748 
11749 	/* resume the vf max_tx_rate after reset */
11750 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11751 		vport = hclge_get_vf_vport(hdev, vf);
11752 		if (!vport)
11753 			return -EINVAL;
11754 
		/* zero means max rate; after reset, the firmware has already set
		 * it to max rate, so just continue.
11757 		 */
11758 		if (!vport->vf_info.max_tx_rate)
11759 			continue;
11760 
11761 		ret = hclge_set_vf_rate(handle, vf, 0,
11762 					vport->vf_info.max_tx_rate, true);
11763 		if (ret) {
11764 			dev_err(&hdev->pdev->dev,
11765 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11766 				vf, vport->vf_info.max_tx_rate, ret);
11767 			return ret;
11768 		}
11769 	}
11770 
11771 	return 0;
11772 }
11773 
11774 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11775 {
11776 	struct hclge_vport *vport = hdev->vport;
11777 	int i;
11778 
11779 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11780 		hclge_vport_stop(vport);
11781 		vport++;
11782 	}
11783 }
11784 
11785 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11786 {
11787 	struct hclge_dev *hdev = ae_dev->priv;
11788 	struct pci_dev *pdev = ae_dev->pdev;
11789 	int ret;
11790 
11791 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11792 
11793 	hclge_stats_clear(hdev);
	/* NOTE: pf reset doesn't need to clear or restore pf and vf table
	 * entries, so the tables in memory should not be cleaned here.
11796 	 */
11797 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11798 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11799 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11800 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11801 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11802 		hclge_reset_umv_space(hdev);
11803 	}
11804 
11805 	ret = hclge_cmd_init(hdev);
11806 	if (ret) {
11807 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11808 		return ret;
11809 	}
11810 
11811 	ret = hclge_map_tqp(hdev);
11812 	if (ret) {
11813 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11814 		return ret;
11815 	}
11816 
11817 	ret = hclge_mac_init(hdev);
11818 	if (ret) {
11819 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11820 		return ret;
11821 	}
11822 
11823 	ret = hclge_tp_port_init(hdev);
11824 	if (ret) {
11825 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11826 			ret);
11827 		return ret;
11828 	}
11829 
11830 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11831 	if (ret) {
11832 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11833 		return ret;
11834 	}
11835 
11836 	ret = hclge_config_gro(hdev, true);
11837 	if (ret)
11838 		return ret;
11839 
11840 	ret = hclge_init_vlan_config(hdev);
11841 	if (ret) {
11842 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11843 		return ret;
11844 	}
11845 
11846 	ret = hclge_tm_init_hw(hdev, true);
11847 	if (ret) {
11848 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11849 		return ret;
11850 	}
11851 
11852 	ret = hclge_rss_init_hw(hdev);
11853 	if (ret) {
11854 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11855 		return ret;
11856 	}
11857 
11858 	ret = init_mgr_tbl(hdev);
11859 	if (ret) {
11860 		dev_err(&pdev->dev,
11861 			"failed to reinit manager table, ret = %d\n", ret);
11862 		return ret;
11863 	}
11864 
11865 	ret = hclge_init_fd_config(hdev);
11866 	if (ret) {
11867 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11868 		return ret;
11869 	}
11870 
	/* Log and clear the hw errors that have already occurred */
11872 	hclge_handle_all_hns_hw_errors(ae_dev);
11873 
11874 	/* Re-enable the hw error interrupts because
11875 	 * the interrupts get disabled on global reset.
11876 	 */
11877 	ret = hclge_config_nic_hw_error(hdev, true);
11878 	if (ret) {
11879 		dev_err(&pdev->dev,
11880 			"fail(%d) to re-enable NIC hw error interrupts\n",
11881 			ret);
11882 		return ret;
11883 	}
11884 
11885 	if (hdev->roce_client) {
11886 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11887 		if (ret) {
11888 			dev_err(&pdev->dev,
11889 				"fail(%d) to re-enable roce ras interrupts\n",
11890 				ret);
11891 			return ret;
11892 		}
11893 	}
11894 
11895 	hclge_reset_vport_state(hdev);
11896 	ret = hclge_reset_vport_spoofchk(hdev);
11897 	if (ret)
11898 		return ret;
11899 
11900 	ret = hclge_resume_vf_rate(hdev);
11901 	if (ret)
11902 		return ret;
11903 
11904 	hclge_init_rxd_adv_layout(hdev);
11905 
11906 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11907 		 HCLGE_DRIVER_NAME);
11908 
11909 	return 0;
11910 }
11911 
11912 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11913 {
11914 	struct hclge_dev *hdev = ae_dev->priv;
11915 	struct hclge_mac *mac = &hdev->hw.mac;
11916 
11917 	hclge_reset_vf_rate(hdev);
11918 	hclge_clear_vf_vlan(hdev);
11919 	hclge_misc_affinity_teardown(hdev);
11920 	hclge_state_uninit(hdev);
11921 	hclge_uninit_rxd_adv_layout(hdev);
11922 	hclge_uninit_mac_table(hdev);
11923 	hclge_del_all_fd_entries(hdev);
11924 
11925 	if (mac->phydev)
11926 		mdiobus_unregister(mac->mdio_bus);
11927 
11928 	/* Disable MISC vector(vector0) */
11929 	hclge_enable_vector(&hdev->misc_vector, false);
11930 	synchronize_irq(hdev->misc_vector.vector_irq);
11931 
11932 	/* Disable all hw interrupts */
11933 	hclge_config_mac_tnl_int(hdev, false);
11934 	hclge_config_nic_hw_error(hdev, false);
11935 	hclge_config_rocee_ras_interrupt(hdev, false);
11936 
11937 	hclge_cmd_uninit(hdev);
11938 	hclge_misc_irq_uninit(hdev);
11939 	hclge_pci_uninit(hdev);
11940 	mutex_destroy(&hdev->vport_lock);
11941 	hclge_uninit_vport_vlan_table(hdev);
11942 	ae_dev->priv = NULL;
11943 }
11944 
11945 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11946 {
11947 	struct hclge_vport *vport = hclge_get_vport(handle);
11948 	struct hclge_dev *hdev = vport->back;
11949 
11950 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11951 }
11952 
11953 static void hclge_get_channels(struct hnae3_handle *handle,
11954 			       struct ethtool_channels *ch)
11955 {
11956 	ch->max_combined = hclge_get_max_channels(handle);
11957 	ch->other_count = 1;
11958 	ch->max_other = 1;
11959 	ch->combined_count = handle->kinfo.rss_size;
11960 }
11961 
11962 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11963 					u16 *alloc_tqps, u16 *max_rss_size)
11964 {
11965 	struct hclge_vport *vport = hclge_get_vport(handle);
11966 	struct hclge_dev *hdev = vport->back;
11967 
11968 	*alloc_tqps = vport->alloc_tqps;
11969 	*max_rss_size = hdev->pf_rss_size_max;
11970 }
11971 
11972 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11973 			      bool rxfh_configured)
11974 {
11975 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11976 	struct hclge_vport *vport = hclge_get_vport(handle);
11977 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11978 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11979 	struct hclge_dev *hdev = vport->back;
11980 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11981 	u16 cur_rss_size = kinfo->rss_size;
11982 	u16 cur_tqps = kinfo->num_tqps;
11983 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11984 	u16 roundup_size;
11985 	u32 *rss_indir;
11986 	unsigned int i;
11987 	int ret;
11988 
11989 	kinfo->req_rss_size = new_tqps_num;
11990 
11991 	ret = hclge_tm_vport_map_update(hdev);
11992 	if (ret) {
11993 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11994 		return ret;
11995 	}
11996 
11997 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11998 	roundup_size = ilog2(roundup_size);
11999 	/* Set the RSS TC mode according to the new RSS size */
12000 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12001 		tc_valid[i] = 0;
12002 
12003 		if (!(hdev->hw_tc_map & BIT(i)))
12004 			continue;
12005 
12006 		tc_valid[i] = 1;
12007 		tc_size[i] = roundup_size;
12008 		tc_offset[i] = kinfo->rss_size * i;
12009 	}
12010 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12011 	if (ret)
12012 		return ret;
12013 
12014 	/* RSS indirection table has been configured by user */
12015 	if (rxfh_configured)
12016 		goto out;
12017 
	/* Reinitialize the rss indirection table according to the new RSS size */
12019 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12020 			    GFP_KERNEL);
12021 	if (!rss_indir)
12022 		return -ENOMEM;
12023 
12024 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12025 		rss_indir[i] = i % kinfo->rss_size;
12026 
12027 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12028 	if (ret)
12029 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12030 			ret);
12031 
12032 	kfree(rss_indir);
12033 
12034 out:
12035 	if (!ret)
12036 		dev_info(&hdev->pdev->dev,
12037 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12038 			 cur_rss_size, kinfo->rss_size,
12039 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12040 
12041 	return ret;
12042 }
12043 
12044 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12045 			      u32 *regs_num_64_bit)
12046 {
12047 	struct hclge_desc desc;
12048 	u32 total_num;
12049 	int ret;
12050 
12051 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12052 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12053 	if (ret) {
12054 		dev_err(&hdev->pdev->dev,
12055 			"Query register number cmd failed, ret = %d.\n", ret);
12056 		return ret;
12057 	}
12058 
12059 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12060 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12061 
12062 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12063 	if (!total_num)
12064 		return -EINVAL;
12065 
12066 	return 0;
12067 }
12068 
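/* Read regs_num 32 bit registers through the command queue. The first BD
 * of the response holds HCLGE_32_BIT_REG_RTN_DATANUM - 2 register values
 * (2 words are the command header); the following BDs are consumed as
 * pure data.
 */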
12069 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12070 				 void *data)
12071 {
12072 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12073 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12074 
12075 	struct hclge_desc *desc;
12076 	u32 *reg_val = data;
12077 	__le32 *desc_data;
12078 	int nodata_num;
12079 	int cmd_num;
12080 	int i, k, n;
12081 	int ret;
12082 
12083 	if (regs_num == 0)
12084 		return 0;
12085 
12086 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12087 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12088 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12089 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12090 	if (!desc)
12091 		return -ENOMEM;
12092 
12093 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12094 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12095 	if (ret) {
12096 		dev_err(&hdev->pdev->dev,
12097 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12098 		kfree(desc);
12099 		return ret;
12100 	}
12101 
12102 	for (i = 0; i < cmd_num; i++) {
12103 		if (i == 0) {
12104 			desc_data = (__le32 *)(&desc[i].data[0]);
12105 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12106 		} else {
12107 			desc_data = (__le32 *)(&desc[i]);
12108 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12109 		}
12110 		for (k = 0; k < n; k++) {
12111 			*reg_val++ = le32_to_cpu(*desc_data++);
12112 
12113 			regs_num--;
12114 			if (!regs_num)
12115 				break;
12116 		}
12117 	}
12118 
12119 	kfree(desc);
12120 	return 0;
12121 }
12122 
12123 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12124 				 void *data)
12125 {
12126 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12127 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12128 
12129 	struct hclge_desc *desc;
12130 	u64 *reg_val = data;
12131 	__le64 *desc_data;
12132 	int nodata_len;
12133 	int cmd_num;
12134 	int i, k, n;
12135 	int ret;
12136 
12137 	if (regs_num == 0)
12138 		return 0;
12139 
12140 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12141 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12142 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12143 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12144 	if (!desc)
12145 		return -ENOMEM;
12146 
12147 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12148 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12149 	if (ret) {
12150 		dev_err(&hdev->pdev->dev,
12151 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12152 		kfree(desc);
12153 		return ret;
12154 	}
12155 
12156 	for (i = 0; i < cmd_num; i++) {
12157 		if (i == 0) {
12158 			desc_data = (__le64 *)(&desc[i].data[0]);
12159 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12160 		} else {
12161 			desc_data = (__le64 *)(&desc[i]);
12162 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12163 		}
12164 		for (k = 0; k < n; k++) {
12165 			*reg_val++ = le64_to_cpu(*desc_data++);
12166 
12167 			regs_num--;
12168 			if (!regs_num)
12169 				break;
12170 		}
12171 	}
12172 
12173 	kfree(desc);
12174 	return 0;
12175 }
12176 
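/* The register dump is organized in lines of REG_NUM_PER_LINE u32 words;
 * each block of registers is padded to a full line with SEPARATOR_VALUE
 * markers.
 */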
12177 #define MAX_SEPARATE_NUM	4
12178 #define SEPARATOR_VALUE		0xFDFCFBFA
12179 #define REG_NUM_PER_LINE	4
12180 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12181 #define REG_SEPARATOR_LINE	1
12182 #define REG_NUM_REMAIN_MASK	3
12183 
12184 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12185 {
12186 	int i;
12187 
	/* initialize command BDs except the last one */
12189 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12190 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12191 					   true);
12192 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12193 	}
12194 
12195 	/* initialize the last command BD */
12196 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12197 
12198 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12199 }
12200 
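/* Query how many BDs each DFX register type needs; the counts are picked
 * out of the query response using hclge_dfx_bd_offset_list.
 */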
12201 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12202 				    int *bd_num_list,
12203 				    u32 type_num)
12204 {
12205 	u32 entries_per_desc, desc_index, index, offset, i;
12206 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12207 	int ret;
12208 
12209 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12210 	if (ret) {
12211 		dev_err(&hdev->pdev->dev,
12212 			"Get dfx bd num fail, status is %d.\n", ret);
12213 		return ret;
12214 	}
12215 
12216 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12217 	for (i = 0; i < type_num; i++) {
12218 		offset = hclge_dfx_bd_offset_list[i];
12219 		index = offset % entries_per_desc;
12220 		desc_index = offset / entries_per_desc;
12221 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12222 	}
12223 
12224 	return ret;
12225 }
12226 
12227 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12228 				  struct hclge_desc *desc_src, int bd_num,
12229 				  enum hclge_opcode_type cmd)
12230 {
12231 	struct hclge_desc *desc = desc_src;
12232 	int i, ret;
12233 
12234 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12235 	for (i = 0; i < bd_num - 1; i++) {
12236 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12237 		desc++;
12238 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12239 	}
12240 
12241 	desc = desc_src;
12242 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12243 	if (ret)
12244 		dev_err(&hdev->pdev->dev,
12245 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12246 			cmd, ret);
12247 
12248 	return ret;
12249 }
12250 
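/* Copy the register values returned in desc_src into the data buffer and
 * pad the block with SEPARATOR_VALUE words; returns the number of u32
 * words written.
 */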
12251 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12252 				    void *data)
12253 {
12254 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12255 	struct hclge_desc *desc = desc_src;
12256 	u32 *reg = data;
12257 
12258 	entries_per_desc = ARRAY_SIZE(desc->data);
12259 	reg_num = entries_per_desc * bd_num;
12260 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12261 	for (i = 0; i < reg_num; i++) {
12262 		index = i % entries_per_desc;
12263 		desc_index = i / entries_per_desc;
12264 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12265 	}
12266 	for (i = 0; i < separator_num; i++)
12267 		*reg++ = SEPARATOR_VALUE;
12268 
12269 	return reg_num + separator_num;
12270 }
12271 
12272 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12273 {
12274 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12275 	int data_len_per_desc, bd_num, i;
12276 	int *bd_num_list;
12277 	u32 data_len;
12278 	int ret;
12279 
12280 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12281 	if (!bd_num_list)
12282 		return -ENOMEM;
12283 
12284 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12285 	if (ret) {
12286 		dev_err(&hdev->pdev->dev,
12287 			"Get dfx reg bd num fail, status is %d.\n", ret);
12288 		goto out;
12289 	}
12290 
12291 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12292 	*len = 0;
12293 	for (i = 0; i < dfx_reg_type_num; i++) {
12294 		bd_num = bd_num_list[i];
12295 		data_len = data_len_per_desc * bd_num;
12296 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12297 	}
12298 
12299 out:
12300 	kfree(bd_num_list);
12301 	return ret;
12302 }
12303 
12304 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12305 {
12306 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12307 	int bd_num, bd_num_max, buf_len, i;
12308 	struct hclge_desc *desc_src;
12309 	int *bd_num_list;
12310 	u32 *reg = data;
12311 	int ret;
12312 
12313 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12314 	if (!bd_num_list)
12315 		return -ENOMEM;
12316 
12317 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12318 	if (ret) {
12319 		dev_err(&hdev->pdev->dev,
12320 			"Get dfx reg bd num fail, status is %d.\n", ret);
12321 		goto out;
12322 	}
12323 
12324 	bd_num_max = bd_num_list[0];
12325 	for (i = 1; i < dfx_reg_type_num; i++)
12326 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12327 
12328 	buf_len = sizeof(*desc_src) * bd_num_max;
12329 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12330 	if (!desc_src) {
12331 		ret = -ENOMEM;
12332 		goto out;
12333 	}
12334 
12335 	for (i = 0; i < dfx_reg_type_num; i++) {
12336 		bd_num = bd_num_list[i];
12337 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12338 					     hclge_dfx_reg_opcode_list[i]);
12339 		if (ret) {
12340 			dev_err(&hdev->pdev->dev,
12341 				"Get dfx reg fail, status is %d.\n", ret);
12342 			break;
12343 		}
12344 
12345 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12346 	}
12347 
12348 	kfree(desc_src);
12349 out:
12350 	kfree(bd_num_list);
12351 	return ret;
12352 }
12353 
12354 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12355 			      struct hnae3_knic_private_info *kinfo)
12356 {
12357 #define HCLGE_RING_REG_OFFSET		0x200
12358 #define HCLGE_RING_INT_REG_OFFSET	0x4
12359 
12360 	int i, j, reg_num, separator_num;
12361 	int data_num_sum;
12362 	u32 *reg = data;
12363 
	/* fetch per-PF register values from the PF PCIe register space */
12365 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12366 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12367 	for (i = 0; i < reg_num; i++)
12368 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12369 	for (i = 0; i < separator_num; i++)
12370 		*reg++ = SEPARATOR_VALUE;
12371 	data_num_sum = reg_num + separator_num;
12372 
12373 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12374 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12375 	for (i = 0; i < reg_num; i++)
12376 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12377 	for (i = 0; i < separator_num; i++)
12378 		*reg++ = SEPARATOR_VALUE;
12379 	data_num_sum += reg_num + separator_num;
12380 
12381 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12382 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12383 	for (j = 0; j < kinfo->num_tqps; j++) {
12384 		for (i = 0; i < reg_num; i++)
12385 			*reg++ = hclge_read_dev(&hdev->hw,
12386 						ring_reg_addr_list[i] +
12387 						HCLGE_RING_REG_OFFSET * j);
12388 		for (i = 0; i < separator_num; i++)
12389 			*reg++ = SEPARATOR_VALUE;
12390 	}
12391 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12392 
12393 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12394 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12395 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12396 		for (i = 0; i < reg_num; i++)
12397 			*reg++ = hclge_read_dev(&hdev->hw,
12398 						tqp_intr_reg_addr_list[i] +
12399 						HCLGE_RING_INT_REG_OFFSET * j);
12400 		for (i = 0; i < separator_num; i++)
12401 			*reg++ = SEPARATOR_VALUE;
12402 	}
12403 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12404 
12405 	return data_num_sum;
12406 }
12407 
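/* hclge_get_regs_len - size in bytes of the buffer hclge_get_regs() fills:
 * the directly read register groups, the 32-bit and 64-bit register lists
 * reported by firmware, and the DFX registers, each counted in
 * REG_LEN_PER_LINE lines plus separator lines.
 */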
12408 static int hclge_get_regs_len(struct hnae3_handle *handle)
12409 {
12410 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12411 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12412 	struct hclge_vport *vport = hclge_get_vport(handle);
12413 	struct hclge_dev *hdev = vport->back;
12414 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12415 	int regs_lines_32_bit, regs_lines_64_bit;
12416 	int ret;
12417 
12418 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12419 	if (ret) {
12420 		dev_err(&hdev->pdev->dev,
12421 			"Get register number failed, ret = %d.\n", ret);
12422 		return ret;
12423 	}
12424 
12425 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12426 	if (ret) {
12427 		dev_err(&hdev->pdev->dev,
12428 			"Get dfx reg len failed, ret = %d.\n", ret);
12429 		return ret;
12430 	}
12431 
12432 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12433 		REG_SEPARATOR_LINE;
12434 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12435 		REG_SEPARATOR_LINE;
12436 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12437 		REG_SEPARATOR_LINE;
12438 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12439 		REG_SEPARATOR_LINE;
12440 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12441 		REG_SEPARATOR_LINE;
12442 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12443 		REG_SEPARATOR_LINE;
12444 
12445 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12446 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12447 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12448 }
12449 
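/* hclge_get_regs - fill @data with the full PF register dump (direct reads,
 * 32-bit and 64-bit firmware register lists, then the DFX registers) and
 * report the firmware version through @version.
 */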
12450 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12451 			   void *data)
12452 {
12453 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12454 	struct hclge_vport *vport = hclge_get_vport(handle);
12455 	struct hclge_dev *hdev = vport->back;
12456 	u32 regs_num_32_bit, regs_num_64_bit;
12457 	int i, reg_num, separator_num, ret;
12458 	u32 *reg = data;
12459 
12460 	*version = hdev->fw_version;
12461 
12462 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12463 	if (ret) {
12464 		dev_err(&hdev->pdev->dev,
12465 			"Get register number failed, ret = %d.\n", ret);
12466 		return;
12467 	}
12468 
12469 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12470 
12471 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12472 	if (ret) {
12473 		dev_err(&hdev->pdev->dev,
12474 			"Get 32 bit register failed, ret = %d.\n", ret);
12475 		return;
12476 	}
12477 	reg_num = regs_num_32_bit;
12478 	reg += reg_num;
12479 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12480 	for (i = 0; i < separator_num; i++)
12481 		*reg++ = SEPARATOR_VALUE;
12482 
12483 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12484 	if (ret) {
12485 		dev_err(&hdev->pdev->dev,
12486 			"Get 64 bit register failed, ret = %d.\n", ret);
12487 		return;
12488 	}
12489 	reg_num = regs_num_64_bit * 2;
12490 	reg += reg_num;
12491 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12492 	for (i = 0; i < separator_num; i++)
12493 		*reg++ = SEPARATOR_VALUE;
12494 
12495 	ret = hclge_get_dfx_reg(hdev, reg);
12496 	if (ret)
12497 		dev_err(&hdev->pdev->dev,
12498 			"Get dfx register failed, ret = %d.\n", ret);
12499 }
12500 
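/* Set the locate LED state (on/off/no change) via the LED status config
 * command.
 */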
12501 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12502 {
12503 	struct hclge_set_led_state_cmd *req;
12504 	struct hclge_desc desc;
12505 	int ret;
12506 
12507 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12508 
12509 	req = (struct hclge_set_led_state_cmd *)desc.data;
12510 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12511 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12512 
12513 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12514 	if (ret)
12515 		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);
12517 
12518 	return ret;
12519 }
12520 
12521 enum hclge_led_status {
12522 	HCLGE_LED_OFF,
12523 	HCLGE_LED_ON,
12524 	HCLGE_LED_NO_CHANGE = 0xFF,
12525 };
12526 
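/* ethtool physical-ID handler: light the locate LED while identification is
 * active and turn it off again when it ends.
 */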
12527 static int hclge_set_led_id(struct hnae3_handle *handle,
12528 			    enum ethtool_phys_id_state status)
12529 {
12530 	struct hclge_vport *vport = hclge_get_vport(handle);
12531 	struct hclge_dev *hdev = vport->back;
12532 
12533 	switch (status) {
12534 	case ETHTOOL_ID_ACTIVE:
12535 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12536 	case ETHTOOL_ID_INACTIVE:
12537 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12538 	default:
12539 		return -EINVAL;
12540 	}
12541 }
12542 
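/* Copy the supported and advertised link mode bitmaps cached in the MAC
 * state into the caller's masks.
 */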
12543 static void hclge_get_link_mode(struct hnae3_handle *handle,
12544 				unsigned long *supported,
12545 				unsigned long *advertising)
12546 {
12547 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12548 	struct hclge_vport *vport = hclge_get_vport(handle);
12549 	struct hclge_dev *hdev = vport->back;
12550 	unsigned int idx = 0;
12551 
12552 	for (; idx < size; idx++) {
12553 		supported[idx] = hdev->hw.mac.supported[idx];
12554 		advertising[idx] = hdev->hw.mac.advertising[idx];
12555 	}
12556 }
12557 
12558 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12559 {
12560 	struct hclge_vport *vport = hclge_get_vport(handle);
12561 	struct hclge_dev *hdev = vport->back;
12562 
12563 	return hclge_config_gro(hdev, enable);
12564 }
12565 
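/* Re-apply promiscuous settings: vport 0 (the PF) follows its netdev flags
 * plus any overflow promisc request, while VF vports are only granted
 * unicast/multicast promiscuous mode when trusted; broadcast follows the
 * VF's own request.
 */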
12566 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12567 {
12568 	struct hclge_vport *vport = &hdev->vport[0];
12569 	struct hnae3_handle *handle = &vport->nic;
12570 	u8 tmp_flags;
12571 	int ret;
12572 	u16 i;
12573 
12574 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12575 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12576 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12577 	}
12578 
12579 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12580 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12581 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12582 					     tmp_flags & HNAE3_MPE);
12583 		if (!ret) {
12584 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12585 				  &vport->state);
12586 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12587 				&vport->state);
12588 		}
12589 	}
12590 
12591 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12592 		bool uc_en = false;
12593 		bool mc_en = false;
12594 		bool bc_en;
12595 
12596 		vport = &hdev->vport[i];
12597 
12598 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12599 					&vport->state))
12600 			continue;
12601 
12602 		if (vport->vf_info.trusted) {
12603 			uc_en = vport->vf_info.request_uc_en > 0;
12604 			mc_en = vport->vf_info.request_mc_en > 0;
12605 		}
12606 		bc_en = vport->vf_info.request_bc_en > 0;
12607 
12608 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12609 						 mc_en, bc_en);
12610 		if (ret) {
12611 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12612 				&vport->state);
12613 			return;
12614 		}
12615 		hclge_set_vport_vlan_fltr_change(vport);
12616 	}
12617 }
12618 
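/* Query firmware for whether an SFP module is currently present. */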
12619 static bool hclge_module_existed(struct hclge_dev *hdev)
12620 {
12621 	struct hclge_desc desc;
12622 	u32 existed;
12623 	int ret;
12624 
12625 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12626 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12627 	if (ret) {
12628 		dev_err(&hdev->pdev->dev,
12629 			"failed to get SFP exist state, ret = %d\n", ret);
12630 		return false;
12631 	}
12632 
12633 	existed = le32_to_cpu(desc.data[0]);
12634 
12635 	return existed != 0;
12636 }
12637 
/* need 6 BDs (140 bytes in total) in one reading
 * return the number of bytes actually read, 0 means the read failed.
 */
12641 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12642 				     u32 len, u8 *data)
12643 {
12644 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12645 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12646 	u16 read_len;
12647 	u16 copy_len;
12648 	int ret;
12649 	int i;
12650 
	/* set up all 6 BDs to read the module eeprom info. */
12652 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12653 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12654 					   true);
12655 
12656 		/* bd0~bd4 need next flag */
12657 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12658 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12659 	}
12660 
	/* set up BD0: this BD carries the read offset and length. */
12662 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12663 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12664 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12665 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12666 
12667 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12668 	if (ret) {
12669 		dev_err(&hdev->pdev->dev,
12670 			"failed to get SFP eeprom info, ret = %d\n", ret);
12671 		return 0;
12672 	}
12673 
12674 	/* copy sfp info from bd0 to out buffer. */
12675 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12676 	memcpy(data, sfp_info_bd0->data, copy_len);
12677 	read_len = copy_len;
12678 
12679 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12680 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12681 		if (read_len >= len)
12682 			return read_len;
12683 
12684 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12685 		memcpy(data + read_len, desc[i].data, copy_len);
12686 		read_len += copy_len;
12687 	}
12688 
12689 	return read_len;
12690 }
12691 
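/* hclge_get_module_eeprom - module EEPROM read for fiber ports only; the
 * data is fetched in chunks of up to HCLGE_SFP_INFO_MAX_LEN bytes until
 * @len bytes have been copied into @data.
 */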
12692 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12693 				   u32 len, u8 *data)
12694 {
12695 	struct hclge_vport *vport = hclge_get_vport(handle);
12696 	struct hclge_dev *hdev = vport->back;
12697 	u32 read_len = 0;
12698 	u16 data_len;
12699 
12700 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12701 		return -EOPNOTSUPP;
12702 
12703 	if (!hclge_module_existed(hdev))
12704 		return -ENXIO;
12705 
12706 	while (read_len < len) {
12707 		data_len = hclge_get_sfp_eeprom_info(hdev,
12708 						     offset + read_len,
12709 						     len - read_len,
12710 						     data + read_len);
12711 		if (!data_len)
12712 			return -EIO;
12713 
12714 		read_len += data_len;
12715 	}
12716 
12717 	return 0;
12718 }
12719 
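/* Operations exposed to the hnae3 framework for the PF (hclge) back end. */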
12720 static const struct hnae3_ae_ops hclge_ops = {
12721 	.init_ae_dev = hclge_init_ae_dev,
12722 	.uninit_ae_dev = hclge_uninit_ae_dev,
12723 	.reset_prepare = hclge_reset_prepare_general,
12724 	.reset_done = hclge_reset_done,
12725 	.init_client_instance = hclge_init_client_instance,
12726 	.uninit_client_instance = hclge_uninit_client_instance,
12727 	.map_ring_to_vector = hclge_map_ring_to_vector,
12728 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12729 	.get_vector = hclge_get_vector,
12730 	.put_vector = hclge_put_vector,
12731 	.set_promisc_mode = hclge_set_promisc_mode,
12732 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12733 	.set_loopback = hclge_set_loopback,
12734 	.start = hclge_ae_start,
12735 	.stop = hclge_ae_stop,
12736 	.client_start = hclge_client_start,
12737 	.client_stop = hclge_client_stop,
12738 	.get_status = hclge_get_status,
12739 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12740 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12741 	.get_media_type = hclge_get_media_type,
12742 	.check_port_speed = hclge_check_port_speed,
12743 	.get_fec = hclge_get_fec,
12744 	.set_fec = hclge_set_fec,
12745 	.get_rss_key_size = hclge_get_rss_key_size,
12746 	.get_rss = hclge_get_rss,
12747 	.set_rss = hclge_set_rss,
12748 	.set_rss_tuple = hclge_set_rss_tuple,
12749 	.get_rss_tuple = hclge_get_rss_tuple,
12750 	.get_tc_size = hclge_get_tc_size,
12751 	.get_mac_addr = hclge_get_mac_addr,
12752 	.set_mac_addr = hclge_set_mac_addr,
12753 	.do_ioctl = hclge_do_ioctl,
12754 	.add_uc_addr = hclge_add_uc_addr,
12755 	.rm_uc_addr = hclge_rm_uc_addr,
12756 	.add_mc_addr = hclge_add_mc_addr,
12757 	.rm_mc_addr = hclge_rm_mc_addr,
12758 	.set_autoneg = hclge_set_autoneg,
12759 	.get_autoneg = hclge_get_autoneg,
12760 	.restart_autoneg = hclge_restart_autoneg,
12761 	.halt_autoneg = hclge_halt_autoneg,
12762 	.get_pauseparam = hclge_get_pauseparam,
12763 	.set_pauseparam = hclge_set_pauseparam,
12764 	.set_mtu = hclge_set_mtu,
12765 	.reset_queue = hclge_reset_tqp,
12766 	.get_stats = hclge_get_stats,
12767 	.get_mac_stats = hclge_get_mac_stat,
12768 	.update_stats = hclge_update_stats,
12769 	.get_strings = hclge_get_strings,
12770 	.get_sset_count = hclge_get_sset_count,
12771 	.get_fw_version = hclge_get_fw_version,
12772 	.get_mdix_mode = hclge_get_mdix_mode,
12773 	.enable_vlan_filter = hclge_enable_vlan_filter,
12774 	.set_vlan_filter = hclge_set_vlan_filter,
12775 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12776 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12777 	.reset_event = hclge_reset_event,
12778 	.get_reset_level = hclge_get_reset_level,
12779 	.set_default_reset_request = hclge_set_def_reset_request,
12780 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12781 	.set_channels = hclge_set_channels,
12782 	.get_channels = hclge_get_channels,
12783 	.get_regs_len = hclge_get_regs_len,
12784 	.get_regs = hclge_get_regs,
12785 	.set_led_id = hclge_set_led_id,
12786 	.get_link_mode = hclge_get_link_mode,
12787 	.add_fd_entry = hclge_add_fd_entry,
12788 	.del_fd_entry = hclge_del_fd_entry,
12789 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12790 	.get_fd_rule_info = hclge_get_fd_rule_info,
12791 	.get_fd_all_rules = hclge_get_all_rules,
12792 	.enable_fd = hclge_enable_fd,
12793 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12794 	.dbg_read_cmd = hclge_dbg_read_cmd,
12795 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12796 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12797 	.ae_dev_resetting = hclge_ae_dev_resetting,
12798 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12799 	.set_gro_en = hclge_gro_en,
12800 	.get_global_queue_id = hclge_covert_handle_qid_global,
12801 	.set_timer_task = hclge_set_timer_task,
12802 	.mac_connect_phy = hclge_mac_connect_phy,
12803 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12804 	.get_vf_config = hclge_get_vf_config,
12805 	.set_vf_link_state = hclge_set_vf_link_state,
12806 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12807 	.set_vf_trust = hclge_set_vf_trust,
12808 	.set_vf_rate = hclge_set_vf_rate,
12809 	.set_vf_mac = hclge_set_vf_mac,
12810 	.get_module_eeprom = hclge_get_module_eeprom,
12811 	.get_cmdq_stat = hclge_get_cmdq_stat,
12812 	.add_cls_flower = hclge_add_cls_flower,
12813 	.del_cls_flower = hclge_del_cls_flower,
12814 	.cls_flower_active = hclge_is_cls_flower_active,
12815 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12816 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12817 };
12818 
12819 static struct hnae3_ae_algo ae_algo = {
12820 	.ops = &hclge_ops,
12821 	.pdev_id_table = ae_algo_pci_tbl,
12822 };
12823 
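/* Module entry point: create the driver workqueue and register the PF
 * algorithm with the hnae3 framework.
 */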
12824 static int hclge_init(void)
12825 {
12826 	pr_info("%s is initializing\n", HCLGE_NAME);
12827 
12828 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12829 	if (!hclge_wq) {
12830 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12831 		return -ENOMEM;
12832 	}
12833 
12834 	hnae3_register_ae_algo(&ae_algo);
12835 
12836 	return 0;
12837 }
12838 
12839 static void hclge_exit(void)
12840 {
12841 	hnae3_unregister_ae_algo(&ae_algo);
12842 	destroy_workqueue(hclge_wq);
12843 }
12844 module_init(hclge_init);
12845 module_exit(hclge_exit);
12846 
12847 MODULE_LICENSE("GPL");
12848 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12849 MODULE_DESCRIPTION("HCLGE Driver");
12850 MODULE_VERSION(HCLGE_MOD_VERSION);
12851