1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
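/* HCLGE_MAC_STATS_FIELD_OFF() records a counter's byte offset within
 * struct hclge_mac_stats, and HCLGE_STATS_READ() dereferences the u64 at
 * that offset, e.g.:
 *	HCLGE_STATS_READ(&hdev->mac_stats,
 *			 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num))
 * This pairing is how the g_mac_stats_string table below is consumed by
 * hclge_comm_get_stats().
 */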
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
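/* Each tuple_key_info entry above describes one flow director tuple: its
 * width in bits, how it is packed into the key (KEY_OPT_*), and the offsets
 * of its value and mask within struct hclge_fd_rule. Entries with -1
 * offsets are tuples for which no rule field is stored.
 */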
445 
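/* Legacy MAC statistics path: issue HCLGE_OPC_STATS_MAC over a fixed number
 * of descriptors and accumulate the returned 64-bit counters into
 * hdev->mac_stats in the field order of struct hclge_mac_stats. Only the
 * first descriptor carries a command head; each remaining descriptor is
 * treated entirely as counter data.
 */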
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0032, only the first desc has the head */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
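/* Newer MAC statistics path: same accumulation scheme as the defective
 * variant, but the descriptor count comes from firmware (see
 * hclge_mac_query_reg_num()), so hardware exposing more counters is handled
 * without a driver change. HCLGE_OPC_STATS_MAC_ALL is used here.
 */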
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
493 	/* This may be called inside atomic sections,
494 	 * so GFP_ATOMIC is more suitable here
495 	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0034, only the first desc has the head */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546 
547 	return 0;
548 }
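/* The formula above assumes the first descriptor covers three of the
 * registers reported by firmware and every following descriptor covers
 * four, i.e. desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4). For example,
 * reg_num = 27 yields desc_num = 1 + 6 = 7.
 */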
549 
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 	/* a successful query means the firmware supports the new stats method */
557 	if (!ret)
558 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 	else if (ret == -EOPNOTSUPP)
560 		ret = hclge_mac_update_stats_defective(hdev);
561 	else
562 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563 
564 	return ret;
565 }
566 
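/* Per-queue statistics: for every TQP owned by this handle, query the RX
 * and then the TX packet counters one descriptor at a time and accumulate
 * them into tqp->tqp_stats. Each query carries the queue index in data[0]
 * and returns the counter in data[1].
 */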
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 	struct hclge_vport *vport = hclge_get_vport(handle);
571 	struct hclge_dev *hdev = vport->back;
572 	struct hnae3_queue *queue;
573 	struct hclge_desc desc[1];
574 	struct hclge_tqp *tqp;
575 	int ret, i;
576 
577 	for (i = 0; i < kinfo->num_tqps; i++) {
578 		queue = handle->kinfo.tqp[i];
579 		tqp = container_of(queue, struct hclge_tqp, q);
580 		/* command : HCLGE_OPC_QUERY_RX_STATS */
581 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582 					   true);
583 
584 		desc[0].data[0] = cpu_to_le32(tqp->index);
585 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
586 		if (ret) {
587 			dev_err(&hdev->pdev->dev,
588 				"Query tqp stat fail, status = %d, queue = %d\n",
589 				ret, i);
590 			return ret;
591 		}
592 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 			le32_to_cpu(desc[0].data[1]);
594 	}
595 
596 	for (i = 0; i < kinfo->num_tqps; i++) {
597 		queue = handle->kinfo.tqp[i];
598 		tqp = container_of(queue, struct hclge_tqp, q);
599 		/* command : HCLGE_OPC_QUERY_TX_STATS */
600 		hclge_cmd_setup_basic_desc(&desc[0],
601 					   HCLGE_OPC_QUERY_TX_STATS,
602 					   true);
603 
604 		desc[0].data[0] = cpu_to_le32(tqp->index);
605 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
606 		if (ret) {
607 			dev_err(&hdev->pdev->dev,
608 				"Query tqp stat fail, status = %d, queue = %d\n",
609 				ret, i);
610 			return ret;
611 		}
612 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 			le32_to_cpu(desc[0].data[1]);
614 	}
615 
616 	return 0;
617 }
618 
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	struct hclge_tqp *tqp;
623 	u64 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629 	}
630 
631 	for (i = 0; i < kinfo->num_tqps; i++) {
632 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634 	}
635 
636 	return buff;
637 }
638 
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642 
643 	/* each tqp has both a TX and an RX queue */
644 	return kinfo->num_tqps * (2);
645 }
646 
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650 	u8 *buff = data;
651 	int i;
652 
653 	for (i = 0; i < kinfo->num_tqps; i++) {
654 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 			struct hclge_tqp, q);
656 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657 			 tqp->index);
658 		buff = buff + ETH_GSTRING_LEN;
659 	}
660 
661 	for (i = 0; i < kinfo->num_tqps; i++) {
662 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 			struct hclge_tqp, q);
664 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665 			 tqp->index);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return buff;
670 }
671 
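/* Copy each counter named in strs[] out of the stats structure using the
 * byte offset recorded by HCLGE_MAC_STATS_FIELD_OFF(), and return the
 * buffer position just past the copied block so callers can chain getters.
 */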
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 				 const struct hclge_comm_stats_str strs[],
674 				 int size, u64 *data)
675 {
676 	u64 *buf = data;
677 	u32 i;
678 
679 	for (i = 0; i < size; i++)
680 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681 
682 	return buf + size;
683 }
684 
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 				  const struct hclge_comm_stats_str strs[],
687 				  int size, u8 *data)
688 {
689 	char *buff = (char *)data;
690 	u32 i;
691 
692 	if (stringset != ETH_SS_STATS)
693 		return buff;
694 
695 	for (i = 0; i < size; i++) {
696 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 		buff = buff + ETH_GSTRING_LEN;
698 	}
699 
700 	return (u8 *)buff;
701 }
702 
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705 	struct hnae3_handle *handle;
706 	int status;
707 
708 	handle = &hdev->vport[0].nic;
709 	if (handle->client) {
710 		status = hclge_tqps_update_stats(handle);
711 		if (status) {
712 			dev_err(&hdev->pdev->dev,
713 				"Update TQPS stats fail, status = %d.\n",
714 				status);
715 		}
716 	}
717 
718 	status = hclge_mac_update_stats(hdev);
719 	if (status)
720 		dev_err(&hdev->pdev->dev,
721 			"Update MAC stats fail, status = %d.\n", status);
722 }
723 
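/* HCLGE_STATE_STATISTICS_UPDATING acts as a non-blocking lock: if another
 * update is already in flight, the call below returns immediately instead
 * of issuing overlapping firmware commands.
 */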
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 			       struct net_device_stats *net_stats)
726 {
727 	struct hclge_vport *vport = hclge_get_vport(handle);
728 	struct hclge_dev *hdev = vport->back;
729 	int status;
730 
731 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732 		return;
733 
734 	status = hclge_mac_update_stats(hdev);
735 	if (status)
736 		dev_err(&hdev->pdev->dev,
737 			"Update MAC stats fail, status = %d.\n",
738 			status);
739 
740 	status = hclge_tqps_update_stats(handle);
741 	if (status)
742 		dev_err(&hdev->pdev->dev,
743 			"Update TQPS stats fail, status = %d.\n",
744 			status);
745 
746 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748 
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 		HNAE3_SUPPORT_PHY_LOOPBACK |\
753 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755 
756 	struct hclge_vport *vport = hclge_get_vport(handle);
757 	struct hclge_dev *hdev = vport->back;
758 	int count = 0;
759 
760 	/* Loopback test support rules:
761 	 * mac: only GE mode is supported
762 	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
763 	 * phy: only supported when a phy device exists on the board
764 	 */
765 	if (stringset == ETH_SS_TEST) {
766 		/* clear loopback bit flags at first */
767 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772 			count += 1;
773 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774 		}
775 
776 		count += 2;
777 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779 
780 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 		     hdev->hw.mac.phydev->drv->set_loopback) ||
782 		    hnae3_dev_phy_imp_supported(hdev)) {
783 			count += 1;
784 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785 		}
786 	} else if (stringset == ETH_SS_STATS) {
787 		count = ARRAY_SIZE(g_mac_stats_string) +
788 			hclge_tqps_get_sset_count(handle, stringset);
789 	}
790 
791 	return count;
792 }
793 
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795 			      u8 *data)
796 {
797 	u8 *p = (char *)data;
798 	int size;
799 
800 	if (stringset == ETH_SS_STATS) {
801 		size = ARRAY_SIZE(g_mac_stats_string);
802 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803 					   size, p);
804 		p = hclge_tqps_get_strings(handle, p);
805 	} else if (stringset == ETH_SS_TEST) {
806 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808 			       ETH_GSTRING_LEN);
809 			p += ETH_GSTRING_LEN;
810 		}
811 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813 			       ETH_GSTRING_LEN);
814 			p += ETH_GSTRING_LEN;
815 		}
816 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817 			memcpy(p,
818 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819 			       ETH_GSTRING_LEN);
820 			p += ETH_GSTRING_LEN;
821 		}
822 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824 			       ETH_GSTRING_LEN);
825 			p += ETH_GSTRING_LEN;
826 		}
827 	}
828 }
829 
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832 	struct hclge_vport *vport = hclge_get_vport(handle);
833 	struct hclge_dev *hdev = vport->back;
834 	u64 *p;
835 
836 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 				 ARRAY_SIZE(g_mac_stats_string), data);
838 	p = hclge_tqps_get_stats(handle, p);
839 }
840 
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 			       struct hns3_mac_stats *mac_stats)
843 {
844 	struct hclge_vport *vport = hclge_get_vport(handle);
845 	struct hclge_dev *hdev = vport->back;
846 
847 	hclge_update_stats(handle, NULL);
848 
849 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852 
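/* Parse the function status reply: require PF state DONE, record whether
 * this PF is the main PF, and extract the MAC id (low 4 bits of mac_id).
 */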
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 				   struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK	0xF
857 
858 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859 		return -EINVAL;
860 
861 	/* Record whether this pf is the main pf */
862 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 		hdev->flag |= HCLGE_FLAG_MAIN;
864 	else
865 		hdev->flag &= ~HCLGE_FLAG_MAIN;
866 
867 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868 	return 0;
869 }
870 
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT	5
874 
875 	struct hclge_func_status_cmd *req;
876 	struct hclge_desc desc;
877 	int timeout = 0;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 	req = (struct hclge_func_status_cmd *)desc.data;
882 
883 	do {
884 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 		if (ret) {
886 			dev_err(&hdev->pdev->dev,
887 				"query function status failed %d.\n", ret);
888 			return ret;
889 		}
890 
891 		/* Check whether pf reset is done */
892 		if (req->pf_state)
893 			break;
894 		usleep_range(1000, 2000);
895 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
896 
897 	return hclge_parse_func_status(hdev, req);
898 }
899 
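/* Query PF resources from firmware: total TQP count (base plus extended),
 * the packet buffer size, the TX and DV buffer sizes (rounded up to
 * HCLGE_BUF_SIZE_UNIT), and the MSI budget, which is the NIC vector count
 * plus the RoCE vector count when RoCE is supported.
 */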
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902 	struct hclge_pf_res_cmd *req;
903 	struct hclge_desc desc;
904 	int ret;
905 
906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908 	if (ret) {
909 		dev_err(&hdev->pdev->dev,
910 			"query pf resource failed %d.\n", ret);
911 		return ret;
912 	}
913 
914 	req = (struct hclge_pf_res_cmd *)desc.data;
915 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 			 le16_to_cpu(req->ext_tqp_num);
917 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918 
919 	if (req->tx_buf_size)
920 		hdev->tx_buf_size =
921 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922 	else
923 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924 
925 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926 
927 	if (req->dv_buf_size)
928 		hdev->dv_buf_size =
929 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930 	else
931 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932 
933 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934 
935 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 		dev_err(&hdev->pdev->dev,
938 			"only %u msi resources available, not enough for pf(min:2).\n",
939 			hdev->num_nic_msi);
940 		return -EINVAL;
941 	}
942 
943 	if (hnae3_dev_roce_supported(hdev)) {
944 		hdev->num_roce_msi =
945 			le16_to_cpu(req->pf_intr_vector_number_roce);
946 
947 		/* PF should have both NIC vectors and RoCE vectors;
948 		 * NIC vectors are queued before RoCE vectors.
949 		 */
950 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951 	} else {
952 		hdev->num_msi = hdev->num_nic_msi;
953 	}
954 
955 	return 0;
956 }
957 
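/* Firmware speed codes, as decoded below:
 * 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G, 4 = 50G, 5 = 100G,
 * 6 = 10M, 7 = 100M, 8 = 200G; anything else is rejected.
 */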
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
960 	switch (speed_cmd) {
961 	case 6:
962 		*speed = HCLGE_MAC_SPEED_10M;
963 		break;
964 	case 7:
965 		*speed = HCLGE_MAC_SPEED_100M;
966 		break;
967 	case 0:
968 		*speed = HCLGE_MAC_SPEED_1G;
969 		break;
970 	case 1:
971 		*speed = HCLGE_MAC_SPEED_10G;
972 		break;
973 	case 2:
974 		*speed = HCLGE_MAC_SPEED_25G;
975 		break;
976 	case 3:
977 		*speed = HCLGE_MAC_SPEED_40G;
978 		break;
979 	case 4:
980 		*speed = HCLGE_MAC_SPEED_50G;
981 		break;
982 	case 5:
983 		*speed = HCLGE_MAC_SPEED_100G;
984 		break;
985 	case 8:
986 		*speed = HCLGE_MAC_SPEED_200G;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997 	struct hclge_vport *vport = hclge_get_vport(handle);
998 	struct hclge_dev *hdev = vport->back;
999 	u32 speed_ability = hdev->hw.mac.speed_ability;
1000 	u32 speed_bit = 0;
1001 
1002 	switch (speed) {
1003 	case HCLGE_MAC_SPEED_10M:
1004 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_100M:
1007 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_1G:
1010 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_10G:
1013 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1014 		break;
1015 	case HCLGE_MAC_SPEED_25G:
1016 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1017 		break;
1018 	case HCLGE_MAC_SPEED_40G:
1019 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1020 		break;
1021 	case HCLGE_MAC_SPEED_50G:
1022 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1023 		break;
1024 	case HCLGE_MAC_SPEED_100G:
1025 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1026 		break;
1027 	case HCLGE_MAC_SPEED_200G:
1028 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
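/* FEC ability per MAC speed: 10G/40G support Base-R, 25G/50G support both
 * Base-R and RS, and 100G/200G support RS only; HNAE3_FEC_AUTO is reported
 * whenever any FEC mode is available.
 */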
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
1207 	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
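/* Unpack the two config descriptors returned by HCLGE_OPC_GET_CFG_PARAM.
 * The MAC address is reassembled from the low 32 bits in param[2] and the
 * high 16 bits in param[3], and the extended speed-ability bits from the
 * second descriptor are merged in above bit 7.
 */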
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT			8
1283 
1284 	struct hclge_cfg_param_cmd *req;
1285 	u64 mac_addr_tmp_high;
1286 	u16 speed_ability_ext;
1287 	u64 mac_addr_tmp;
1288 	unsigned int i;
1289 
1290 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291 
1292 	/* get the configuration */
1293 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 					    HCLGE_CFG_TQP_DESC_N_M,
1297 					    HCLGE_CFG_TQP_DESC_N_S);
1298 
1299 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300 					HCLGE_CFG_PHY_ADDR_M,
1301 					HCLGE_CFG_PHY_ADDR_S);
1302 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 					  HCLGE_CFG_MEDIA_TP_M,
1304 					  HCLGE_CFG_MEDIA_TP_S);
1305 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 					  HCLGE_CFG_RX_BUF_LEN_M,
1307 					  HCLGE_CFG_RX_BUF_LEN_S);
1308 	/* get mac_address */
1309 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311 					    HCLGE_CFG_MAC_ADDR_H_M,
1312 					    HCLGE_CFG_MAC_ADDR_H_S);
1313 
1314 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315 
1316 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317 					     HCLGE_CFG_DEFAULT_SPEED_M,
1318 					     HCLGE_CFG_DEFAULT_SPEED_S);
1319 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320 					       HCLGE_CFG_RSS_SIZE_M,
1321 					       HCLGE_CFG_RSS_SIZE_S);
1322 
1323 	for (i = 0; i < ETH_ALEN; i++)
1324 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325 
1326 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328 
1329 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330 					     HCLGE_CFG_SPEED_ABILITY_M,
1331 					     HCLGE_CFG_SPEED_ABILITY_S);
1332 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336 
1337 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1339 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1340 
1341 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1342 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1343 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1344 	if (!cfg->umv_space)
1345 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1346 
1347 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1348 					       HCLGE_CFG_PF_RSS_SIZE_M,
1349 					       HCLGE_CFG_PF_RSS_SIZE_S);
1350 
1351 	/* HCLGE_CFG_PF_RSS_SIZE_M encodes the PF max rss size as a power
1352 	 * of 2 rather than the value itself, which is more flexible for
1353 	 * future changes and expansions.
1354 	 * A PF field of 0 means the PF has no separate setting; in that
1355 	 * case PF and VF share the same max rss size field
1356 	 * (HCLGE_CFG_RSS_SIZE_S), so fall back to vf_rss_size_max.
1357 	 */
1358 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1359 			       1U << cfg->pf_rss_size_max :
1360 			       cfg->vf_rss_size_max;
1361 }
1362 
1363 /* hclge_get_cfg: query the static parameters from flash
1364  * @hdev: pointer to struct hclge_dev
1365  * @hcfg: the config structure to be filled in
1366  */
1367 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1368 {
1369 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1370 	struct hclge_cfg_param_cmd *req;
1371 	unsigned int i;
1372 	int ret;
1373 
1374 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1375 		u32 offset = 0;
1376 
1377 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1378 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1379 					   true);
1380 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1381 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1382 		/* Length must be in units of 4 bytes when sent to hardware */
1383 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1384 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1385 		req->offset = cpu_to_le32(offset);
1386 	}
1387 
1388 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1391 		return ret;
1392 	}
1393 
1394 	hclge_parse_cfg(hcfg, desc);
1395 
1396 	return 0;
1397 }
1398 
1399 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1400 {
1401 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1402 
1403 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1404 
1405 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1406 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1407 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1408 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1409 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1410 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1411 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1412 }
1413 
1414 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1415 				  struct hclge_desc *desc)
1416 {
1417 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1418 	struct hclge_dev_specs_0_cmd *req0;
1419 	struct hclge_dev_specs_1_cmd *req1;
1420 
1421 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1422 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1423 
1424 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1425 	ae_dev->dev_specs.rss_ind_tbl_size =
1426 		le16_to_cpu(req0->rss_ind_tbl_size);
1427 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1428 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1429 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1430 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1431 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1432 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1433 }
1434 
1435 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1436 {
1437 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1438 
1439 	if (!dev_specs->max_non_tso_bd_num)
1440 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1441 	if (!dev_specs->rss_ind_tbl_size)
1442 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1443 	if (!dev_specs->rss_key_size)
1444 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1445 	if (!dev_specs->max_tm_rate)
1446 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1447 	if (!dev_specs->max_qset_num)
1448 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1449 	if (!dev_specs->max_int_gl)
1450 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1451 	if (!dev_specs->max_frm_size)
1452 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1453 }
1454 
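/* Device specifications: V3 and newer devices report them via
 * HCLGE_OPC_QUERY_DEV_SPECS using chained descriptors; older devices fall
 * back to the compile-time defaults, and any zero field in the firmware
 * reply is likewise replaced by its default in hclge_check_dev_specs().
 */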
1455 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1456 {
1457 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1458 	int ret;
1459 	int i;
1460 
1461 	/* set default specifications as devices lower than version V3 do not
1462 	 * support querying specifications from firmware.
1463 	 */
1464 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1465 		hclge_set_default_dev_specs(hdev);
1466 		return 0;
1467 	}
1468 
1469 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1470 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1471 					   true);
1472 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1473 	}
1474 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1475 
1476 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1477 	if (ret)
1478 		return ret;
1479 
1480 	hclge_parse_dev_specs(hdev, desc);
1481 	hclge_check_dev_specs(hdev);
1482 
1483 	return 0;
1484 }
1485 
1486 static int hclge_get_cap(struct hclge_dev *hdev)
1487 {
1488 	int ret;
1489 
1490 	ret = hclge_query_function_status(hdev);
1491 	if (ret) {
1492 		dev_err(&hdev->pdev->dev,
1493 			"query function status error %d.\n", ret);
1494 		return ret;
1495 	}
1496 
1497 	/* get pf resource */
1498 	return hclge_query_pf_resource(hdev);
1499 }
1500 
1501 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1502 {
1503 #define HCLGE_MIN_TX_DESC	64
1504 #define HCLGE_MIN_RX_DESC	64
1505 
1506 	if (!is_kdump_kernel())
1507 		return;
1508 
1509 	dev_info(&hdev->pdev->dev,
1510 		 "Running kdump kernel. Using minimal resources\n");
1511 
1512 	/* the minimum number of queue pairs equals the number of vports */
1513 	hdev->num_tqps = hdev->num_req_vfs + 1;
1514 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1515 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1516 }
1517 
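/* Apply the static configuration read from flash to the device: RSS sizes,
 * RX buffer length, MAC address, media type and link modes, TC/PFC limits
 * and UMV space; then trim resources for kdump kernels and pick an initial
 * IRQ affinity CPU local to the device's NUMA node.
 */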
1518 static int hclge_configure(struct hclge_dev *hdev)
1519 {
1520 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1521 	struct hclge_cfg cfg;
1522 	unsigned int i;
1523 	int ret;
1524 
1525 	ret = hclge_get_cfg(hdev, &cfg);
1526 	if (ret)
1527 		return ret;
1528 
1529 	hdev->base_tqp_pid = 0;
1530 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1531 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1532 	hdev->rx_buf_len = cfg.rx_buf_len;
1533 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1534 	hdev->hw.mac.media_type = cfg.media_type;
1535 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1536 	hdev->num_tx_desc = cfg.tqp_desc_num;
1537 	hdev->num_rx_desc = cfg.tqp_desc_num;
1538 	hdev->tm_info.num_pg = 1;
1539 	hdev->tc_max = cfg.tc_num;
1540 	hdev->tm_info.hw_pfc_map = 0;
1541 	hdev->wanted_umv_size = cfg.umv_space;
1542 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1543 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1544 
1545 	if (hnae3_dev_fd_supported(hdev)) {
1546 		hdev->fd_en = true;
1547 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1548 	}
1549 
1550 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1551 	if (ret) {
1552 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1553 			cfg.default_speed, ret);
1554 		return ret;
1555 	}
1556 
1557 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1558 
1559 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1560 
1561 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1562 	    (hdev->tc_max < 1)) {
1563 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1564 			 hdev->tc_max);
1565 		hdev->tc_max = 1;
1566 	}
1567 
1568 	/* Dev does not support DCB */
1569 	if (!hnae3_dev_dcb_supported(hdev)) {
1570 		hdev->tc_max = 1;
1571 		hdev->pfc_max = 0;
1572 	} else {
1573 		hdev->pfc_max = hdev->tc_max;
1574 	}
1575 
1576 	hdev->tm_info.num_tc = 1;
1577 
1578 	/* Non-contiguous tc configuration is currently not supported */
1579 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1580 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1581 
1582 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1583 
1584 	hclge_init_kdump_kernel_config(hdev);
1585 
1586 	/* Set the initial affinity based on the pci function number */
1587 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1588 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1589 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1590 			&hdev->affinity_mask);
1591 
1592 	return ret;
1593 }
1594 
1595 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1596 			    u16 tso_mss_max)
1597 {
1598 	struct hclge_cfg_tso_status_cmd *req;
1599 	struct hclge_desc desc;
1600 
1601 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1602 
1603 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1604 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1605 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1606 
1607 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1608 }
1609 
1610 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1611 {
1612 	struct hclge_cfg_gro_status_cmd *req;
1613 	struct hclge_desc desc;
1614 	int ret;
1615 
1616 	if (!hnae3_dev_gro_supported(hdev))
1617 		return 0;
1618 
1619 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1620 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1621 
1622 	req->gro_en = en ? 1 : 0;
1623 
1624 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1625 	if (ret)
1626 		dev_err(&hdev->pdev->dev,
1627 			"GRO hardware config cmd failed, ret = %d\n", ret);
1628 
1629 	return ret;
1630 }
1631 
1632 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1633 {
1634 	struct hclge_tqp *tqp;
1635 	int i;
1636 
1637 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1638 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1639 	if (!hdev->htqp)
1640 		return -ENOMEM;
1641 
1642 	tqp = hdev->htqp;
1643 
1644 	for (i = 0; i < hdev->num_tqps; i++) {
1645 		tqp->dev = &hdev->pdev->dev;
1646 		tqp->index = i;
1647 
1648 		tqp->q.ae_algo = &ae_algo;
1649 		tqp->q.buf_size = hdev->rx_buf_len;
1650 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1651 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1652 
1653 		/* need an extended offset to configure queues >=
1654 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1655 		 */
1656 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1657 			tqp->q.io_base = hdev->hw.io_base +
1658 					 HCLGE_TQP_REG_OFFSET +
1659 					 i * HCLGE_TQP_REG_SIZE;
1660 		else
1661 			tqp->q.io_base = hdev->hw.io_base +
1662 					 HCLGE_TQP_REG_OFFSET +
1663 					 HCLGE_TQP_EXT_REG_OFFSET +
1664 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1665 					 HCLGE_TQP_REG_SIZE;
1666 
1667 		tqp++;
1668 	}
1669 
1670 	return 0;
1671 }
1672 
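/* hclge_map_tqps_to_func: map physical queue pair @tqp_pid to function
 * @func_id as its virtual queue @tqp_vid; the map type bit marks the queue
 * as a VF queue when @is_pf is false.
 */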
1673 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1674 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1675 {
1676 	struct hclge_tqp_map_cmd *req;
1677 	struct hclge_desc desc;
1678 	int ret;
1679 
1680 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1681 
1682 	req = (struct hclge_tqp_map_cmd *)desc.data;
1683 	req->tqp_id = cpu_to_le16(tqp_pid);
1684 	req->tqp_vf = func_id;
1685 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1686 	if (!is_pf)
1687 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1688 	req->tqp_vid = cpu_to_le16(tqp_vid);
1689 
1690 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1691 	if (ret)
1692 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1693 
1694 	return ret;
1695 }
1696 
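/* hclge_assign_tqp: hand out up to @num_tqps unallocated queue pairs to the
 * vport and derive its rss_size, capped by the PF rss limit and by the
 * number of NIC MSI vectors so each queue keeps its own interrupt.
 */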
1697 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1698 {
1699 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1700 	struct hclge_dev *hdev = vport->back;
1701 	int i, alloced;
1702 
1703 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1704 	     alloced < num_tqps; i++) {
1705 		if (!hdev->htqp[i].alloced) {
1706 			hdev->htqp[i].q.handle = &vport->nic;
1707 			hdev->htqp[i].q.tqp_index = alloced;
1708 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1709 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1710 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1711 			hdev->htqp[i].alloced = true;
1712 			alloced++;
1713 		}
1714 	}
1715 	vport->alloc_tqps = alloced;
1716 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1717 				vport->alloc_tqps / hdev->tm_info.num_tc);
1718 
	/* ensure a one-to-one mapping between irq and queue by default */
1720 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1721 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1722 
1723 	return 0;
1724 }
1725 
1726 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
1730 	struct hnae3_handle *nic = &vport->nic;
1731 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1732 	struct hclge_dev *hdev = vport->back;
1733 	int ret;
1734 
1735 	kinfo->num_tx_desc = num_tx_desc;
1736 	kinfo->num_rx_desc = num_rx_desc;
1737 
1738 	kinfo->rx_buf_len = hdev->rx_buf_len;
1739 
1740 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1741 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1742 	if (!kinfo->tqp)
1743 		return -ENOMEM;
1744 
1745 	ret = hclge_assign_tqp(vport, num_tqps);
1746 	if (ret)
1747 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1748 
1749 	return ret;
1750 }
1751 
1752 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1753 				  struct hclge_vport *vport)
1754 {
1755 	struct hnae3_handle *nic = &vport->nic;
1756 	struct hnae3_knic_private_info *kinfo;
1757 	u16 i;
1758 
1759 	kinfo = &nic->kinfo;
1760 	for (i = 0; i < vport->alloc_tqps; i++) {
1761 		struct hclge_tqp *q =
1762 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1763 		bool is_pf;
1764 		int ret;
1765 
1766 		is_pf = !(vport->vport_id);
1767 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1768 					     i, is_pf);
1769 		if (ret)
1770 			return ret;
1771 	}
1772 
1773 	return 0;
1774 }
1775 
1776 static int hclge_map_tqp(struct hclge_dev *hdev)
1777 {
1778 	struct hclge_vport *vport = hdev->vport;
1779 	u16 i, num_vport;
1780 
1781 	num_vport = hdev->num_req_vfs + 1;
1782 	for (i = 0; i < num_vport; i++)	{
1783 		int ret;
1784 
1785 		ret = hclge_map_tqp_to_vport(hdev, vport);
1786 		if (ret)
1787 			return ret;
1788 
1789 		vport++;
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1796 {
1797 	struct hnae3_handle *nic = &vport->nic;
1798 	struct hclge_dev *hdev = vport->back;
1799 	int ret;
1800 
1801 	nic->pdev = hdev->pdev;
1802 	nic->ae_algo = &ae_algo;
1803 	nic->numa_node_mask = hdev->numa_node_mask;
1804 
1805 	ret = hclge_knic_setup(vport, num_tqps,
1806 			       hdev->num_tx_desc, hdev->num_rx_desc);
1807 	if (ret)
1808 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1809 
1810 	return ret;
1811 }
1812 
1813 static int hclge_alloc_vport(struct hclge_dev *hdev)
1814 {
1815 	struct pci_dev *pdev = hdev->pdev;
1816 	struct hclge_vport *vport;
1817 	u32 tqp_main_vport;
1818 	u32 tqp_per_vport;
1819 	int num_vport, i;
1820 	int ret;
1821 
	/* We need to alloc a vport for the main NIC of the PF */
1823 	num_vport = hdev->num_req_vfs + 1;
1824 
1825 	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
1827 			hdev->num_tqps, num_vport);
1828 		return -EINVAL;
1829 	}
1830 
1831 	/* Alloc the same number of TQPs for every vport */
1832 	tqp_per_vport = hdev->num_tqps / num_vport;
1833 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1834 
1835 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1836 			     GFP_KERNEL);
1837 	if (!vport)
1838 		return -ENOMEM;
1839 
1840 	hdev->vport = vport;
1841 	hdev->num_alloc_vport = num_vport;
1842 
1843 	if (IS_ENABLED(CONFIG_PCI_IOV))
1844 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1845 
1846 	for (i = 0; i < num_vport; i++) {
1847 		vport->back = hdev;
1848 		vport->vport_id = i;
1849 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1850 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1851 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1852 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1853 		vport->req_vlan_fltr_en = true;
1854 		INIT_LIST_HEAD(&vport->vlan_list);
1855 		INIT_LIST_HEAD(&vport->uc_mac_list);
1856 		INIT_LIST_HEAD(&vport->mc_mac_list);
1857 		spin_lock_init(&vport->mac_list_lock);
1858 
1859 		if (i == 0)
1860 			ret = hclge_vport_setup(vport, tqp_main_vport);
1861 		else
1862 			ret = hclge_vport_setup(vport, tqp_per_vport);
1863 		if (ret) {
1864 			dev_err(&pdev->dev,
1865 				"vport setup failed for vport %d, %d\n",
1866 				i, ret);
1867 			return ret;
1868 		}
1869 
1870 		vport++;
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1877 				    struct hclge_pkt_buf_alloc *buf_alloc)
1878 {
/* TX buffer size is in units of 128 bytes */
1880 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1881 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1882 	struct hclge_tx_buff_alloc_cmd *req;
1883 	struct hclge_desc desc;
1884 	int ret;
1885 	u8 i;
1886 
1887 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1888 
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
1890 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1891 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1892 
1893 		req->tx_pkt_buff[i] =
1894 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1895 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1896 	}
1897 
1898 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1899 	if (ret)
1900 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1901 			ret);
1902 
1903 	return ret;
1904 }
1905 
1906 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1907 				 struct hclge_pkt_buf_alloc *buf_alloc)
1908 {
1909 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1910 
1911 	if (ret)
1912 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1913 
1914 	return ret;
1915 }
1916 
1917 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1918 {
1919 	unsigned int i;
1920 	u32 cnt = 0;
1921 
1922 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1923 		if (hdev->hw_tc_map & BIT(i))
1924 			cnt++;
1925 	return cnt;
1926 }
1927 
/* Get the number of PFC-enabled TCs that have a private buffer */
1929 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1930 				  struct hclge_pkt_buf_alloc *buf_alloc)
1931 {
1932 	struct hclge_priv_buf *priv;
1933 	unsigned int i;
1934 	int cnt = 0;
1935 
1936 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1937 		priv = &buf_alloc->priv_buf[i];
1938 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1939 		    priv->enable)
1940 			cnt++;
1941 	}
1942 
1943 	return cnt;
1944 }
1945 
/* Get the number of PFC-disabled TCs that have a private buffer */
1947 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1948 				     struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 	struct hclge_priv_buf *priv;
1951 	unsigned int i;
1952 	int cnt = 0;
1953 
1954 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1955 		priv = &buf_alloc->priv_buf[i];
1956 		if (hdev->hw_tc_map & BIT(i) &&
1957 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1958 		    priv->enable)
1959 			cnt++;
1960 	}
1961 
1962 	return cnt;
1963 }
1964 
1965 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1966 {
1967 	struct hclge_priv_buf *priv;
1968 	u32 rx_priv = 0;
1969 	int i;
1970 
1971 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1972 		priv = &buf_alloc->priv_buf[i];
1973 		if (priv->enable)
1974 			rx_priv += priv->buf_size;
1975 	}
1976 	return rx_priv;
1977 }
1978 
1979 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1980 {
1981 	u32 i, total_tx_size = 0;
1982 
1983 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1984 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1985 
1986 	return total_tx_size;
1987 }
1988 
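/* hclge_is_rx_buf_ok: check whether the rx buffer left after the private
 * allocation can hold the required shared buffer; if so, record the shared
 * buffer size and fill in its self and per-TC waterlines.
 */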
1989 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1990 				struct hclge_pkt_buf_alloc *buf_alloc,
1991 				u32 rx_all)
1992 {
1993 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1994 	u32 tc_num = hclge_get_tc_num(hdev);
1995 	u32 shared_buf, aligned_mps;
1996 	u32 rx_priv;
1997 	int i;
1998 
1999 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2000 
2001 	if (hnae3_dev_dcb_supported(hdev))
2002 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2003 					hdev->dv_buf_size;
2004 	else
2005 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2006 					+ hdev->dv_buf_size;
2007 
2008 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2009 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2010 			     HCLGE_BUF_SIZE_UNIT);
2011 
2012 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2013 	if (rx_all < rx_priv + shared_std)
2014 		return false;
2015 
2016 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2017 	buf_alloc->s_buf.buf_size = shared_buf;
2018 	if (hnae3_dev_dcb_supported(hdev)) {
2019 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2020 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2021 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2022 				  HCLGE_BUF_SIZE_UNIT);
2023 	} else {
2024 		buf_alloc->s_buf.self.high = aligned_mps +
2025 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2026 		buf_alloc->s_buf.self.low = aligned_mps;
2027 	}
2028 
2029 	if (hnae3_dev_dcb_supported(hdev)) {
2030 		hi_thrd = shared_buf - hdev->dv_buf_size;
2031 
2032 		if (tc_num <= NEED_RESERVE_TC_NUM)
2033 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2034 					/ BUF_MAX_PERCENT;
2035 
2036 		if (tc_num)
2037 			hi_thrd = hi_thrd / tc_num;
2038 
2039 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2040 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2041 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2042 	} else {
2043 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2044 		lo_thrd = aligned_mps;
2045 	}
2046 
2047 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2048 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2049 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2050 	}
2051 
2052 	return true;
2053 }
2054 
2055 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2056 				struct hclge_pkt_buf_alloc *buf_alloc)
2057 {
2058 	u32 i, total_size;
2059 
2060 	total_size = hdev->pkt_buf_size;
2061 
2062 	/* alloc tx buffer for all enabled tc */
2063 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2065 
2066 		if (hdev->hw_tc_map & BIT(i)) {
2067 			if (total_size < hdev->tx_buf_size)
2068 				return -ENOMEM;
2069 
2070 			priv->tx_buf_size = hdev->tx_buf_size;
2071 		} else {
2072 			priv->tx_buf_size = 0;
2073 		}
2074 
2075 		total_size -= priv->tx_buf_size;
2076 	}
2077 
2078 	return 0;
2079 }
2080 
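/* hclge_rx_buf_calc_all: assign a private rx buffer and waterlines to every
 * enabled TC, using the larger waterlines when @max is true, then check
 * whether the remaining packet buffer still fits the shared buffer.
 */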
2081 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2082 				  struct hclge_pkt_buf_alloc *buf_alloc)
2083 {
2084 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2085 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2086 	unsigned int i;
2087 
2088 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2089 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2090 
2091 		priv->enable = 0;
2092 		priv->wl.low = 0;
2093 		priv->wl.high = 0;
2094 		priv->buf_size = 0;
2095 
2096 		if (!(hdev->hw_tc_map & BIT(i)))
2097 			continue;
2098 
2099 		priv->enable = 1;
2100 
2101 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2102 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2103 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2104 						HCLGE_BUF_SIZE_UNIT);
2105 		} else {
2106 			priv->wl.low = 0;
2107 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2108 					aligned_mps;
2109 		}
2110 
2111 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2112 	}
2113 
2114 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2115 }
2116 
2117 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2118 					  struct hclge_pkt_buf_alloc *buf_alloc)
2119 {
2120 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2121 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2122 	int i;
2123 
	/* clear the private buffers, starting from the last TC */
2125 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2126 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2127 		unsigned int mask = BIT((unsigned int)i);
2128 
2129 		if (hdev->hw_tc_map & mask &&
2130 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2131 			/* Clear the no pfc TC private buffer */
2132 			priv->wl.low = 0;
2133 			priv->wl.high = 0;
2134 			priv->buf_size = 0;
2135 			priv->enable = 0;
2136 			no_pfc_priv_num--;
2137 		}
2138 
2139 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2140 		    no_pfc_priv_num == 0)
2141 			break;
2142 	}
2143 
2144 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2145 }
2146 
2147 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2148 					struct hclge_pkt_buf_alloc *buf_alloc)
2149 {
2150 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2151 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2152 	int i;
2153 
	/* clear the private buffers, starting from the last TC */
2155 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2156 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2157 		unsigned int mask = BIT((unsigned int)i);
2158 
2159 		if (hdev->hw_tc_map & mask &&
2160 		    hdev->tm_info.hw_pfc_map & mask) {
2161 			/* Reduce the number of pfc TC with private buffer */
2162 			priv->wl.low = 0;
2163 			priv->enable = 0;
2164 			priv->wl.high = 0;
2165 			priv->buf_size = 0;
2166 			pfc_priv_num--;
2167 		}
2168 
2169 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2170 		    pfc_priv_num == 0)
2171 			break;
2172 	}
2173 
2174 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2175 }
2176 
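/* hclge_only_alloc_priv_buff: try to split the whole remaining rx buffer
 * evenly into per-TC private buffers with no shared buffer; this only
 * succeeds when every enabled TC gets at least the minimum private size.
 */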
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2179 {
2180 #define COMPENSATE_BUFFER	0x3C00
2181 #define COMPENSATE_HALF_MPS_NUM	5
2182 #define PRIV_WL_GAP		0x1800
2183 
2184 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2185 	u32 tc_num = hclge_get_tc_num(hdev);
2186 	u32 half_mps = hdev->mps >> 1;
2187 	u32 min_rx_priv;
2188 	unsigned int i;
2189 
2190 	if (tc_num)
2191 		rx_priv = rx_priv / tc_num;
2192 
2193 	if (tc_num <= NEED_RESERVE_TC_NUM)
2194 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2195 
2196 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2197 			COMPENSATE_HALF_MPS_NUM * half_mps;
2198 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2199 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2200 	if (rx_priv < min_rx_priv)
2201 		return false;
2202 
2203 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2204 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2205 
2206 		priv->enable = 0;
2207 		priv->wl.low = 0;
2208 		priv->wl.high = 0;
2209 		priv->buf_size = 0;
2210 
2211 		if (!(hdev->hw_tc_map & BIT(i)))
2212 			continue;
2213 
2214 		priv->enable = 1;
2215 		priv->buf_size = rx_priv;
2216 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2217 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2218 	}
2219 
2220 	buf_alloc->s_buf.buf_size = 0;
2221 
2222 	return true;
2223 }
2224 
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
2230 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2231 				struct hclge_pkt_buf_alloc *buf_alloc)
2232 {
2233 	/* When DCB is not supported, rx private buffer is not allocated. */
2234 	if (!hnae3_dev_dcb_supported(hdev)) {
2235 		u32 rx_all = hdev->pkt_buf_size;
2236 
2237 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2238 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2239 			return -ENOMEM;
2240 
2241 		return 0;
2242 	}
2243 
2244 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2245 		return 0;
2246 
2247 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2248 		return 0;
2249 
2250 	/* try to decrease the buffer size */
2251 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2252 		return 0;
2253 
2254 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2255 		return 0;
2256 
2257 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2258 		return 0;
2259 
2260 	return -ENOMEM;
2261 }
2262 
2263 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2264 				   struct hclge_pkt_buf_alloc *buf_alloc)
2265 {
2266 	struct hclge_rx_priv_buff_cmd *req;
2267 	struct hclge_desc desc;
2268 	int ret;
2269 	int i;
2270 
2271 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2272 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2273 
2274 	/* Alloc private buffer TCs */
2275 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2276 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2277 
2278 		req->buf_num[i] =
2279 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2280 		req->buf_num[i] |=
2281 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2282 	}
2283 
2284 	req->shared_buf =
2285 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2286 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2287 
2288 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2289 	if (ret)
2290 		dev_err(&hdev->pdev->dev,
2291 			"rx private buffer alloc cmd failed %d\n", ret);
2292 
2293 	return ret;
2294 }
2295 
2296 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2297 				   struct hclge_pkt_buf_alloc *buf_alloc)
2298 {
2299 	struct hclge_rx_priv_wl_buf *req;
2300 	struct hclge_priv_buf *priv;
2301 	struct hclge_desc desc[2];
2302 	int i, j;
2303 	int ret;
2304 
2305 	for (i = 0; i < 2; i++) {
2306 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2307 					   false);
2308 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2309 
		/* The first descriptor sets the NEXT bit to 1 */
2311 		if (i == 0)
2312 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2313 		else
2314 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2315 
2316 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2317 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2318 
2319 			priv = &buf_alloc->priv_buf[idx];
2320 			req->tc_wl[j].high =
2321 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2322 			req->tc_wl[j].high |=
2323 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2324 			req->tc_wl[j].low =
2325 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2326 			req->tc_wl[j].low |=
2327 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2328 		}
2329 	}
2330 
	/* Send 2 descriptors at one time */
2332 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2333 	if (ret)
2334 		dev_err(&hdev->pdev->dev,
2335 			"rx private waterline config cmd failed %d\n",
2336 			ret);
2337 	return ret;
2338 }
2339 
2340 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2341 				    struct hclge_pkt_buf_alloc *buf_alloc)
2342 {
2343 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2344 	struct hclge_rx_com_thrd *req;
2345 	struct hclge_desc desc[2];
2346 	struct hclge_tc_thrd *tc;
2347 	int i, j;
2348 	int ret;
2349 
2350 	for (i = 0; i < 2; i++) {
2351 		hclge_cmd_setup_basic_desc(&desc[i],
2352 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2353 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2354 
		/* The first descriptor sets the NEXT bit to 1 */
2356 		if (i == 0)
2357 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2358 		else
2359 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2360 
2361 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2362 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2363 
2364 			req->com_thrd[j].high =
2365 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2366 			req->com_thrd[j].high |=
2367 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2368 			req->com_thrd[j].low =
2369 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2370 			req->com_thrd[j].low |=
2371 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2372 		}
2373 	}
2374 
2375 	/* Send 2 descriptors at one time */
2376 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2377 	if (ret)
2378 		dev_err(&hdev->pdev->dev,
2379 			"common threshold config cmd failed %d\n", ret);
2380 	return ret;
2381 }
2382 
2383 static int hclge_common_wl_config(struct hclge_dev *hdev,
2384 				  struct hclge_pkt_buf_alloc *buf_alloc)
2385 {
2386 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2387 	struct hclge_rx_com_wl *req;
2388 	struct hclge_desc desc;
2389 	int ret;
2390 
2391 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2392 
2393 	req = (struct hclge_rx_com_wl *)desc.data;
2394 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2395 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2396 
2397 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2398 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2399 
2400 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2401 	if (ret)
2402 		dev_err(&hdev->pdev->dev,
2403 			"common waterline config cmd failed %d\n", ret);
2404 
2405 	return ret;
2406 }
2407 
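/* hclge_buffer_alloc: calculate and program the tx and rx packet buffers;
 * on DCB capable devices also configure the rx private waterlines and the
 * common thresholds.
 */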
2408 int hclge_buffer_alloc(struct hclge_dev *hdev)
2409 {
2410 	struct hclge_pkt_buf_alloc *pkt_buf;
2411 	int ret;
2412 
2413 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2414 	if (!pkt_buf)
2415 		return -ENOMEM;
2416 
2417 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2418 	if (ret) {
2419 		dev_err(&hdev->pdev->dev,
2420 			"could not calc tx buffer size for all TCs %d\n", ret);
2421 		goto out;
2422 	}
2423 
2424 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2425 	if (ret) {
2426 		dev_err(&hdev->pdev->dev,
2427 			"could not alloc tx buffers %d\n", ret);
2428 		goto out;
2429 	}
2430 
2431 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2432 	if (ret) {
2433 		dev_err(&hdev->pdev->dev,
2434 			"could not calc rx priv buffer size for all TCs %d\n",
2435 			ret);
2436 		goto out;
2437 	}
2438 
2439 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2440 	if (ret) {
2441 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2442 			ret);
2443 		goto out;
2444 	}
2445 
2446 	if (hnae3_dev_dcb_supported(hdev)) {
2447 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2448 		if (ret) {
2449 			dev_err(&hdev->pdev->dev,
2450 				"could not configure rx private waterline %d\n",
2451 				ret);
2452 			goto out;
2453 		}
2454 
2455 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2456 		if (ret) {
2457 			dev_err(&hdev->pdev->dev,
2458 				"could not configure common threshold %d\n",
2459 				ret);
2460 			goto out;
2461 		}
2462 	}
2463 
2464 	ret = hclge_common_wl_config(hdev, pkt_buf);
2465 	if (ret)
2466 		dev_err(&hdev->pdev->dev,
2467 			"could not configure common waterline %d\n", ret);
2468 
2469 out:
2470 	kfree(pkt_buf);
2471 	return ret;
2472 }
2473 
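/* hclge_init_roce_base_info: derive the RoCE handle's vector and I/O
 * resources from the PF; the RoCE vectors start right after the NIC
 * vectors, and the io/mem bases come from the PF hardware.
 */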
2474 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2475 {
2476 	struct hnae3_handle *roce = &vport->roce;
2477 	struct hnae3_handle *nic = &vport->nic;
2478 	struct hclge_dev *hdev = vport->back;
2479 
2480 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2481 
2482 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2483 		return -EINVAL;
2484 
2485 	roce->rinfo.base_vector = hdev->roce_base_vector;
2486 
2487 	roce->rinfo.netdev = nic->kinfo.netdev;
2488 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2489 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2490 
2491 	roce->pdev = nic->pdev;
2492 	roce->ae_algo = nic->ae_algo;
2493 	roce->numa_node_mask = nic->numa_node_mask;
2494 
2495 	return 0;
2496 }
2497 
2498 static int hclge_init_msi(struct hclge_dev *hdev)
2499 {
2500 	struct pci_dev *pdev = hdev->pdev;
2501 	int vectors;
2502 	int i;
2503 
2504 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2505 					hdev->num_msi,
2506 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2507 	if (vectors < 0) {
2508 		dev_err(&pdev->dev,
2509 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2510 			vectors);
2511 		return vectors;
2512 	}
2513 	if (vectors < hdev->num_msi)
2514 		dev_warn(&hdev->pdev->dev,
2515 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2516 			 hdev->num_msi, vectors);
2517 
2518 	hdev->num_msi = vectors;
2519 	hdev->num_msi_left = vectors;
2520 
2521 	hdev->base_msi_vector = pdev->irq;
2522 	hdev->roce_base_vector = hdev->base_msi_vector +
2523 				hdev->num_nic_msi;
2524 
2525 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2526 					   sizeof(u16), GFP_KERNEL);
2527 	if (!hdev->vector_status) {
2528 		pci_free_irq_vectors(pdev);
2529 		return -ENOMEM;
2530 	}
2531 
2532 	for (i = 0; i < hdev->num_msi; i++)
2533 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2534 
2535 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2536 					sizeof(int), GFP_KERNEL);
2537 	if (!hdev->vector_irq) {
2538 		pci_free_irq_vectors(pdev);
2539 		return -ENOMEM;
2540 	}
2541 
2542 	return 0;
2543 }
2544 
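/* Only 10M and 100M links support half duplex; force full duplex for all
 * other speeds.
 */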
2545 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2546 {
2547 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2548 		duplex = HCLGE_MAC_FULL;
2549 
2550 	return duplex;
2551 }
2552 
2553 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2554 				      u8 duplex)
2555 {
2556 	struct hclge_config_mac_speed_dup_cmd *req;
2557 	struct hclge_desc desc;
2558 	int ret;
2559 
2560 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2561 
2562 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2563 
2564 	if (duplex)
2565 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2566 
2567 	switch (speed) {
2568 	case HCLGE_MAC_SPEED_10M:
2569 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 				HCLGE_CFG_SPEED_S, 6);
2571 		break;
2572 	case HCLGE_MAC_SPEED_100M:
2573 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 				HCLGE_CFG_SPEED_S, 7);
2575 		break;
2576 	case HCLGE_MAC_SPEED_1G:
2577 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 				HCLGE_CFG_SPEED_S, 0);
2579 		break;
2580 	case HCLGE_MAC_SPEED_10G:
2581 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 				HCLGE_CFG_SPEED_S, 1);
2583 		break;
2584 	case HCLGE_MAC_SPEED_25G:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, 2);
2587 		break;
2588 	case HCLGE_MAC_SPEED_40G:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, 3);
2591 		break;
2592 	case HCLGE_MAC_SPEED_50G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, 4);
2595 		break;
2596 	case HCLGE_MAC_SPEED_100G:
2597 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 				HCLGE_CFG_SPEED_S, 5);
2599 		break;
2600 	case HCLGE_MAC_SPEED_200G:
2601 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 				HCLGE_CFG_SPEED_S, 8);
2603 		break;
2604 	default:
2605 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2606 		return -EINVAL;
2607 	}
2608 
2609 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2610 		      1);
2611 
2612 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2613 	if (ret) {
2614 		dev_err(&hdev->pdev->dev,
2615 			"mac speed/duplex config cmd failed %d.\n", ret);
2616 		return ret;
2617 	}
2618 
2619 	return 0;
2620 }
2621 
2622 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2623 {
2624 	struct hclge_mac *mac = &hdev->hw.mac;
2625 	int ret;
2626 
2627 	duplex = hclge_check_speed_dup(duplex, speed);
2628 	if (!mac->support_autoneg && mac->speed == speed &&
2629 	    mac->duplex == duplex)
2630 		return 0;
2631 
2632 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2633 	if (ret)
2634 		return ret;
2635 
2636 	hdev->hw.mac.speed = speed;
2637 	hdev->hw.mac.duplex = duplex;
2638 
2639 	return 0;
2640 }
2641 
2642 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2643 				     u8 duplex)
2644 {
2645 	struct hclge_vport *vport = hclge_get_vport(handle);
2646 	struct hclge_dev *hdev = vport->back;
2647 
2648 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2649 }
2650 
2651 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2652 {
2653 	struct hclge_config_auto_neg_cmd *req;
2654 	struct hclge_desc desc;
2655 	u32 flag = 0;
2656 	int ret;
2657 
2658 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2659 
2660 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2661 	if (enable)
2662 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2663 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2664 
2665 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2666 	if (ret)
2667 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2668 			ret);
2669 
2670 	return ret;
2671 }
2672 
2673 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2674 {
2675 	struct hclge_vport *vport = hclge_get_vport(handle);
2676 	struct hclge_dev *hdev = vport->back;
2677 
2678 	if (!hdev->hw.mac.support_autoneg) {
2679 		if (enable) {
2680 			dev_err(&hdev->pdev->dev,
2681 				"autoneg is not supported by current port\n");
2682 			return -EOPNOTSUPP;
2683 		} else {
2684 			return 0;
2685 		}
2686 	}
2687 
2688 	return hclge_set_autoneg_en(hdev, enable);
2689 }
2690 
2691 static int hclge_get_autoneg(struct hnae3_handle *handle)
2692 {
2693 	struct hclge_vport *vport = hclge_get_vport(handle);
2694 	struct hclge_dev *hdev = vport->back;
2695 	struct phy_device *phydev = hdev->hw.mac.phydev;
2696 
2697 	if (phydev)
2698 		return phydev->autoneg;
2699 
2700 	return hdev->hw.mac.autoneg;
2701 }
2702 
2703 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2704 {
2705 	struct hclge_vport *vport = hclge_get_vport(handle);
2706 	struct hclge_dev *hdev = vport->back;
2707 	int ret;
2708 
2709 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2710 
2711 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2712 	if (ret)
2713 		return ret;
2714 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2715 }
2716 
2717 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2718 {
2719 	struct hclge_vport *vport = hclge_get_vport(handle);
2720 	struct hclge_dev *hdev = vport->back;
2721 
2722 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2723 		return hclge_set_autoneg_en(hdev, !halt);
2724 
2725 	return 0;
2726 }
2727 
2728 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2729 {
2730 	struct hclge_config_fec_cmd *req;
2731 	struct hclge_desc desc;
2732 	int ret;
2733 
2734 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2735 
2736 	req = (struct hclge_config_fec_cmd *)desc.data;
2737 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2738 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2739 	if (fec_mode & BIT(HNAE3_FEC_RS))
2740 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2741 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2742 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2743 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2744 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2745 
2746 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2747 	if (ret)
2748 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2749 
2750 	return ret;
2751 }
2752 
2753 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2754 {
2755 	struct hclge_vport *vport = hclge_get_vport(handle);
2756 	struct hclge_dev *hdev = vport->back;
2757 	struct hclge_mac *mac = &hdev->hw.mac;
2758 	int ret;
2759 
2760 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2761 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2762 		return -EINVAL;
2763 	}
2764 
2765 	ret = hclge_set_fec_hw(hdev, fec_mode);
2766 	if (ret)
2767 		return ret;
2768 
2769 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2770 	return 0;
2771 }
2772 
2773 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2774 			  u8 *fec_mode)
2775 {
2776 	struct hclge_vport *vport = hclge_get_vport(handle);
2777 	struct hclge_dev *hdev = vport->back;
2778 	struct hclge_mac *mac = &hdev->hw.mac;
2779 
2780 	if (fec_ability)
2781 		*fec_ability = mac->fec_ability;
2782 	if (fec_mode)
2783 		*fec_mode = mac->fec_mode;
2784 }
2785 
2786 static int hclge_mac_init(struct hclge_dev *hdev)
2787 {
2788 	struct hclge_mac *mac = &hdev->hw.mac;
2789 	int ret;
2790 
2791 	hdev->support_sfp_query = true;
2792 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2793 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2794 					 hdev->hw.mac.duplex);
2795 	if (ret)
2796 		return ret;
2797 
2798 	if (hdev->hw.mac.support_autoneg) {
2799 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2800 		if (ret)
2801 			return ret;
2802 	}
2803 
2804 	mac->link = 0;
2805 
2806 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2807 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2808 		if (ret)
2809 			return ret;
2810 	}
2811 
2812 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2813 	if (ret) {
2814 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2815 		return ret;
2816 	}
2817 
2818 	ret = hclge_set_default_loopback(hdev);
2819 	if (ret)
2820 		return ret;
2821 
2822 	ret = hclge_buffer_alloc(hdev);
2823 	if (ret)
2824 		dev_err(&hdev->pdev->dev,
2825 			"allocate buffer fail, ret=%d\n", ret);
2826 
2827 	return ret;
2828 }
2829 
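/* The three helpers below queue the common service task on the first CPU in
 * the device's affinity mask; each sets its own state bit so the task knows
 * whether mailbox, reset or error handling work is pending.
 */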
2830 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2831 {
2832 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2834 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 				    hclge_wq, &hdev->service_task, 0);
2836 }
2837 
2838 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2839 {
2840 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2842 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 				    hclge_wq, &hdev->service_task, 0);
2844 }
2845 
2846 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2847 {
2848 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2850 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851 				    hclge_wq, &hdev->service_task, 0);
2852 }
2853 
2854 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2855 {
2856 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2857 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2858 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2859 				    hclge_wq, &hdev->service_task,
2860 				    delay_time);
2861 }
2862 
2863 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2864 {
2865 	struct hclge_link_status_cmd *req;
2866 	struct hclge_desc desc;
2867 	int ret;
2868 
2869 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2870 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2871 	if (ret) {
2872 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2873 			ret);
2874 		return ret;
2875 	}
2876 
2877 	req = (struct hclge_link_status_cmd *)desc.data;
2878 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2879 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2880 
2881 	return 0;
2882 }
2883 
2884 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2885 {
2886 	struct phy_device *phydev = hdev->hw.mac.phydev;
2887 
2888 	*link_status = HCLGE_LINK_STATUS_DOWN;
2889 
2890 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2891 		return 0;
2892 
2893 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2894 		return 0;
2895 
2896 	return hclge_get_mac_link_status(hdev, link_status);
2897 }
2898 
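/* hclge_push_link_status: push the PF link status to every alive VF whose
 * link state is configured as IFLA_VF_LINK_STATE_AUTO.
 */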
2899 static void hclge_push_link_status(struct hclge_dev *hdev)
2900 {
2901 	struct hclge_vport *vport;
2902 	int ret;
2903 	u16 i;
2904 
2905 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2906 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2907 
2908 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2909 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2910 			continue;
2911 
2912 		ret = hclge_push_vf_link_status(vport);
2913 		if (ret) {
2914 			dev_err(&hdev->pdev->dev,
2915 				"failed to push link status to vf%u, ret = %d\n",
2916 				i, ret);
2917 		}
2918 	}
2919 }
2920 
2921 static void hclge_update_link_status(struct hclge_dev *hdev)
2922 {
2923 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2924 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2925 	struct hnae3_client *rclient = hdev->roce_client;
2926 	struct hnae3_client *client = hdev->nic_client;
2927 	int state;
2928 	int ret;
2929 
2930 	if (!client)
2931 		return;
2932 
2933 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2934 		return;
2935 
2936 	ret = hclge_get_mac_phy_link(hdev, &state);
2937 	if (ret) {
2938 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2939 		return;
2940 	}
2941 
2942 	if (state != hdev->hw.mac.link) {
2943 		client->ops->link_status_change(handle, state);
2944 		hclge_config_mac_tnl_int(hdev, state);
2945 		if (rclient && rclient->ops->link_status_change)
2946 			rclient->ops->link_status_change(rhandle, state);
2947 
2948 		hdev->hw.mac.link = state;
2949 		hclge_push_link_status(hdev);
2950 	}
2951 
2952 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2953 }
2954 
2955 static void hclge_update_port_capability(struct hclge_dev *hdev,
2956 					 struct hclge_mac *mac)
2957 {
2958 	if (hnae3_dev_fec_supported(hdev))
2959 		/* update fec ability by speed */
2960 		hclge_convert_setting_fec(mac);
2961 
	/* firmware cannot identify backplane type, the media type
	 * read from configuration can help to deal with it
	 */
2965 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2966 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2967 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2968 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2969 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2970 
2971 	if (mac->support_autoneg) {
2972 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2973 		linkmode_copy(mac->advertising, mac->supported);
2974 	} else {
2975 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2976 				   mac->supported);
2977 		linkmode_zero(mac->advertising);
2978 	}
2979 }
2980 
2981 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2982 {
2983 	struct hclge_sfp_info_cmd *resp;
2984 	struct hclge_desc desc;
2985 	int ret;
2986 
2987 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2988 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2989 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2990 	if (ret == -EOPNOTSUPP) {
2991 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
2993 		return ret;
2994 	} else if (ret) {
2995 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2996 		return ret;
2997 	}
2998 
2999 	*speed = le32_to_cpu(resp->speed);
3000 
3001 	return 0;
3002 }
3003 
3004 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3005 {
3006 	struct hclge_sfp_info_cmd *resp;
3007 	struct hclge_desc desc;
3008 	int ret;
3009 
3010 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3011 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3012 
3013 	resp->query_type = QUERY_ACTIVE_SPEED;
3014 
3015 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3016 	if (ret == -EOPNOTSUPP) {
3017 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
3019 		return ret;
3020 	} else if (ret) {
3021 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3022 		return ret;
3023 	}
3024 
	/* In some cases, the MAC speed obtained from the IMP may be 0;
	 * it should not be assigned to mac->speed.
	 */
3028 	if (!le32_to_cpu(resp->speed))
3029 		return 0;
3030 
3031 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, the firmware is an old version,
	 * so do not update these parameters
	 */
3035 	if (resp->speed_ability) {
3036 		mac->module_type = le32_to_cpu(resp->module_type);
3037 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3038 		mac->autoneg = resp->autoneg;
3039 		mac->support_autoneg = resp->autoneg_ability;
3040 		mac->speed_type = QUERY_ACTIVE_SPEED;
3041 		if (!resp->active_fec)
3042 			mac->fec_mode = 0;
3043 		else
3044 			mac->fec_mode = BIT(resp->active_fec);
3045 	} else {
3046 		mac->speed_type = QUERY_SFP_SPEED;
3047 	}
3048 
3049 	return 0;
3050 }
3051 
3052 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3053 					struct ethtool_link_ksettings *cmd)
3054 {
3055 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3056 	struct hclge_vport *vport = hclge_get_vport(handle);
3057 	struct hclge_phy_link_ksetting_0_cmd *req0;
3058 	struct hclge_phy_link_ksetting_1_cmd *req1;
3059 	u32 supported, advertising, lp_advertising;
3060 	struct hclge_dev *hdev = vport->back;
3061 	int ret;
3062 
3063 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3064 				   true);
3065 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3066 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3067 				   true);
3068 
3069 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3070 	if (ret) {
3071 		dev_err(&hdev->pdev->dev,
3072 			"failed to get phy link ksetting, ret = %d.\n", ret);
3073 		return ret;
3074 	}
3075 
3076 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3077 	cmd->base.autoneg = req0->autoneg;
3078 	cmd->base.speed = le32_to_cpu(req0->speed);
3079 	cmd->base.duplex = req0->duplex;
3080 	cmd->base.port = req0->port;
3081 	cmd->base.transceiver = req0->transceiver;
3082 	cmd->base.phy_address = req0->phy_address;
3083 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3084 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3085 	supported = le32_to_cpu(req0->supported);
3086 	advertising = le32_to_cpu(req0->advertising);
3087 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3088 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3089 						supported);
3090 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3091 						advertising);
3092 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3093 						lp_advertising);
3094 
3095 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3096 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3097 	cmd->base.master_slave_state = req1->master_slave_state;
3098 
3099 	return 0;
3100 }
3101 
3102 static int
3103 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3104 			     const struct ethtool_link_ksettings *cmd)
3105 {
3106 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3107 	struct hclge_vport *vport = hclge_get_vport(handle);
3108 	struct hclge_phy_link_ksetting_0_cmd *req0;
3109 	struct hclge_phy_link_ksetting_1_cmd *req1;
3110 	struct hclge_dev *hdev = vport->back;
3111 	u32 advertising;
3112 	int ret;
3113 
3114 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3115 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3116 	     (cmd->base.duplex != DUPLEX_HALF &&
3117 	      cmd->base.duplex != DUPLEX_FULL)))
3118 		return -EINVAL;
3119 
3120 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3121 				   false);
3122 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3123 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3124 				   false);
3125 
3126 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3127 	req0->autoneg = cmd->base.autoneg;
3128 	req0->speed = cpu_to_le32(cmd->base.speed);
3129 	req0->duplex = cmd->base.duplex;
3130 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3131 						cmd->link_modes.advertising);
3132 	req0->advertising = cpu_to_le32(advertising);
3133 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3134 
3135 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3136 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3137 
3138 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3139 	if (ret) {
3140 		dev_err(&hdev->pdev->dev,
3141 			"failed to set phy link ksettings, ret = %d.\n", ret);
3142 		return ret;
3143 	}
3144 
3145 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3146 	hdev->hw.mac.speed = cmd->base.speed;
3147 	hdev->hw.mac.duplex = cmd->base.duplex;
3148 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3149 
3150 	return 0;
3151 }
3152 
3153 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3154 {
3155 	struct ethtool_link_ksettings cmd;
3156 	int ret;
3157 
3158 	if (!hnae3_dev_phy_imp_supported(hdev))
3159 		return 0;
3160 
3161 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3162 	if (ret)
3163 		return ret;
3164 
3165 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3166 	hdev->hw.mac.speed = cmd.base.speed;
3167 	hdev->hw.mac.duplex = cmd.base.duplex;
3168 
3169 	return 0;
3170 }
3171 
3172 static int hclge_tp_port_init(struct hclge_dev *hdev)
3173 {
3174 	struct ethtool_link_ksettings cmd;
3175 
3176 	if (!hnae3_dev_phy_imp_supported(hdev))
3177 		return 0;
3178 
3179 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3180 	cmd.base.speed = hdev->hw.mac.speed;
3181 	cmd.base.duplex = hdev->hw.mac.duplex;
3182 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3183 
3184 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3185 }
3186 
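/* hclge_update_port_info: refresh the port's speed, duplex and related
 * capability; copper ports are queried through the PHY link ksettings,
 * other ports through the SFP/qSFP query commands.
 */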
3187 static int hclge_update_port_info(struct hclge_dev *hdev)
3188 {
3189 	struct hclge_mac *mac = &hdev->hw.mac;
3190 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3191 	int ret;
3192 
3193 	/* get the port info from SFP cmd if not copper port */
3194 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3195 		return hclge_update_tp_port_info(hdev);
3196 
	/* if IMP does not support getting SFP/qSFP info, return directly */
3198 	if (!hdev->support_sfp_query)
3199 		return 0;
3200 
3201 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3202 		ret = hclge_get_sfp_info(hdev, mac);
3203 	else
3204 		ret = hclge_get_sfp_speed(hdev, &speed);
3205 
3206 	if (ret == -EOPNOTSUPP) {
3207 		hdev->support_sfp_query = false;
3208 		return ret;
3209 	} else if (ret) {
3210 		return ret;
3211 	}
3212 
3213 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3214 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3215 			hclge_update_port_capability(hdev, mac);
3216 			return 0;
3217 		}
3218 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3219 					       HCLGE_MAC_FULL);
3220 	} else {
3221 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3222 			return 0; /* do nothing if no SFP */
3223 
3224 		/* must config full duplex for SFP */
3225 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3226 	}
3227 }
3228 
3229 static int hclge_get_status(struct hnae3_handle *handle)
3230 {
3231 	struct hclge_vport *vport = hclge_get_vport(handle);
3232 	struct hclge_dev *hdev = vport->back;
3233 
3234 	hclge_update_link_status(hdev);
3235 
3236 	return hdev->hw.mac.link;
3237 }
3238 
3239 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3240 {
3241 	if (!pci_num_vf(hdev->pdev)) {
3242 		dev_err(&hdev->pdev->dev,
3243 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3244 		return NULL;
3245 	}
3246 
3247 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3248 		dev_err(&hdev->pdev->dev,
3249 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3250 			vf, pci_num_vf(hdev->pdev));
3251 		return NULL;
3252 	}
3253 
	/* VFs start from vport 1; vport 0 is the PF itself */
3255 	vf += HCLGE_VF_VPORT_START_NUM;
3256 	return &hdev->vport[vf];
3257 }
3258 
3259 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3260 			       struct ifla_vf_info *ivf)
3261 {
3262 	struct hclge_vport *vport = hclge_get_vport(handle);
3263 	struct hclge_dev *hdev = vport->back;
3264 
3265 	vport = hclge_get_vf_vport(hdev, vf);
3266 	if (!vport)
3267 		return -EINVAL;
3268 
3269 	ivf->vf = vf;
3270 	ivf->linkstate = vport->vf_info.link_state;
3271 	ivf->spoofchk = vport->vf_info.spoofchk;
3272 	ivf->trusted = vport->vf_info.trusted;
3273 	ivf->min_tx_rate = 0;
3274 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3275 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3276 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3277 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3278 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3279 
3280 	return 0;
3281 }
3282 
3283 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3284 				   int link_state)
3285 {
3286 	struct hclge_vport *vport = hclge_get_vport(handle);
3287 	struct hclge_dev *hdev = vport->back;
3288 	int link_state_old;
3289 	int ret;
3290 
3291 	vport = hclge_get_vf_vport(hdev, vf);
3292 	if (!vport)
3293 		return -EINVAL;
3294 
3295 	link_state_old = vport->vf_info.link_state;
3296 	vport->vf_info.link_state = link_state;
3297 
3298 	ret = hclge_push_vf_link_status(vport);
3299 	if (ret) {
3300 		vport->vf_info.link_state = link_state_old;
3301 		dev_err(&hdev->pdev->dev,
3302 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3303 	}
3304 
3305 	return ret;
3306 }
3307 
3308 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3309 {
3310 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3311 
3312 	/* fetch the events from their corresponding regs */
3313 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3314 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3315 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3316 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3317 
	/* Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed in this pass and the mailbox
	 * events are deferred. Since the RX CMDQ event has not been cleared
	 * this time, the H/W will raise another interrupt just for the
	 * mailbox.
3323 	 *
3324 	 * check for vector0 reset event sources
3325 	 */
3326 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3327 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3328 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3329 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3330 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3331 		hdev->rst_stats.imp_rst_cnt++;
3332 		return HCLGE_VECTOR0_EVENT_RST;
3333 	}
3334 
3335 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3336 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3337 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3338 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3339 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3340 		hdev->rst_stats.global_rst_cnt++;
3341 		return HCLGE_VECTOR0_EVENT_RST;
3342 	}
3343 
3344 	/* check for vector0 msix event and hardware error event source */
3345 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3346 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3347 		return HCLGE_VECTOR0_EVENT_ERR;
3348 
3349 	/* check for vector0 ptp event source */
3350 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3351 		*clearval = msix_src_reg;
3352 		return HCLGE_VECTOR0_EVENT_PTP;
3353 	}
3354 
3355 	/* check for vector0 mailbox(=CMDQ RX) event source */
3356 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3357 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3358 		*clearval = cmdq_src_reg;
3359 		return HCLGE_VECTOR0_EVENT_MBX;
3360 	}
3361 
3362 	/* print other vector0 event source */
3363 	dev_info(&hdev->pdev->dev,
3364 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3365 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3366 
3367 	return HCLGE_VECTOR0_EVENT_OTHER;
3368 }
3369 
3370 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3371 				    u32 regclr)
3372 {
3373 	switch (event_type) {
3374 	case HCLGE_VECTOR0_EVENT_PTP:
3375 	case HCLGE_VECTOR0_EVENT_RST:
3376 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3377 		break;
3378 	case HCLGE_VECTOR0_EVENT_MBX:
3379 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3380 		break;
3381 	default:
3382 		break;
3383 	}
3384 }
3385 
3386 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3387 {
3388 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3389 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3390 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3391 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3392 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3393 }
3394 
3395 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3396 {
3397 	writel(enable ? 1 : 0, vector->addr);
3398 }
3399 
3400 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3401 {
3402 	struct hclge_dev *hdev = data;
3403 	unsigned long flags;
3404 	u32 clearval = 0;
3405 	u32 event_cause;
3406 
3407 	hclge_enable_vector(&hdev->misc_vector, false);
3408 	event_cause = hclge_check_event_cause(hdev, &clearval);
3409 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3411 	switch (event_cause) {
3412 	case HCLGE_VECTOR0_EVENT_ERR:
3413 		hclge_errhand_task_schedule(hdev);
3414 		break;
3415 	case HCLGE_VECTOR0_EVENT_RST:
3416 		hclge_reset_task_schedule(hdev);
3417 		break;
3418 	case HCLGE_VECTOR0_EVENT_PTP:
3419 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3420 		hclge_ptp_clean_tx_hwts(hdev);
3421 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3422 		break;
3423 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. we are not handling any mbx task and none is
		 *    scheduled, or
		 * 2. we are handling an mbx task but nothing more is
		 *    scheduled.
		 * In both cases, schedule the mbx task, as this interrupt
		 * indicates that more mbx messages are pending.
		 */
3433 		hclge_mbx_task_schedule(hdev);
3434 		break;
3435 	default:
3436 		dev_warn(&hdev->pdev->dev,
3437 			 "received unknown or unhandled event of vector0\n");
3438 		break;
3439 	}
3440 
3441 	hclge_clear_event_cause(hdev, event_cause, clearval);
3442 
3443 	/* Enable interrupt if it is not caused by reset event or error event */
3444 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3445 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3446 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3447 		hclge_enable_vector(&hdev->misc_vector, true);
3448 
3449 	return IRQ_HANDLED;
3450 }
3451 
3452 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3453 {
3454 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3455 		dev_warn(&hdev->pdev->dev,
3456 			 "vector(vector_id %d) has been freed.\n", vector_id);
3457 		return;
3458 	}
3459 
3460 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3461 	hdev->num_msi_left += 1;
3462 	hdev->num_msi_used -= 1;
3463 }
3464 
3465 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3466 {
3467 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3468 
3469 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3470 
3471 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3472 	hdev->vector_status[0] = 0;
3473 
3474 	hdev->num_msi_left -= 1;
3475 	hdev->num_msi_used += 1;
3476 }
3477 
3478 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3479 				      const cpumask_t *mask)
3480 {
3481 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3482 					      affinity_notify);
3483 
3484 	cpumask_copy(&hdev->affinity_mask, mask);
3485 }
3486 
3487 static void hclge_irq_affinity_release(struct kref *ref)
3488 {
3489 }
3490 
3491 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3492 {
3493 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3494 			      &hdev->affinity_mask);
3495 
3496 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3497 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3498 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3499 				  &hdev->affinity_notify);
3500 }
3501 
3502 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3503 {
3504 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3505 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3506 }
3507 
3508 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3509 {
3510 	int ret;
3511 
3512 	hclge_get_misc_vector(hdev);
3513 
	/* this IRQ is freed explicitly in hclge_misc_irq_uninit() */
3515 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3516 		 HCLGE_NAME, pci_name(hdev->pdev));
3517 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3518 			  0, hdev->misc_vector.name, hdev);
3519 	if (ret) {
3520 		hclge_free_vector(hdev, 0);
3521 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3522 			hdev->misc_vector.vector_irq);
3523 	}
3524 
3525 	return ret;
3526 }
3527 
3528 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3529 {
3530 	free_irq(hdev->misc_vector.vector_irq, hdev);
3531 	hclge_free_vector(hdev, 0);
3532 }
3533 
3534 int hclge_notify_client(struct hclge_dev *hdev,
3535 			enum hnae3_reset_notify_type type)
3536 {
3537 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3538 	struct hnae3_client *client = hdev->nic_client;
3539 	int ret;
3540 
3541 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3542 		return 0;
3543 
3544 	if (!client->ops->reset_notify)
3545 		return -EOPNOTSUPP;
3546 
3547 	ret = client->ops->reset_notify(handle, type);
3548 	if (ret)
3549 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3550 			type, ret);
3551 
3552 	return ret;
3553 }
3554 
3555 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3556 				    enum hnae3_reset_notify_type type)
3557 {
3558 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3559 	struct hnae3_client *client = hdev->roce_client;
3560 	int ret;
3561 
3562 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3563 		return 0;
3564 
3565 	if (!client->ops->reset_notify)
3566 		return -EOPNOTSUPP;
3567 
3568 	ret = client->ops->reset_notify(handle, type);
3569 	if (ret)
3570 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3571 			type, ret);
3572 
3573 	return ret;
3574 }
3575 
3576 static int hclge_reset_wait(struct hclge_dev *hdev)
3577 {
3578 #define HCLGE_RESET_WATI_MS	100
3579 #define HCLGE_RESET_WAIT_CNT	350
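/* the polling below is bounded by HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WATI_MS,
 * i.e. 350 * 100 ms = 35 seconds in total
 */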
3580 
3581 	u32 val, reg, reg_bit;
3582 	u32 cnt = 0;
3583 
3584 	switch (hdev->reset_type) {
3585 	case HNAE3_IMP_RESET:
3586 		reg = HCLGE_GLOBAL_RESET_REG;
3587 		reg_bit = HCLGE_IMP_RESET_BIT;
3588 		break;
3589 	case HNAE3_GLOBAL_RESET:
3590 		reg = HCLGE_GLOBAL_RESET_REG;
3591 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3592 		break;
3593 	case HNAE3_FUNC_RESET:
3594 		reg = HCLGE_FUN_RST_ING;
3595 		reg_bit = HCLGE_FUN_RST_ING_B;
3596 		break;
3597 	default:
3598 		dev_err(&hdev->pdev->dev,
3599 			"Wait for unsupported reset type: %d\n",
3600 			hdev->reset_type);
3601 		return -EINVAL;
3602 	}
3603 
3604 	val = hclge_read_dev(&hdev->hw, reg);
3605 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3606 		msleep(HCLGE_RESET_WATI_MS);
3607 		val = hclge_read_dev(&hdev->hw, reg);
3608 		cnt++;
3609 	}
3610 
3611 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3612 		dev_warn(&hdev->pdev->dev,
3613 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3614 		return -EBUSY;
3615 	}
3616 
3617 	return 0;
3618 }
3619 
3620 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3621 {
3622 	struct hclge_vf_rst_cmd *req;
3623 	struct hclge_desc desc;
3624 
3625 	req = (struct hclge_vf_rst_cmd *)desc.data;
3626 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3627 	req->dest_vfid = func_id;
3628 
3629 	if (reset)
3630 		req->vf_rst = 0x1;
3631 
3632 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3633 }
3634 
3635 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3636 {
3637 	int i;
3638 
3639 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3640 		struct hclge_vport *vport = &hdev->vport[i];
3641 		int ret;
3642 
3643 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3644 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3645 		if (ret) {
3646 			dev_err(&hdev->pdev->dev,
3647 				"set vf(%u) rst failed %d!\n",
3648 				vport->vport_id, ret);
3649 			return ret;
3650 		}
3651 
3652 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3653 			continue;
3654 
3655 		/* Inform VF to process the reset.
3656 		 * hclge_inform_reset_assert_to_vf may fail if VF
3657 		 * driver is not loaded.
3658 		 */
3659 		ret = hclge_inform_reset_assert_to_vf(vport);
3660 		if (ret)
3661 			dev_warn(&hdev->pdev->dev,
3662 				 "inform reset to vf(%u) failed %d!\n",
3663 				 vport->vport_id, ret);
3664 	}
3665 
3666 	return 0;
3667 }
3668 
3669 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3670 {
3671 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3672 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3673 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3674 		return;
3675 
3676 	hclge_mbx_handler(hdev);
3677 
3678 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3679 }
3680 
3681 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3682 {
3683 	struct hclge_pf_rst_sync_cmd *req;
3684 	struct hclge_desc desc;
3685 	int cnt = 0;
3686 	int ret;
3687 
3688 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3689 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3690 
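	/* Poll the firmware until it reports that all VFs are ready (have
	 * stopped IO), servicing the mailbox in each iteration so the VFs
	 * can actually be brought down during the wait.
	 */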
3691 	do {
		/* VF needs to down its netdev via mbx during PF or FLR reset */
3693 		hclge_mailbox_service_task(hdev);
3694 
3695 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
3699 		if (ret == -EOPNOTSUPP) {
3700 			msleep(HCLGE_RESET_SYNC_TIME);
3701 			return;
3702 		} else if (ret) {
3703 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3704 				 ret);
3705 			return;
3706 		} else if (req->all_vf_ready) {
3707 			return;
3708 		}
3709 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3710 		hclge_cmd_reuse_desc(&desc, true);
3711 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3712 
3713 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3714 }
3715 
3716 void hclge_report_hw_error(struct hclge_dev *hdev,
3717 			   enum hnae3_hw_error_type type)
3718 {
3719 	struct hnae3_client *client = hdev->nic_client;
3720 
3721 	if (!client || !client->ops->process_hw_error ||
3722 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3723 		return;
3724 
3725 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3726 }
3727 
3728 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3729 {
3730 	u32 reg_val;
3731 
3732 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3733 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3734 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3735 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3736 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3737 	}
3738 
3739 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3740 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3741 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3742 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3743 	}
3744 }
3745 
3746 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3747 {
3748 	struct hclge_desc desc;
3749 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3750 	int ret;
3751 
3752 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3753 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3754 	req->fun_reset_vfid = func_id;
3755 
3756 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3757 	if (ret)
3758 		dev_err(&hdev->pdev->dev,
3759 			"send function reset cmd fail, status =%d\n", ret);
3760 
3761 	return ret;
3762 }
3763 
3764 static void hclge_do_reset(struct hclge_dev *hdev)
3765 {
3766 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3767 	struct pci_dev *pdev = hdev->pdev;
3768 	u32 val;
3769 
3770 	if (hclge_get_hw_reset_stat(handle)) {
3771 		dev_info(&pdev->dev, "hardware reset not finish\n");
3772 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3773 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3774 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3775 		return;
3776 	}
3777 
3778 	switch (hdev->reset_type) {
3779 	case HNAE3_GLOBAL_RESET:
3780 		dev_info(&pdev->dev, "global reset requested\n");
3781 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3782 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3783 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3784 		break;
3785 	case HNAE3_FUNC_RESET:
3786 		dev_info(&pdev->dev, "PF reset requested\n");
3787 		/* schedule again to check later */
3788 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3789 		hclge_reset_task_schedule(hdev);
3790 		break;
3791 	default:
3792 		dev_warn(&pdev->dev,
3793 			 "unsupported reset type: %d\n", hdev->reset_type);
3794 		break;
3795 	}
3796 }
3797 
3798 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3799 						   unsigned long *addr)
3800 {
3801 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3802 	struct hclge_dev *hdev = ae_dev->priv;
3803 
3804 	/* return the highest priority reset level amongst all */
3805 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3806 		rst_level = HNAE3_IMP_RESET;
3807 		clear_bit(HNAE3_IMP_RESET, addr);
3808 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3809 		clear_bit(HNAE3_FUNC_RESET, addr);
3810 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3811 		rst_level = HNAE3_GLOBAL_RESET;
3812 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3813 		clear_bit(HNAE3_FUNC_RESET, addr);
3814 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3815 		rst_level = HNAE3_FUNC_RESET;
3816 		clear_bit(HNAE3_FUNC_RESET, addr);
3817 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3818 		rst_level = HNAE3_FLR_RESET;
3819 		clear_bit(HNAE3_FLR_RESET, addr);
3820 	}
3821 
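	/* do not report a lower level reset request while a higher level
	 * reset is already being handled
	 */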
3822 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3823 	    rst_level < hdev->reset_type)
3824 		return HNAE3_NONE_RESET;
3825 
3826 	return rst_level;
3827 }
3828 
3829 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3830 {
3831 	u32 clearval = 0;
3832 
3833 	switch (hdev->reset_type) {
3834 	case HNAE3_IMP_RESET:
3835 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3836 		break;
3837 	case HNAE3_GLOBAL_RESET:
3838 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3839 		break;
3840 	default:
3841 		break;
3842 	}
3843 
3844 	if (!clearval)
3845 		return;
3846 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3850 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3851 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3852 				clearval);
3853 
3854 	hclge_enable_vector(&hdev->misc_vector, true);
3855 }
3856 
3857 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3858 {
3859 	u32 reg_val;
3860 
3861 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3862 	if (enable)
3863 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3864 	else
3865 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3866 
3867 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3868 }
3869 
3870 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3871 {
3872 	int ret;
3873 
3874 	ret = hclge_set_all_vf_rst(hdev, true);
3875 	if (ret)
3876 		return ret;
3877 
3878 	hclge_func_reset_sync_vf(hdev);
3879 
3880 	return 0;
3881 }
3882 
3883 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3884 {
3885 	u32 reg_val;
3886 	int ret = 0;
3887 
3888 	switch (hdev->reset_type) {
3889 	case HNAE3_FUNC_RESET:
3890 		ret = hclge_func_reset_notify_vf(hdev);
3891 		if (ret)
3892 			return ret;
3893 
3894 		ret = hclge_func_reset_cmd(hdev, 0);
3895 		if (ret) {
3896 			dev_err(&hdev->pdev->dev,
3897 				"asserting function reset fail %d!\n", ret);
3898 			return ret;
3899 		}
3900 
		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
3906 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3907 		hdev->rst_stats.pf_rst_cnt++;
3908 		break;
3909 	case HNAE3_FLR_RESET:
3910 		ret = hclge_func_reset_notify_vf(hdev);
3911 		if (ret)
3912 			return ret;
3913 		break;
3914 	case HNAE3_IMP_RESET:
3915 		hclge_handle_imp_error(hdev);
3916 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3917 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3918 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3919 		break;
3920 	default:
3921 		break;
3922 	}
3923 
3924 	/* inform hardware that preparatory work is done */
3925 	msleep(HCLGE_RESET_SYNC_TIME);
3926 	hclge_reset_handshake(hdev, true);
3927 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3928 
3929 	return ret;
3930 }
3931 
3932 static void hclge_show_rst_info(struct hclge_dev *hdev)
3933 {
3934 	char *buf;
3935 
3936 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3937 	if (!buf)
3938 		return;
3939 
3940 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3941 
3942 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3943 
3944 	kfree(buf);
3945 }
3946 
3947 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3948 {
3949 #define MAX_RESET_FAIL_CNT 5
3950 
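	/* Decide whether the failed reset should be retried. Returning true
	 * asks the caller to re-schedule the reset task; retries stop when a
	 * new reset interrupt arrives or the failure count reaches
	 * MAX_RESET_FAIL_CNT.
	 */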
3951 	if (hdev->reset_pending) {
3952 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3953 			 hdev->reset_pending);
3954 		return true;
3955 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3956 		   HCLGE_RESET_INT_M) {
3957 		dev_info(&hdev->pdev->dev,
3958 			 "reset failed because new reset interrupt\n");
3959 		hclge_clear_reset_cause(hdev);
3960 		return false;
3961 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3962 		hdev->rst_stats.reset_fail_cnt++;
3963 		set_bit(hdev->reset_type, &hdev->reset_pending);
3964 		dev_info(&hdev->pdev->dev,
3965 			 "re-schedule reset task(%u)\n",
3966 			 hdev->rst_stats.reset_fail_cnt);
3967 		return true;
3968 	}
3969 
3970 	hclge_clear_reset_cause(hdev);
3971 
	/* recover the handshake status when reset fails */
3973 	hclge_reset_handshake(hdev, true);
3974 
3975 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3976 
3977 	hclge_show_rst_info(hdev);
3978 
3979 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3980 
3981 	return false;
3982 }
3983 
3984 static void hclge_update_reset_level(struct hclge_dev *hdev)
3985 {
3986 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3987 	enum hnae3_reset_type reset_level;
3988 
	/* reset requests will not be set during reset, so clear the
	 * pending reset request to avoid an unnecessary reset being
	 * triggered again for the same reason.
	 */
3993 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
3994 
	/* if default_reset_request holds a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * can only be fixed by that kind of reset.
	 */
3999 	reset_level = hclge_get_reset_level(ae_dev,
4000 					    &hdev->default_reset_request);
4001 	if (reset_level != HNAE3_NONE_RESET)
4002 		set_bit(reset_level, &hdev->reset_request);
4003 }
4004 
4005 static int hclge_set_rst_done(struct hclge_dev *hdev)
4006 {
4007 	struct hclge_pf_rst_done_cmd *req;
4008 	struct hclge_desc desc;
4009 	int ret;
4010 
4011 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4012 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4013 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4014 
4015 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4016 	/* To be compatible with the old firmware, which does not support
4017 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4018 	 * return success
4019 	 */
4020 	if (ret == -EOPNOTSUPP) {
4021 		dev_warn(&hdev->pdev->dev,
4022 			 "current firmware does not support command(0x%x)!\n",
4023 			 HCLGE_OPC_PF_RST_DONE);
4024 		return 0;
4025 	} else if (ret) {
4026 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4027 			ret);
4028 	}
4029 
4030 	return ret;
4031 }
4032 
4033 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4034 {
4035 	int ret = 0;
4036 
4037 	switch (hdev->reset_type) {
4038 	case HNAE3_FUNC_RESET:
4039 	case HNAE3_FLR_RESET:
4040 		ret = hclge_set_all_vf_rst(hdev, false);
4041 		break;
4042 	case HNAE3_GLOBAL_RESET:
4043 	case HNAE3_IMP_RESET:
4044 		ret = hclge_set_rst_done(hdev);
4045 		break;
4046 	default:
4047 		break;
4048 	}
4049 
	/* clear the handshake status after re-initialization is done */
4051 	hclge_reset_handshake(hdev, false);
4052 
4053 	return ret;
4054 }
4055 
4056 static int hclge_reset_stack(struct hclge_dev *hdev)
4057 {
4058 	int ret;
4059 
4060 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4061 	if (ret)
4062 		return ret;
4063 
4064 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4065 	if (ret)
4066 		return ret;
4067 
4068 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4069 }
4070 
4071 static int hclge_reset_prepare(struct hclge_dev *hdev)
4072 {
4073 	int ret;
4074 
4075 	hdev->rst_stats.reset_cnt++;
4076 	/* perform reset of the stack & ae device for a client */
4077 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4078 	if (ret)
4079 		return ret;
4080 
4081 	rtnl_lock();
4082 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4083 	rtnl_unlock();
4084 	if (ret)
4085 		return ret;
4086 
4087 	return hclge_reset_prepare_wait(hdev);
4088 }
4089 
4090 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4091 {
4092 	int ret;
4093 
4094 	hdev->rst_stats.hw_reset_done_cnt++;
4095 
4096 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4097 	if (ret)
4098 		return ret;
4099 
4100 	rtnl_lock();
4101 	ret = hclge_reset_stack(hdev);
4102 	rtnl_unlock();
4103 	if (ret)
4104 		return ret;
4105 
4106 	hclge_clear_reset_cause(hdev);
4107 
4108 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE notify error if the reset has already failed
	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times, so the rebuild can continue
	 */
4112 	if (ret &&
4113 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4114 		return ret;
4115 
4116 	ret = hclge_reset_prepare_up(hdev);
4117 	if (ret)
4118 		return ret;
4119 
4120 	rtnl_lock();
4121 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4122 	rtnl_unlock();
4123 	if (ret)
4124 		return ret;
4125 
4126 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4127 	if (ret)
4128 		return ret;
4129 
4130 	hdev->last_reset_time = jiffies;
4131 	hdev->rst_stats.reset_fail_cnt = 0;
4132 	hdev->rst_stats.reset_done_cnt++;
4133 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4134 
4135 	hclge_update_reset_level(hdev);
4136 
4137 	return 0;
4138 }
4139 
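/* Reset is done in three phases: prepare (notify the clients and assert the
 * reset), wait for the hardware to finish, then rebuild the stack and bring
 * the clients back up. Any failure falls through to the error handler, which
 * may re-schedule the reset task for another attempt.
 */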
4140 static void hclge_reset(struct hclge_dev *hdev)
4141 {
4142 	if (hclge_reset_prepare(hdev))
4143 		goto err_reset;
4144 
4145 	if (hclge_reset_wait(hdev))
4146 		goto err_reset;
4147 
4148 	if (hclge_reset_rebuild(hdev))
4149 		goto err_reset;
4150 
4151 	return;
4152 
4153 err_reset:
4154 	if (hclge_reset_err_handle(hdev))
4155 		hclge_reset_task_schedule(hdev);
4156 }
4157 
4158 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4159 {
4160 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4161 	struct hclge_dev *hdev = ae_dev->priv;
4162 
	/* We might end up getting called broadly because of the two cases
	 * below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * Check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ).
	 * In case of a new request we reset the "reset level" to PF reset.
	 * And if it is a repeat reset request of the most recent one then we
	 * want to make sure we throttle the reset request. Therefore, we
	 * will not allow it again before 3*HZ times.
	 */
4177 
4178 	if (time_before(jiffies, (hdev->last_reset_time +
4179 				  HCLGE_RESET_INTERVAL))) {
4180 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4181 		return;
4182 	}
4183 
4184 	if (hdev->default_reset_request) {
4185 		hdev->reset_level =
4186 			hclge_get_reset_level(ae_dev,
4187 					      &hdev->default_reset_request);
4188 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4189 		hdev->reset_level = HNAE3_FUNC_RESET;
4190 	}
4191 
4192 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4193 		 hdev->reset_level);
4194 
4195 	/* request reset & schedule reset task */
4196 	set_bit(hdev->reset_level, &hdev->reset_request);
4197 	hclge_reset_task_schedule(hdev);
4198 
4199 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4200 		hdev->reset_level++;
4201 }
4202 
4203 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4204 					enum hnae3_reset_type rst_type)
4205 {
4206 	struct hclge_dev *hdev = ae_dev->priv;
4207 
4208 	set_bit(rst_type, &hdev->default_reset_request);
4209 }
4210 
4211 static void hclge_reset_timer(struct timer_list *t)
4212 {
4213 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4214 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4218 	if (!hdev->default_reset_request)
4219 		return;
4220 
4221 	dev_info(&hdev->pdev->dev,
4222 		 "triggering reset in reset timer\n");
4223 	hclge_reset_event(hdev->pdev, NULL);
4224 }
4225 
4226 static void hclge_reset_subtask(struct hclge_dev *hdev)
4227 {
4228 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4229 
	/* check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for the hardware to complete the reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. else, we can come back later to check this status, so just
	 *       re-schedule for now.
	 */
4239 	hdev->last_reset_time = jiffies;
4240 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4241 	if (hdev->reset_type != HNAE3_NONE_RESET)
4242 		hclge_reset(hdev);
4243 
4244 	/* check if we got any *new* reset requests to be honored */
4245 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4246 	if (hdev->reset_type != HNAE3_NONE_RESET)
4247 		hclge_do_reset(hdev);
4248 
4249 	hdev->reset_type = HNAE3_NONE_RESET;
4250 }
4251 
4252 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4253 {
4254 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4255 	enum hnae3_reset_type reset_type;
4256 
4257 	if (ae_dev->hw_err_reset_req) {
4258 		reset_type = hclge_get_reset_level(ae_dev,
4259 						   &ae_dev->hw_err_reset_req);
4260 		hclge_set_def_reset_request(ae_dev, reset_type);
4261 	}
4262 
4263 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4264 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4265 
4266 	/* enable interrupt after error handling complete */
4267 	hclge_enable_vector(&hdev->misc_vector, true);
4268 }
4269 
4270 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4271 {
4272 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4273 
4274 	ae_dev->hw_err_reset_req = 0;
4275 
4276 	if (hclge_find_error_source(hdev)) {
4277 		hclge_handle_error_info_log(ae_dev);
4278 		hclge_handle_mac_tnl(hdev);
4279 	}
4280 
4281 	hclge_handle_err_reset_request(hdev);
4282 }
4283 
4284 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4285 {
4286 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4287 	struct device *dev = &hdev->pdev->dev;
4288 	u32 msix_sts_reg;
4289 
4290 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4291 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4292 		if (hclge_handle_hw_msix_error
4293 				(hdev, &hdev->default_reset_request))
4294 			dev_info(dev, "received msix interrupt 0x%x\n",
4295 				 msix_sts_reg);
4296 	}
4297 
4298 	hclge_handle_hw_ras_error(ae_dev);
4299 
4300 	hclge_handle_err_reset_request(hdev);
4301 }
4302 
4303 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4304 {
4305 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4306 		return;
4307 
4308 	if (hnae3_dev_ras_imp_supported(hdev))
4309 		hclge_handle_err_recovery(hdev);
4310 	else
4311 		hclge_misc_err_recovery(hdev);
4312 }
4313 
4314 static void hclge_reset_service_task(struct hclge_dev *hdev)
4315 {
4316 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4317 		return;
4318 
4319 	down(&hdev->reset_sem);
4320 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4321 
4322 	hclge_reset_subtask(hdev);
4323 
4324 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4325 	up(&hdev->reset_sem);
4326 }
4327 
4328 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4329 {
4330 	int i;
4331 
	/* start from vport 1, since the PF (vport 0) is always alive */
4333 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4334 		struct hclge_vport *vport = &hdev->vport[i];
4335 
4336 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4337 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4338 
		/* If the VF is not alive, set its MPS to the default value */
4340 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4341 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4342 	}
4343 }
4344 
4345 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4346 {
4347 	unsigned long delta = round_jiffies_relative(HZ);
4348 
4349 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4350 		return;
4351 
4352 	/* Always handle the link updating to make sure link state is
4353 	 * updated when it is triggered by mbx.
4354 	 */
4355 	hclge_update_link_status(hdev);
4356 	hclge_sync_mac_table(hdev);
4357 	hclge_sync_promisc_mode(hdev);
4358 	hclge_sync_fd_table(hdev);
4359 
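	/* Throttle the heavy periodic work to roughly once per second: if
	 * the last full pass ran less than HZ ago, only re-schedule with
	 * the remaining time.
	 */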
4360 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4361 		delta = jiffies - hdev->last_serv_processed;
4362 
4363 		if (delta < round_jiffies_relative(HZ)) {
4364 			delta = round_jiffies_relative(HZ) - delta;
4365 			goto out;
4366 		}
4367 	}
4368 
4369 	hdev->serv_processed_cnt++;
4370 	hclge_update_vport_alive(hdev);
4371 
4372 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4373 		hdev->last_serv_processed = jiffies;
4374 		goto out;
4375 	}
4376 
4377 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4378 		hclge_update_stats_for_all(hdev);
4379 
4380 	hclge_update_port_info(hdev);
4381 	hclge_sync_vlan_filter(hdev);
4382 
4383 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4384 		hclge_rfs_filter_expire(hdev);
4385 
4386 	hdev->last_serv_processed = jiffies;
4387 
4388 out:
4389 	hclge_task_schedule(hdev, delta);
4390 }
4391 
4392 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4393 {
4394 	unsigned long flags;
4395 
4396 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4397 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4398 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4399 		return;
4400 
4401 	/* to prevent concurrence with the irq handler */
4402 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4403 
4404 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4405 	 * handler may handle it just before spin_lock_irqsave().
4406 	 */
4407 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4408 		hclge_ptp_clean_tx_hwts(hdev);
4409 
4410 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4411 }
4412 
4413 static void hclge_service_task(struct work_struct *work)
4414 {
4415 	struct hclge_dev *hdev =
4416 		container_of(work, struct hclge_dev, service_task.work);
4417 
4418 	hclge_errhand_service_task(hdev);
4419 	hclge_reset_service_task(hdev);
4420 	hclge_ptp_service_task(hdev);
4421 	hclge_mailbox_service_task(hdev);
4422 	hclge_periodic_service_task(hdev);
4423 
4424 	/* Handle error recovery, reset and mbx again in case periodical task
4425 	 * delays the handling by calling hclge_task_schedule() in
4426 	 * hclge_periodic_service_task().
4427 	 */
4428 	hclge_errhand_service_task(hdev);
4429 	hclge_reset_service_task(hdev);
4430 	hclge_mailbox_service_task(hdev);
4431 }
4432 
4433 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4434 {
4435 	/* VF handle has no client */
4436 	if (!handle->client)
4437 		return container_of(handle, struct hclge_vport, nic);
4438 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4439 		return container_of(handle, struct hclge_vport, roce);
4440 	else
4441 		return container_of(handle, struct hclge_vport, nic);
4442 }
4443 
4444 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4445 				  struct hnae3_vector_info *vector_info)
4446 {
4447 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4448 
4449 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4450 
	/* an extended offset is needed to configure vectors >= 64 */
4452 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4453 		vector_info->io_addr = hdev->hw.io_base +
4454 				HCLGE_VECTOR_REG_BASE +
4455 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4456 	else
4457 		vector_info->io_addr = hdev->hw.io_base +
4458 				HCLGE_VECTOR_EXT_REG_BASE +
4459 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4460 				HCLGE_VECTOR_REG_OFFSET_H +
4461 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4462 				HCLGE_VECTOR_REG_OFFSET;
4463 
4464 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4465 	hdev->vector_irq[idx] = vector_info->vector;
4466 }
4467 
4468 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4469 			    struct hnae3_vector_info *vector_info)
4470 {
4471 	struct hclge_vport *vport = hclge_get_vport(handle);
4472 	struct hnae3_vector_info *vector = vector_info;
4473 	struct hclge_dev *hdev = vport->back;
4474 	int alloc = 0;
4475 	u16 i = 0;
4476 	u16 j;
4477 
4478 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4479 	vector_num = min(hdev->num_msi_left, vector_num);
4480 
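	/* vector 0 is reserved for the misc interrupt, so the search for
	 * free ring vectors starts from index 1
	 */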
4481 	for (j = 0; j < vector_num; j++) {
4482 		while (++i < hdev->num_nic_msi) {
4483 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4484 				hclge_get_vector_info(hdev, i, vector);
4485 				vector++;
4486 				alloc++;
4487 
4488 				break;
4489 			}
4490 		}
4491 	}
4492 	hdev->num_msi_left -= alloc;
4493 	hdev->num_msi_used += alloc;
4494 
4495 	return alloc;
4496 }
4497 
4498 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4499 {
4500 	int i;
4501 
4502 	for (i = 0; i < hdev->num_msi; i++)
4503 		if (vector == hdev->vector_irq[i])
4504 			return i;
4505 
4506 	return -EINVAL;
4507 }
4508 
4509 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4510 {
4511 	struct hclge_vport *vport = hclge_get_vport(handle);
4512 	struct hclge_dev *hdev = vport->back;
4513 	int vector_id;
4514 
4515 	vector_id = hclge_get_vector_index(hdev, vector);
4516 	if (vector_id < 0) {
4517 		dev_err(&hdev->pdev->dev,
4518 			"Get vector index fail. vector = %d\n", vector);
4519 		return vector_id;
4520 	}
4521 
4522 	hclge_free_vector(hdev, vector_id);
4523 
4524 	return 0;
4525 }
4526 
4527 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4528 {
4529 	return HCLGE_RSS_KEY_SIZE;
4530 }
4531 
4532 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4533 				  const u8 hfunc, const u8 *key)
4534 {
4535 	struct hclge_rss_config_cmd *req;
4536 	unsigned int key_offset = 0;
4537 	struct hclge_desc desc;
4538 	int key_counts;
4539 	int key_size;
4540 	int ret;
4541 
4542 	key_counts = HCLGE_RSS_KEY_SIZE;
4543 	req = (struct hclge_rss_config_cmd *)desc.data;
4544 
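	/* the hash key is written in chunks of at most HCLGE_RSS_HASH_KEY_NUM
	 * bytes; each command carries the chunk offset in hash_config
	 * together with the hash algorithm
	 */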
4545 	while (key_counts) {
4546 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4547 					   false);
4548 
4549 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4550 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4551 
4552 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4553 		memcpy(req->hash_key,
4554 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4555 
4556 		key_counts -= key_size;
4557 		key_offset++;
4558 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4559 		if (ret) {
4560 			dev_err(&hdev->pdev->dev,
4561 				"Configure RSS config fail, status = %d\n",
4562 				ret);
4563 			return ret;
4564 		}
4565 	}
4566 	return 0;
4567 }
4568 
4569 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4570 {
4571 	struct hclge_rss_indirection_table_cmd *req;
4572 	struct hclge_desc desc;
4573 	int rss_cfg_tbl_num;
4574 	u8 rss_msb_oft;
4575 	u8 rss_msb_val;
4576 	int ret;
4577 	u16 qid;
4578 	int i;
4579 	u32 j;
4580 
4581 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4582 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4583 			  HCLGE_RSS_CFG_TBL_SIZE;
4584 
4585 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4586 		hclge_cmd_setup_basic_desc
4587 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4588 
4589 		req->start_table_index =
4590 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4591 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
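		/* the low 8 bits of each queue id go into rss_qid_l[], and
		 * bit HCLGE_RSS_CFG_TBL_BW_L of the qid is packed,
		 * HCLGE_RSS_CFG_TBL_BW_H bits per entry, into rss_qid_h[]
		 */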
4592 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4593 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4594 			req->rss_qid_l[j] = qid & 0xff;
4595 			rss_msb_oft =
4596 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4597 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4598 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4599 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4600 		}
4601 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4602 		if (ret) {
4603 			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
4605 				ret);
4606 			return ret;
4607 		}
4608 	}
4609 	return 0;
4610 }
4611 
4612 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4613 				 u16 *tc_size, u16 *tc_offset)
4614 {
4615 	struct hclge_rss_tc_mode_cmd *req;
4616 	struct hclge_desc desc;
4617 	int ret;
4618 	int i;
4619 
4620 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4621 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4622 
4623 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4624 		u16 mode = 0;
4625 
4626 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4627 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4628 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4629 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4630 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4631 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4632 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4633 
4634 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4635 	}
4636 
4637 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4638 	if (ret)
4639 		dev_err(&hdev->pdev->dev,
4640 			"Configure rss tc mode fail, status = %d\n", ret);
4641 
4642 	return ret;
4643 }
4644 
4645 static void hclge_get_rss_type(struct hclge_vport *vport)
4646 {
4647 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4648 	    vport->rss_tuple_sets.ipv4_udp_en ||
4649 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4650 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4651 	    vport->rss_tuple_sets.ipv6_udp_en ||
4652 	    vport->rss_tuple_sets.ipv6_sctp_en)
4653 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4654 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4655 		 vport->rss_tuple_sets.ipv6_fragment_en)
4656 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4657 	else
4658 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4659 }
4660 
4661 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4662 {
4663 	struct hclge_rss_input_tuple_cmd *req;
4664 	struct hclge_desc desc;
4665 	int ret;
4666 
4667 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4668 
4669 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4670 
4671 	/* Get the tuple cfg from pf */
4672 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4673 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4674 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4675 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4676 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4677 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4678 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4679 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4680 	hclge_get_rss_type(&hdev->vport[0]);
4681 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4682 	if (ret)
4683 		dev_err(&hdev->pdev->dev,
4684 			"Configure rss input fail, status = %d\n", ret);
4685 	return ret;
4686 }
4687 
4688 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4689 			 u8 *key, u8 *hfunc)
4690 {
4691 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4692 	struct hclge_vport *vport = hclge_get_vport(handle);
4693 	int i;
4694 
4695 	/* Get hash algorithm */
4696 	if (hfunc) {
4697 		switch (vport->rss_algo) {
4698 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4699 			*hfunc = ETH_RSS_HASH_TOP;
4700 			break;
4701 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4702 			*hfunc = ETH_RSS_HASH_XOR;
4703 			break;
4704 		default:
4705 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4706 			break;
4707 		}
4708 	}
4709 
4710 	/* Get the RSS Key required by the user */
4711 	if (key)
4712 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4713 
4714 	/* Get indirect table */
4715 	if (indir)
4716 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4717 			indir[i] =  vport->rss_indirection_tbl[i];
4718 
4719 	return 0;
4720 }
4721 
4722 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4723 			 const  u8 *key, const  u8 hfunc)
4724 {
4725 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4726 	struct hclge_vport *vport = hclge_get_vport(handle);
4727 	struct hclge_dev *hdev = vport->back;
4728 	u8 hash_algo;
4729 	int ret, i;
4730 
	/* Set the RSS Hash Key if specified by the user */
4732 	if (key) {
4733 		switch (hfunc) {
4734 		case ETH_RSS_HASH_TOP:
4735 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4736 			break;
4737 		case ETH_RSS_HASH_XOR:
4738 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4739 			break;
4740 		case ETH_RSS_HASH_NO_CHANGE:
4741 			hash_algo = vport->rss_algo;
4742 			break;
4743 		default:
4744 			return -EINVAL;
4745 		}
4746 
4747 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4748 		if (ret)
4749 			return ret;
4750 
		/* Update the shadow RSS key with the user specified key */
4752 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4753 		vport->rss_algo = hash_algo;
4754 	}
4755 
4756 	/* Update the shadow RSS table with user specified qids */
4757 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4758 		vport->rss_indirection_tbl[i] = indir[i];
4759 
4760 	/* Update the hardware */
4761 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4762 }
4763 
4764 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4765 {
4766 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4767 
4768 	if (nfc->data & RXH_L4_B_2_3)
4769 		hash_sets |= HCLGE_D_PORT_BIT;
4770 	else
4771 		hash_sets &= ~HCLGE_D_PORT_BIT;
4772 
4773 	if (nfc->data & RXH_IP_SRC)
4774 		hash_sets |= HCLGE_S_IP_BIT;
4775 	else
4776 		hash_sets &= ~HCLGE_S_IP_BIT;
4777 
4778 	if (nfc->data & RXH_IP_DST)
4779 		hash_sets |= HCLGE_D_IP_BIT;
4780 	else
4781 		hash_sets &= ~HCLGE_D_IP_BIT;
4782 
4783 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4784 		hash_sets |= HCLGE_V_TAG_BIT;
4785 
4786 	return hash_sets;
4787 }
4788 
4789 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4790 				    struct ethtool_rxnfc *nfc,
4791 				    struct hclge_rss_input_tuple_cmd *req)
4792 {
4793 	struct hclge_dev *hdev = vport->back;
4794 	u8 tuple_sets;
4795 
4796 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4797 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4798 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4799 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4800 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4801 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4802 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4803 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4804 
4805 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4806 	switch (nfc->flow_type) {
4807 	case TCP_V4_FLOW:
4808 		req->ipv4_tcp_en = tuple_sets;
4809 		break;
4810 	case TCP_V6_FLOW:
4811 		req->ipv6_tcp_en = tuple_sets;
4812 		break;
4813 	case UDP_V4_FLOW:
4814 		req->ipv4_udp_en = tuple_sets;
4815 		break;
4816 	case UDP_V6_FLOW:
4817 		req->ipv6_udp_en = tuple_sets;
4818 		break;
4819 	case SCTP_V4_FLOW:
4820 		req->ipv4_sctp_en = tuple_sets;
4821 		break;
4822 	case SCTP_V6_FLOW:
4823 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4824 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4825 			return -EINVAL;
4826 
4827 		req->ipv6_sctp_en = tuple_sets;
4828 		break;
4829 	case IPV4_FLOW:
4830 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4831 		break;
4832 	case IPV6_FLOW:
4833 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4834 		break;
4835 	default:
4836 		return -EINVAL;
4837 	}
4838 
4839 	return 0;
4840 }
4841 
4842 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4843 			       struct ethtool_rxnfc *nfc)
4844 {
4845 	struct hclge_vport *vport = hclge_get_vport(handle);
4846 	struct hclge_dev *hdev = vport->back;
4847 	struct hclge_rss_input_tuple_cmd *req;
4848 	struct hclge_desc desc;
4849 	int ret;
4850 
4851 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4852 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4853 		return -EINVAL;
4854 
4855 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4856 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4857 
4858 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4859 	if (ret) {
4860 		dev_err(&hdev->pdev->dev,
4861 			"failed to init rss tuple cmd, ret = %d\n", ret);
4862 		return ret;
4863 	}
4864 
4865 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4866 	if (ret) {
4867 		dev_err(&hdev->pdev->dev,
4868 			"Set rss tuple fail, status = %d\n", ret);
4869 		return ret;
4870 	}
4871 
4872 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4873 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4874 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4875 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4876 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4877 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4878 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4879 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4880 	hclge_get_rss_type(vport);
4881 	return 0;
4882 }
4883 
4884 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4885 				     u8 *tuple_sets)
4886 {
4887 	switch (flow_type) {
4888 	case TCP_V4_FLOW:
4889 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4890 		break;
4891 	case UDP_V4_FLOW:
4892 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4893 		break;
4894 	case TCP_V6_FLOW:
4895 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4896 		break;
4897 	case UDP_V6_FLOW:
4898 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4899 		break;
4900 	case SCTP_V4_FLOW:
4901 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4902 		break;
4903 	case SCTP_V6_FLOW:
4904 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4905 		break;
4906 	case IPV4_FLOW:
4907 	case IPV6_FLOW:
4908 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4909 		break;
4910 	default:
4911 		return -EINVAL;
4912 	}
4913 
4914 	return 0;
4915 }
4916 
4917 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4918 {
4919 	u64 tuple_data = 0;
4920 
4921 	if (tuple_sets & HCLGE_D_PORT_BIT)
4922 		tuple_data |= RXH_L4_B_2_3;
4923 	if (tuple_sets & HCLGE_S_PORT_BIT)
4924 		tuple_data |= RXH_L4_B_0_1;
4925 	if (tuple_sets & HCLGE_D_IP_BIT)
4926 		tuple_data |= RXH_IP_DST;
4927 	if (tuple_sets & HCLGE_S_IP_BIT)
4928 		tuple_data |= RXH_IP_SRC;
4929 
4930 	return tuple_data;
4931 }
4932 
4933 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4934 			       struct ethtool_rxnfc *nfc)
4935 {
4936 	struct hclge_vport *vport = hclge_get_vport(handle);
4937 	u8 tuple_sets;
4938 	int ret;
4939 
4940 	nfc->data = 0;
4941 
4942 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4943 	if (ret || !tuple_sets)
4944 		return ret;
4945 
4946 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4947 
4948 	return 0;
4949 }
4950 
4951 static int hclge_get_tc_size(struct hnae3_handle *handle)
4952 {
4953 	struct hclge_vport *vport = hclge_get_vport(handle);
4954 	struct hclge_dev *hdev = vport->back;
4955 
4956 	return hdev->pf_rss_size_max;
4957 }
4958 
4959 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4960 {
4961 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4962 	struct hclge_vport *vport = hdev->vport;
4963 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4964 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4965 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4966 	struct hnae3_tc_info *tc_info;
4967 	u16 roundup_size;
4968 	u16 rss_size;
4969 	int i;
4970 
4971 	tc_info = &vport->nic.kinfo.tc_info;
4972 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4973 		rss_size = tc_info->tqp_count[i];
4974 		tc_valid[i] = 0;
4975 
4976 		if (!(hdev->hw_tc_map & BIT(i)))
4977 			continue;
4978 
		/* tc_size set to hardware is the log2 of the roundup power of
		 * two of rss_size; the actual queue size is limited by the
		 * indirection table.
		 */
4983 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4984 		    rss_size == 0) {
4985 			dev_err(&hdev->pdev->dev,
4986 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4987 				rss_size);
4988 			return -EINVAL;
4989 		}
4990 
4991 		roundup_size = roundup_pow_of_two(rss_size);
4992 		roundup_size = ilog2(roundup_size);
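		/* e.g. rss_size = 12: roundup_pow_of_two() = 16, ilog2() = 4 */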
4993 
4994 		tc_valid[i] = 1;
4995 		tc_size[i] = roundup_size;
4996 		tc_offset[i] = tc_info->tqp_offset[i];
4997 	}
4998 
4999 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5000 }
5001 
5002 int hclge_rss_init_hw(struct hclge_dev *hdev)
5003 {
5004 	struct hclge_vport *vport = hdev->vport;
5005 	u16 *rss_indir = vport[0].rss_indirection_tbl;
5006 	u8 *key = vport[0].rss_hash_key;
5007 	u8 hfunc = vport[0].rss_algo;
5008 	int ret;
5009 
5010 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
5011 	if (ret)
5012 		return ret;
5013 
5014 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5015 	if (ret)
5016 		return ret;
5017 
5018 	ret = hclge_set_rss_input_tuple(hdev);
5019 	if (ret)
5020 		return ret;
5021 
5022 	return hclge_init_rss_tc_mode(hdev);
5023 }
5024 
5025 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5026 {
5027 	struct hclge_vport *vport = &hdev->vport[0];
5028 	int i;
5029 
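	/* spread table entries round-robin across the allocated RSS queues */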
5030 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5031 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5032 }
5033 
5034 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5035 {
5036 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5037 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5038 	struct hclge_vport *vport = &hdev->vport[0];
5039 	u16 *rss_ind_tbl;
5040 
5041 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5042 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5043 
5044 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5045 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5046 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5047 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5048 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5049 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5050 	vport->rss_tuple_sets.ipv6_sctp_en =
5051 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5052 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5053 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5054 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5055 
5056 	vport->rss_algo = rss_algo;
5057 
5058 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5059 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5060 	if (!rss_ind_tbl)
5061 		return -ENOMEM;
5062 
5063 	vport->rss_indirection_tbl = rss_ind_tbl;
5064 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5065 
5066 	hclge_rss_indir_init_cfg(hdev);
5067 
5068 	return 0;
5069 }
5070 
5071 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5072 				int vector_id, bool en,
5073 				struct hnae3_ring_chain_node *ring_chain)
5074 {
5075 	struct hclge_dev *hdev = vport->back;
5076 	struct hnae3_ring_chain_node *node;
5077 	struct hclge_desc desc;
5078 	struct hclge_ctrl_vector_chain_cmd *req =
5079 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5080 	enum hclge_cmd_status status;
5081 	enum hclge_opcode_type op;
5082 	u16 tqp_type_and_id;
5083 	int i;
5084 
5085 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5086 	hclge_cmd_setup_basic_desc(&desc, op, false);
5087 	req->int_vector_id_l = hnae3_get_field(vector_id,
5088 					       HCLGE_VECTOR_ID_L_M,
5089 					       HCLGE_VECTOR_ID_L_S);
5090 	req->int_vector_id_h = hnae3_get_field(vector_id,
5091 					       HCLGE_VECTOR_ID_H_M,
5092 					       HCLGE_VECTOR_ID_H_S);
5093 
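	/* walk the ring chain and pack up to HCLGE_VECTOR_ELEMENTS_PER_CMD
	 * ring entries into each command; when a descriptor fills up it is
	 * sent and a fresh one is prepared, and any remainder is flushed
	 * after the loop
	 */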
5094 	i = 0;
5095 	for (node = ring_chain; node; node = node->next) {
5096 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5097 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5098 				HCLGE_INT_TYPE_S,
5099 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5100 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5101 				HCLGE_TQP_ID_S, node->tqp_index);
5102 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5103 				HCLGE_INT_GL_IDX_S,
5104 				hnae3_get_field(node->int_gl_idx,
5105 						HNAE3_RING_GL_IDX_M,
5106 						HNAE3_RING_GL_IDX_S));
5107 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5108 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5109 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5110 			req->vfid = vport->vport_id;
5111 
5112 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5113 			if (status) {
5114 				dev_err(&hdev->pdev->dev,
5115 					"Map TQP fail, status is %d.\n",
5116 					status);
5117 				return -EIO;
5118 			}
5119 			i = 0;
5120 
5121 			hclge_cmd_setup_basic_desc(&desc,
5122 						   op,
5123 						   false);
5124 			req->int_vector_id_l =
5125 				hnae3_get_field(vector_id,
5126 						HCLGE_VECTOR_ID_L_M,
5127 						HCLGE_VECTOR_ID_L_S);
5128 			req->int_vector_id_h =
5129 				hnae3_get_field(vector_id,
5130 						HCLGE_VECTOR_ID_H_M,
5131 						HCLGE_VECTOR_ID_H_S);
5132 		}
5133 	}
5134 
5135 	if (i > 0) {
5136 		req->int_cause_num = i;
5137 		req->vfid = vport->vport_id;
5138 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5139 		if (status) {
5140 			dev_err(&hdev->pdev->dev,
5141 				"Map TQP fail, status is %d.\n", status);
5142 			return -EIO;
5143 		}
5144 	}
5145 
5146 	return 0;
5147 }
5148 
5149 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5150 				    struct hnae3_ring_chain_node *ring_chain)
5151 {
5152 	struct hclge_vport *vport = hclge_get_vport(handle);
5153 	struct hclge_dev *hdev = vport->back;
5154 	int vector_id;
5155 
5156 	vector_id = hclge_get_vector_index(hdev, vector);
5157 	if (vector_id < 0) {
5158 		dev_err(&hdev->pdev->dev,
5159 			"failed to get vector index. vector=%d\n", vector);
5160 		return vector_id;
5161 	}
5162 
5163 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5164 }
5165 
5166 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5167 				       struct hnae3_ring_chain_node *ring_chain)
5168 {
5169 	struct hclge_vport *vport = hclge_get_vport(handle);
5170 	struct hclge_dev *hdev = vport->back;
5171 	int vector_id, ret;
5172 
5173 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5174 		return 0;
5175 
5176 	vector_id = hclge_get_vector_index(hdev, vector);
5177 	if (vector_id < 0) {
5178 		dev_err(&handle->pdev->dev,
5179 			"Get vector index fail. ret =%d\n", vector_id);
5180 		return vector_id;
5181 	}
5182 
5183 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5184 	if (ret)
5185 		dev_err(&handle->pdev->dev,
5186 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5187 			vector_id, ret);
5188 
5189 	return ret;
5190 }
5191 
5192 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5193 				      bool en_uc, bool en_mc, bool en_bc)
5194 {
5195 	struct hclge_vport *vport = &hdev->vport[vf_id];
5196 	struct hnae3_handle *handle = &vport->nic;
5197 	struct hclge_promisc_cfg_cmd *req;
5198 	struct hclge_desc desc;
5199 	bool uc_tx_en = en_uc;
5200 	u8 promisc_cfg = 0;
5201 	int ret;
5202 
5203 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5204 
5205 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5206 	req->vf_id = vf_id;
5207 
5208 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5209 		uc_tx_en = false;
5210 
5211 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5212 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5213 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5214 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5215 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5216 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5217 	req->extend_promisc = promisc_cfg;
5218 
5219 	/* to be compatible with DEVICE_VERSION_V1/2 */
5220 	promisc_cfg = 0;
5221 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5222 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5223 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5224 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5225 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5226 	req->promisc = promisc_cfg;
5227 
5228 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5229 	if (ret)
5230 		dev_err(&hdev->pdev->dev,
5231 			"failed to set vport %u promisc mode, ret = %d.\n",
5232 			vf_id, ret);
5233 
5234 	return ret;
5235 }
5236 
5237 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5238 				 bool en_mc_pmc, bool en_bc_pmc)
5239 {
5240 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5241 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5242 }
5243 
5244 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5245 				  bool en_mc_pmc)
5246 {
5247 	struct hclge_vport *vport = hclge_get_vport(handle);
5248 	struct hclge_dev *hdev = vport->back;
5249 	bool en_bc_pmc = true;
5250 
	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should be disabled until the user enables promisc mode.
	 */
5255 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5256 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5257 
5258 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5259 					    en_bc_pmc);
5260 }
5261 
5262 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5263 {
5264 	struct hclge_vport *vport = hclge_get_vport(handle);
5265 
5266 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5267 }
5268 
5269 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5270 {
5271 	if (hlist_empty(&hdev->fd_rule_list))
5272 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5273 }
5274 
5275 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5276 {
5277 	if (!test_bit(location, hdev->fd_bmap)) {
5278 		set_bit(location, hdev->fd_bmap);
5279 		hdev->hclge_fd_rule_num++;
5280 	}
5281 }
5282 
5283 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5284 {
5285 	if (test_bit(location, hdev->fd_bmap)) {
5286 		clear_bit(location, hdev->fd_bmap);
5287 		hdev->hclge_fd_rule_num--;
5288 	}
5289 }
5290 
5291 static void hclge_fd_free_node(struct hclge_dev *hdev,
5292 			       struct hclge_fd_rule *rule)
5293 {
5294 	hlist_del(&rule->rule_node);
5295 	kfree(rule);
5296 	hclge_sync_fd_state(hdev);
5297 }
5298 
5299 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5300 				      struct hclge_fd_rule *old_rule,
5301 				      struct hclge_fd_rule *new_rule,
5302 				      enum HCLGE_FD_NODE_STATE state)
5303 {
5304 	switch (state) {
5305 	case HCLGE_FD_TO_ADD:
5306 	case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * at the same location, no matter what its state is, because
		 * the new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule has
		 * already been configured to the hardware, so just replace
		 * the old rule node at the same location.
		 * 3) since no new node is added to the list in either case,
		 * there is no need to update the rule number and fd_bmap.
		 */
5316 		new_rule->rule_node.next = old_rule->rule_node.next;
5317 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5318 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5319 		kfree(new_rule);
5320 		break;
5321 	case HCLGE_FD_DELETED:
5322 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5323 		hclge_fd_free_node(hdev, old_rule);
5324 		break;
5325 	case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL and the old rule exists:
		 * 1) if the old rule's state is TO_DEL, do nothing, because
		 * rules are deleted by location and the other rule content
		 * is irrelevant.
		 * 2) if the old rule's state is ACTIVE, change it to TO_DEL,
		 * so the rule will be deleted when the periodic task is
		 * scheduled.
		 * 3) if the old rule's state is TO_ADD, the rule hasn't been
		 * added to the hardware yet, so just delete the rule node
		 * from fd_rule_list directly.
		 */
5337 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5338 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5339 			hclge_fd_free_node(hdev, old_rule);
5340 			return;
5341 		}
5342 		old_rule->state = HCLGE_FD_TO_DEL;
5343 		break;
5344 	}
5345 }
5346 
5347 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5348 						u16 location,
5349 						struct hclge_fd_rule **parent)
5350 {
5351 	struct hclge_fd_rule *rule;
5352 	struct hlist_node *node;
5353 
5354 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5355 		if (rule->location == location)
5356 			return rule;
5357 		else if (rule->location > location)
5358 			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
5362 		*parent = rule;
5363 	}
5364 
5365 	return NULL;
5366 }
5367 
/* insert fd rule node in ascending order according to rule->location */
5369 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5370 				      struct hclge_fd_rule *rule,
5371 				      struct hclge_fd_rule *parent)
5372 {
5373 	INIT_HLIST_NODE(&rule->rule_node);
5374 
5375 	if (parent)
5376 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5377 	else
5378 		hlist_add_head(&rule->rule_node, hlist);
5379 }
5380 
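/* Write the user-def offset configuration of the OL2/OL3/OL4 layers to the
 * hardware. A layer is enabled only while its reference count is non-zero.
 */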
5381 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5382 				     struct hclge_fd_user_def_cfg *cfg)
5383 {
5384 	struct hclge_fd_user_def_cfg_cmd *req;
5385 	struct hclge_desc desc;
5386 	u16 data = 0;
5387 	int ret;
5388 
5389 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5390 
5391 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5392 
5393 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5394 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5395 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5396 	req->ol2_cfg = cpu_to_le16(data);
5397 
5398 	data = 0;
5399 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5400 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5401 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5402 	req->ol3_cfg = cpu_to_le16(data);
5403 
5404 	data = 0;
5405 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5406 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5407 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5408 	req->ol4_cfg = cpu_to_le16(data);
5409 
5410 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5411 	if (ret)
5412 		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret = %d\n", ret);
5414 	return ret;
5415 }
5416 
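/* Flush the user-def configuration to the hardware if it has been marked as
 * changed. On failure the changed flag is set again so that the write is
 * retried later. @locked tells whether the caller already holds
 * fd_rule_lock.
 */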
5417 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5418 {
5419 	int ret;
5420 
5421 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5422 		return;
5423 
5424 	if (!locked)
5425 		spin_lock_bh(&hdev->fd_rule_lock);
5426 
5427 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5428 	if (ret)
5429 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5430 
5431 	if (!locked)
5432 		spin_unlock_bh(&hdev->fd_rule_lock);
5433 }
5434 
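/* Check whether the user-def offset required by the new rule conflicts with
 * the offset already configured for its layer. The conflict is tolerated
 * only when the new rule replaces the single existing rule that owns the
 * current offset.
 */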
5435 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5436 					  struct hclge_fd_rule *rule)
5437 {
5438 	struct hlist_head *hlist = &hdev->fd_rule_list;
5439 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5440 	struct hclge_fd_user_def_info *info, *old_info;
5441 	struct hclge_fd_user_def_cfg *cfg;
5442 
5443 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5444 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5445 		return 0;
5446 
	/* valid layers start from 1, so subtract 1 to index the cfg array */
5448 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5449 	info = &rule->ep.user_def;
5450 
5451 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5452 		return 0;
5453 
5454 	if (cfg->ref_cnt > 1)
5455 		goto error;
5456 
5457 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5458 	if (fd_rule) {
5459 		old_info = &fd_rule->ep.user_def;
5460 		if (info->layer == old_info->layer)
5461 			return 0;
5462 	}
5463 
5464 error:
5465 	dev_err(&hdev->pdev->dev,
		"No available offset for layer %d fd rule, each layer only supports one user def offset.\n",
5467 		info->layer + 1);
5468 	return -ENOSPC;
5469 }
5470 
5471 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5472 					 struct hclge_fd_rule *rule)
5473 {
5474 	struct hclge_fd_user_def_cfg *cfg;
5475 
5476 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5477 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5478 		return;
5479 
5480 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5481 	if (!cfg->ref_cnt) {
5482 		cfg->offset = rule->ep.user_def.offset;
5483 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5484 	}
5485 	cfg->ref_cnt++;
5486 }
5487 
5488 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5489 					 struct hclge_fd_rule *rule)
5490 {
5491 	struct hclge_fd_user_def_cfg *cfg;
5492 
5493 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5494 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5495 		return;
5496 
5497 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5498 	if (!cfg->ref_cnt)
5499 		return;
5500 
5501 	cfg->ref_cnt--;
5502 	if (!cfg->ref_cnt) {
5503 		cfg->offset = 0;
5504 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5505 	}
5506 }
5507 
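/* Update the fd rule list for the rule at @location: adjust the user-def
 * reference counts, then replace, delete or insert the rule node according
 * to @state. The caller must hold fd_rule_lock.
 */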
5508 static void hclge_update_fd_list(struct hclge_dev *hdev,
5509 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5510 				 struct hclge_fd_rule *new_rule)
5511 {
5512 	struct hlist_head *hlist = &hdev->fd_rule_list;
5513 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5514 
5515 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5516 	if (fd_rule) {
5517 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5518 		if (state == HCLGE_FD_ACTIVE)
5519 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5520 		hclge_sync_fd_user_def_cfg(hdev, true);
5521 
5522 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5523 		return;
5524 	}
5525 
	/* it's unlikely to fail here, because we have already checked that
	 * the rule exists.
	 */
5529 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5530 		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
5532 			 location);
5533 		return;
5534 	}
5535 
5536 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5537 	hclge_sync_fd_user_def_cfg(hdev, true);
5538 
5539 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5540 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5541 
5542 	if (state == HCLGE_FD_TO_ADD) {
5543 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5544 		hclge_task_schedule(hdev, 0);
5545 	}
5546 }
5547 
5548 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5549 {
5550 	struct hclge_get_fd_mode_cmd *req;
5551 	struct hclge_desc desc;
5552 	int ret;
5553 
5554 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5555 
5556 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5557 
5558 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5559 	if (ret) {
5560 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5561 		return ret;
5562 	}
5563 
5564 	*fd_mode = req->mode;
5565 
5566 	return ret;
5567 }
5568 
5569 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5570 				   u32 *stage1_entry_num,
5571 				   u32 *stage2_entry_num,
5572 				   u16 *stage1_counter_num,
5573 				   u16 *stage2_counter_num)
5574 {
5575 	struct hclge_get_fd_allocation_cmd *req;
5576 	struct hclge_desc desc;
5577 	int ret;
5578 
5579 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5580 
5581 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5582 
5583 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5584 	if (ret) {
5585 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5586 			ret);
5587 		return ret;
5588 	}
5589 
5590 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5591 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5592 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5593 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5594 
5595 	return ret;
5596 }
5597 
5598 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5599 				   enum HCLGE_FD_STAGE stage_num)
5600 {
5601 	struct hclge_set_fd_key_config_cmd *req;
5602 	struct hclge_fd_key_cfg *stage;
5603 	struct hclge_desc desc;
5604 	int ret;
5605 
5606 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5607 
5608 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5609 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5610 	req->stage = stage_num;
5611 	req->key_select = stage->key_sel;
5612 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5613 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5614 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5615 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5616 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5617 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5618 
5619 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5620 	if (ret)
5621 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5622 
5623 	return ret;
5624 }
5625 
5626 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5627 {
5628 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5629 
5630 	spin_lock_bh(&hdev->fd_rule_lock);
5631 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5632 	spin_unlock_bh(&hdev->fd_rule_lock);
5633 
5634 	hclge_fd_set_user_def_cmd(hdev, cfg);
5635 }
5636 
5637 static int hclge_init_fd_config(struct hclge_dev *hdev)
5638 {
5639 #define LOW_2_WORDS		0x03
5640 	struct hclge_fd_key_cfg *key_cfg;
5641 	int ret;
5642 
5643 	if (!hnae3_dev_fd_supported(hdev))
5644 		return 0;
5645 
5646 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5647 	if (ret)
5648 		return ret;
5649 
5650 	switch (hdev->fd_cfg.fd_mode) {
5651 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5652 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5653 		break;
5654 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5655 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5656 		break;
5657 	default:
5658 		dev_err(&hdev->pdev->dev,
5659 			"Unsupported flow director mode %u\n",
5660 			hdev->fd_cfg.fd_mode);
5661 		return -EOPNOTSUPP;
5662 	}
5663 
5664 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5665 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5666 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5667 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5668 	key_cfg->outer_sipv6_word_en = 0;
5669 	key_cfg->outer_dipv6_word_en = 0;
5670 
5671 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5672 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5673 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5674 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5675 
	/* If the max 400-bit key is used, tuples for ether type can be supported */
5677 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5678 		key_cfg->tuple_active |=
5679 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5680 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5681 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5682 	}
5683 
5684 	/* roce_type is used to filter roce frames
5685 	 * dst_vport is used to specify the rule
5686 	 */
5687 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5688 
5689 	ret = hclge_get_fd_allocation(hdev,
5690 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5691 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5692 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5693 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5694 	if (ret)
5695 		return ret;
5696 
5697 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5698 }
5699 
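/* Write one flow director TCAM entry. The key is carried by three chained
 * descriptors. @sel_x selects the x key (which also controls the entry valid
 * bit via @is_add) or the y key. @key may be NULL when only the valid bit
 * needs to be updated.
 */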
5700 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5701 				int loc, u8 *key, bool is_add)
5702 {
5703 	struct hclge_fd_tcam_config_1_cmd *req1;
5704 	struct hclge_fd_tcam_config_2_cmd *req2;
5705 	struct hclge_fd_tcam_config_3_cmd *req3;
5706 	struct hclge_desc desc[3];
5707 	int ret;
5708 
5709 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5710 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5711 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5712 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5713 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5714 
5715 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5716 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5717 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5718 
5719 	req1->stage = stage;
5720 	req1->xy_sel = sel_x ? 1 : 0;
5721 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5722 	req1->index = cpu_to_le32(loc);
5723 	req1->entry_vld = sel_x ? is_add : 0;
5724 
5725 	if (key) {
5726 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5727 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5728 		       sizeof(req2->tcam_data));
5729 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5730 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5731 	}
5732 
5733 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5734 	if (ret)
5735 		dev_err(&hdev->pdev->dev,
5736 			"config tcam key fail, ret=%d\n",
5737 			ret);
5738 
5739 	return ret;
5740 }
5741 
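/* Write the action data for the rule at @loc: drop the packet or forward it
 * to a queue, optionally override the TC when the device supports it, and
 * configure counter usage and rule id reporting.
 */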
5742 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5743 			      struct hclge_fd_ad_data *action)
5744 {
5745 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5746 	struct hclge_fd_ad_config_cmd *req;
5747 	struct hclge_desc desc;
5748 	u64 ad_data = 0;
5749 	int ret;
5750 
5751 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5752 
5753 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5754 	req->index = cpu_to_le32(loc);
5755 	req->stage = stage;
5756 
5757 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5758 		      action->write_rule_id_to_bd);
5759 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5760 			action->rule_id);
5761 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5762 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5763 			      action->override_tc);
5764 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5765 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5766 	}
5767 	ad_data <<= 32;
5768 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5769 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5770 		      action->forward_to_direct_queue);
5771 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5772 			action->queue_id);
5773 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5774 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5775 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5776 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5777 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5778 			action->counter_id);
5779 
5780 	req->ad_data = cpu_to_le64(ad_data);
5781 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5782 	if (ret)
5783 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5784 
5785 	return ret;
5786 }
5787 
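/* Convert one tuple of @rule into the x/y TCAM key format, handling the u8,
 * le16, le32, MAC and IP tuple layouts. Unused tuples are skipped and their
 * key bytes stay zero, but they still occupy space in the key. Returns false
 * for unknown tuple types.
 */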
5788 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5789 				   struct hclge_fd_rule *rule)
5790 {
5791 	int offset, moffset, ip_offset;
5792 	enum HCLGE_FD_KEY_OPT key_opt;
5793 	u16 tmp_x_s, tmp_y_s;
5794 	u32 tmp_x_l, tmp_y_l;
5795 	u8 *p = (u8 *)rule;
5796 	int i;
5797 
5798 	if (rule->unused_tuple & BIT(tuple_bit))
5799 		return true;
5800 
5801 	key_opt = tuple_key_info[tuple_bit].key_opt;
5802 	offset = tuple_key_info[tuple_bit].offset;
5803 	moffset = tuple_key_info[tuple_bit].moffset;
5804 
5805 	switch (key_opt) {
5806 	case KEY_OPT_U8:
5807 		calc_x(*key_x, p[offset], p[moffset]);
5808 		calc_y(*key_y, p[offset], p[moffset]);
5809 
5810 		return true;
5811 	case KEY_OPT_LE16:
5812 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5813 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5814 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5815 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5816 
5817 		return true;
5818 	case KEY_OPT_LE32:
5819 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5820 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5821 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5822 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5823 
5824 		return true;
5825 	case KEY_OPT_MAC:
5826 		for (i = 0; i < ETH_ALEN; i++) {
5827 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5828 			       p[moffset + i]);
5829 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5830 			       p[moffset + i]);
5831 		}
5832 
5833 		return true;
5834 	case KEY_OPT_IP:
5835 		ip_offset = IPV4_INDEX * sizeof(u32);
5836 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5837 		       *(u32 *)(&p[moffset + ip_offset]));
5838 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5839 		       *(u32 *)(&p[moffset + ip_offset]));
5840 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5841 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5842 
5843 		return true;
5844 	default:
5845 		return false;
5846 	}
5847 }
5848 
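/* Encode a port number for the meta data key: host ports are identified by
 * pf_id/vf_id, network ports by network_port_id.
 */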
5849 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5850 				 u8 vf_id, u8 network_port_id)
5851 {
5852 	u32 port_number = 0;
5853 
5854 	if (port_type == HOST_PORT) {
5855 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5856 				pf_id);
5857 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5858 				vf_id);
5859 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5860 	} else {
5861 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5862 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5863 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5864 	}
5865 
5866 	return port_number;
5867 }
5868 
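/* Pack the active meta data fields (packet type and destination vport) into
 * the meta data key, convert it to the x/y format and left-align the used
 * bits within the 32-bit meta data word.
 */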
5869 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5870 				       __le32 *key_x, __le32 *key_y,
5871 				       struct hclge_fd_rule *rule)
5872 {
5873 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5874 	u8 cur_pos = 0, tuple_size, shift_bits;
5875 	unsigned int i;
5876 
5877 	for (i = 0; i < MAX_META_DATA; i++) {
5878 		tuple_size = meta_data_key_info[i].key_length;
5879 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5880 
5881 		switch (tuple_bit) {
5882 		case BIT(ROCE_TYPE):
5883 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5884 			cur_pos += tuple_size;
5885 			break;
5886 		case BIT(DST_VPORT):
5887 			port_number = hclge_get_port_number(HOST_PORT, 0,
5888 							    rule->vf_id, 0);
5889 			hnae3_set_field(meta_data,
5890 					GENMASK(cur_pos + tuple_size, cur_pos),
5891 					cur_pos, port_number);
5892 			cur_pos += tuple_size;
5893 			break;
5894 		default:
5895 			break;
5896 		}
5897 	}
5898 
5899 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5900 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5901 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5902 
5903 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5904 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5905 }
5906 
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region, the tuple key is stored in
 * the LSB region, and unused bits are filled with 0.
 */
5911 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5912 			    struct hclge_fd_rule *rule)
5913 {
5914 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5915 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5916 	u8 *cur_key_x, *cur_key_y;
5917 	u8 meta_data_region;
5918 	u8 tuple_size;
5919 	int ret;
5920 	u32 i;
5921 
5922 	memset(key_x, 0, sizeof(key_x));
5923 	memset(key_y, 0, sizeof(key_y));
5924 	cur_key_x = key_x;
5925 	cur_key_y = key_y;
5926 
	for (i = 0; i < MAX_TUPLE; i++) {
5928 		bool tuple_valid;
5929 
5930 		tuple_size = tuple_key_info[i].key_length / 8;
5931 		if (!(key_cfg->tuple_active & BIT(i)))
5932 			continue;
5933 
5934 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5935 						     cur_key_y, rule);
5936 		if (tuple_valid) {
5937 			cur_key_x += tuple_size;
5938 			cur_key_y += tuple_size;
5939 		}
5940 	}
5941 
5942 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5943 			MAX_META_DATA_LENGTH / 8;
5944 
5945 	hclge_fd_convert_meta_data(key_cfg,
5946 				   (__le32 *)(key_x + meta_data_region),
5947 				   (__le32 *)(key_y + meta_data_region),
5948 				   rule);
5949 
5950 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5951 				   true);
5952 	if (ret) {
5953 		dev_err(&hdev->pdev->dev,
5954 			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5956 		return ret;
5957 	}
5958 
5959 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5960 				   true);
5961 	if (ret)
5962 		dev_err(&hdev->pdev->dev,
5963 			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5965 	return ret;
5966 }
5967 
5968 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5969 			       struct hclge_fd_rule *rule)
5970 {
5971 	struct hclge_vport *vport = hdev->vport;
5972 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5973 	struct hclge_fd_ad_data ad_data;
5974 
5975 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5976 	ad_data.ad_id = rule->location;
5977 
5978 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5979 		ad_data.drop_packet = true;
5980 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5981 		ad_data.override_tc = true;
5982 		ad_data.queue_id =
5983 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5984 		ad_data.tc_size =
5985 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5986 	} else {
5987 		ad_data.forward_to_direct_queue = true;
5988 		ad_data.queue_id = rule->queue_id;
5989 	}
5990 
5991 	ad_data.use_counter = false;
5992 	ad_data.counter_id = 0;
5993 
5994 	ad_data.use_next_stage = false;
5995 	ad_data.next_input_key = 0;
5996 
5997 	ad_data.write_rule_id_to_bd = true;
5998 	ad_data.rule_id = rule->location;
5999 
6000 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6001 }
6002 
6003 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6004 				       u32 *unused_tuple)
6005 {
6006 	if (!spec || !unused_tuple)
6007 		return -EINVAL;
6008 
6009 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6010 
6011 	if (!spec->ip4src)
6012 		*unused_tuple |= BIT(INNER_SRC_IP);
6013 
6014 	if (!spec->ip4dst)
6015 		*unused_tuple |= BIT(INNER_DST_IP);
6016 
6017 	if (!spec->psrc)
6018 		*unused_tuple |= BIT(INNER_SRC_PORT);
6019 
6020 	if (!spec->pdst)
6021 		*unused_tuple |= BIT(INNER_DST_PORT);
6022 
6023 	if (!spec->tos)
6024 		*unused_tuple |= BIT(INNER_IP_TOS);
6025 
6026 	return 0;
6027 }
6028 
6029 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6030 				    u32 *unused_tuple)
6031 {
6032 	if (!spec || !unused_tuple)
6033 		return -EINVAL;
6034 
6035 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6036 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6037 
6038 	if (!spec->ip4src)
6039 		*unused_tuple |= BIT(INNER_SRC_IP);
6040 
6041 	if (!spec->ip4dst)
6042 		*unused_tuple |= BIT(INNER_DST_IP);
6043 
6044 	if (!spec->tos)
6045 		*unused_tuple |= BIT(INNER_IP_TOS);
6046 
6047 	if (!spec->proto)
6048 		*unused_tuple |= BIT(INNER_IP_PROTO);
6049 
6050 	if (spec->l4_4_bytes)
6051 		return -EOPNOTSUPP;
6052 
6053 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6054 		return -EOPNOTSUPP;
6055 
6056 	return 0;
6057 }
6058 
6059 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6060 				       u32 *unused_tuple)
6061 {
6062 	if (!spec || !unused_tuple)
6063 		return -EINVAL;
6064 
6065 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6066 
	/* check whether the src/dst ip address is used */
6068 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6069 		*unused_tuple |= BIT(INNER_SRC_IP);
6070 
6071 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6072 		*unused_tuple |= BIT(INNER_DST_IP);
6073 
6074 	if (!spec->psrc)
6075 		*unused_tuple |= BIT(INNER_SRC_PORT);
6076 
6077 	if (!spec->pdst)
6078 		*unused_tuple |= BIT(INNER_DST_PORT);
6079 
6080 	if (!spec->tclass)
6081 		*unused_tuple |= BIT(INNER_IP_TOS);
6082 
6083 	return 0;
6084 }
6085 
6086 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6087 				    u32 *unused_tuple)
6088 {
6089 	if (!spec || !unused_tuple)
6090 		return -EINVAL;
6091 
6092 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6093 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6094 
	/* check whether the src/dst ip address is used */
6096 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6097 		*unused_tuple |= BIT(INNER_SRC_IP);
6098 
6099 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6100 		*unused_tuple |= BIT(INNER_DST_IP);
6101 
6102 	if (!spec->l4_proto)
6103 		*unused_tuple |= BIT(INNER_IP_PROTO);
6104 
6105 	if (!spec->tclass)
6106 		*unused_tuple |= BIT(INNER_IP_TOS);
6107 
6108 	if (spec->l4_4_bytes)
6109 		return -EOPNOTSUPP;
6110 
6111 	return 0;
6112 }
6113 
6114 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6115 {
6116 	if (!spec || !unused_tuple)
6117 		return -EINVAL;
6118 
6119 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6120 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6121 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6122 
6123 	if (is_zero_ether_addr(spec->h_source))
6124 		*unused_tuple |= BIT(INNER_SRC_MAC);
6125 
6126 	if (is_zero_ether_addr(spec->h_dest))
6127 		*unused_tuple |= BIT(INNER_DST_MAC);
6128 
6129 	if (!spec->h_proto)
6130 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6131 
6132 	return 0;
6133 }
6134 
6135 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6136 				    struct ethtool_rx_flow_spec *fs,
6137 				    u32 *unused_tuple)
6138 {
6139 	if (fs->flow_type & FLOW_EXT) {
6140 		if (fs->h_ext.vlan_etype) {
6141 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6142 			return -EOPNOTSUPP;
6143 		}
6144 
6145 		if (!fs->h_ext.vlan_tci)
6146 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6147 
6148 		if (fs->m_ext.vlan_tci &&
6149 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6150 			dev_err(&hdev->pdev->dev,
6151 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6152 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6153 			return -EINVAL;
6154 		}
6155 	} else {
6156 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6157 	}
6158 
6159 	if (fs->flow_type & FLOW_MAC_EXT) {
6160 		if (hdev->fd_cfg.fd_mode !=
6161 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6162 			dev_err(&hdev->pdev->dev,
6163 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6164 			return -EOPNOTSUPP;
6165 		}
6166 
6167 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6168 			*unused_tuple |= BIT(INNER_DST_MAC);
6169 		else
6170 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6171 	}
6172 
6173 	return 0;
6174 }
6175 
6176 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6177 				       struct hclge_fd_user_def_info *info)
6178 {
6179 	switch (flow_type) {
6180 	case ETHER_FLOW:
6181 		info->layer = HCLGE_FD_USER_DEF_L2;
6182 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6183 		break;
6184 	case IP_USER_FLOW:
6185 	case IPV6_USER_FLOW:
6186 		info->layer = HCLGE_FD_USER_DEF_L3;
6187 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6188 		break;
6189 	case TCP_V4_FLOW:
6190 	case UDP_V4_FLOW:
6191 	case TCP_V6_FLOW:
6192 	case UDP_V6_FLOW:
6193 		info->layer = HCLGE_FD_USER_DEF_L4;
6194 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6195 		break;
6196 	default:
6197 		return -EOPNOTSUPP;
6198 	}
6199 
6200 	return 0;
6201 }
6202 
6203 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6204 {
6205 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6206 }
6207 
6208 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6209 					 struct ethtool_rx_flow_spec *fs,
6210 					 u32 *unused_tuple,
6211 					 struct hclge_fd_user_def_info *info)
6212 {
6213 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6214 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6215 	u16 data, offset, data_mask, offset_mask;
6216 	int ret;
6217 
6218 	info->layer = HCLGE_FD_USER_DEF_NONE;
6219 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6220 
6221 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6222 		return 0;
6223 
	/* the user-def data from ethtool is a 64 bit value: bits 0~15 are
	 * used for the data, and bits 32~47 are used for the offset.
	 */
6227 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6228 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6229 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6230 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6231 
6232 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6233 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6234 		return -EOPNOTSUPP;
6235 	}
6236 
6237 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6238 		dev_err(&hdev->pdev->dev,
6239 			"user-def offset[%u] should be no more than %u\n",
6240 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6241 		return -EINVAL;
6242 	}
6243 
6244 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6245 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6246 		return -EINVAL;
6247 	}
6248 
6249 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6250 	if (ret) {
6251 		dev_err(&hdev->pdev->dev,
6252 			"unsupported flow type for user-def bytes, ret = %d\n",
6253 			ret);
6254 		return ret;
6255 	}
6256 
6257 	info->data = data;
6258 	info->data_mask = data_mask;
6259 	info->offset = offset;
6260 
6261 	return 0;
6262 }
6263 
6264 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6265 			       struct ethtool_rx_flow_spec *fs,
6266 			       u32 *unused_tuple,
6267 			       struct hclge_fd_user_def_info *info)
6268 {
6269 	u32 flow_type;
6270 	int ret;
6271 
6272 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6273 		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
6275 			fs->location,
6276 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6277 		return -EINVAL;
6278 	}
6279 
6280 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6281 	if (ret)
6282 		return ret;
6283 
6284 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6285 	switch (flow_type) {
6286 	case SCTP_V4_FLOW:
6287 	case TCP_V4_FLOW:
6288 	case UDP_V4_FLOW:
6289 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6290 						  unused_tuple);
6291 		break;
6292 	case IP_USER_FLOW:
6293 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6294 					       unused_tuple);
6295 		break;
6296 	case SCTP_V6_FLOW:
6297 	case TCP_V6_FLOW:
6298 	case UDP_V6_FLOW:
6299 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6300 						  unused_tuple);
6301 		break;
6302 	case IPV6_USER_FLOW:
6303 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6304 					       unused_tuple);
6305 		break;
6306 	case ETHER_FLOW:
6307 		if (hdev->fd_cfg.fd_mode !=
6308 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6309 			dev_err(&hdev->pdev->dev,
6310 				"ETHER_FLOW is not supported in current fd mode!\n");
6311 			return -EOPNOTSUPP;
6312 		}
6313 
6314 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6315 						 unused_tuple);
6316 		break;
6317 	default:
6318 		dev_err(&hdev->pdev->dev,
6319 			"unsupported protocol type, protocol type = %#x\n",
6320 			flow_type);
6321 		return -EOPNOTSUPP;
6322 	}
6323 
6324 	if (ret) {
6325 		dev_err(&hdev->pdev->dev,
6326 			"failed to check flow union tuple, ret = %d\n",
6327 			ret);
6328 		return ret;
6329 	}
6330 
6331 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6332 }
6333 
6334 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6335 				      struct ethtool_rx_flow_spec *fs,
6336 				      struct hclge_fd_rule *rule, u8 ip_proto)
6337 {
6338 	rule->tuples.src_ip[IPV4_INDEX] =
6339 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6340 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6341 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6342 
6343 	rule->tuples.dst_ip[IPV4_INDEX] =
6344 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6345 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6346 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6347 
6348 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6349 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6350 
6351 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6352 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6353 
6354 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6355 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6356 
6357 	rule->tuples.ether_proto = ETH_P_IP;
6358 	rule->tuples_mask.ether_proto = 0xFFFF;
6359 
6360 	rule->tuples.ip_proto = ip_proto;
6361 	rule->tuples_mask.ip_proto = 0xFF;
6362 }
6363 
6364 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6365 				   struct ethtool_rx_flow_spec *fs,
6366 				   struct hclge_fd_rule *rule)
6367 {
6368 	rule->tuples.src_ip[IPV4_INDEX] =
6369 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6370 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6371 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6372 
6373 	rule->tuples.dst_ip[IPV4_INDEX] =
6374 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6375 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6376 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6377 
6378 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6379 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6380 
6381 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6382 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6383 
6384 	rule->tuples.ether_proto = ETH_P_IP;
6385 	rule->tuples_mask.ether_proto = 0xFFFF;
6386 }
6387 
6388 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6389 				      struct ethtool_rx_flow_spec *fs,
6390 				      struct hclge_fd_rule *rule, u8 ip_proto)
6391 {
6392 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6393 			  IPV6_SIZE);
6394 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6395 			  IPV6_SIZE);
6396 
6397 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6398 			  IPV6_SIZE);
6399 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6400 			  IPV6_SIZE);
6401 
6402 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6403 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6404 
6405 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6406 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6407 
6408 	rule->tuples.ether_proto = ETH_P_IPV6;
6409 	rule->tuples_mask.ether_proto = 0xFFFF;
6410 
6411 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6412 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6413 
6414 	rule->tuples.ip_proto = ip_proto;
6415 	rule->tuples_mask.ip_proto = 0xFF;
6416 }
6417 
6418 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6419 				   struct ethtool_rx_flow_spec *fs,
6420 				   struct hclge_fd_rule *rule)
6421 {
6422 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6423 			  IPV6_SIZE);
6424 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6425 			  IPV6_SIZE);
6426 
6427 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6428 			  IPV6_SIZE);
6429 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6430 			  IPV6_SIZE);
6431 
6432 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6433 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6434 
	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6437 
6438 	rule->tuples.ether_proto = ETH_P_IPV6;
6439 	rule->tuples_mask.ether_proto = 0xFFFF;
6440 }
6441 
6442 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6443 				     struct ethtool_rx_flow_spec *fs,
6444 				     struct hclge_fd_rule *rule)
6445 {
6446 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6447 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6448 
6449 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6450 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6451 
6452 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6453 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6454 }
6455 
6456 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6457 					struct hclge_fd_rule *rule)
6458 {
6459 	switch (info->layer) {
6460 	case HCLGE_FD_USER_DEF_L2:
6461 		rule->tuples.l2_user_def = info->data;
6462 		rule->tuples_mask.l2_user_def = info->data_mask;
6463 		break;
6464 	case HCLGE_FD_USER_DEF_L3:
6465 		rule->tuples.l3_user_def = info->data;
6466 		rule->tuples_mask.l3_user_def = info->data_mask;
6467 		break;
6468 	case HCLGE_FD_USER_DEF_L4:
6469 		rule->tuples.l4_user_def = (u32)info->data << 16;
6470 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6471 		break;
6472 	default:
6473 		break;
6474 	}
6475 
6476 	rule->ep.user_def = *info;
6477 }
6478 
6479 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6480 			      struct ethtool_rx_flow_spec *fs,
6481 			      struct hclge_fd_rule *rule,
6482 			      struct hclge_fd_user_def_info *info)
6483 {
6484 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6485 
6486 	switch (flow_type) {
6487 	case SCTP_V4_FLOW:
6488 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6489 		break;
6490 	case TCP_V4_FLOW:
6491 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6492 		break;
6493 	case UDP_V4_FLOW:
6494 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6495 		break;
6496 	case IP_USER_FLOW:
6497 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6498 		break;
6499 	case SCTP_V6_FLOW:
6500 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6501 		break;
6502 	case TCP_V6_FLOW:
6503 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6504 		break;
6505 	case UDP_V6_FLOW:
6506 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6507 		break;
6508 	case IPV6_USER_FLOW:
6509 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6510 		break;
6511 	case ETHER_FLOW:
6512 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6513 		break;
6514 	default:
6515 		return -EOPNOTSUPP;
6516 	}
6517 
6518 	if (fs->flow_type & FLOW_EXT) {
6519 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6520 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6521 		hclge_fd_get_user_def_tuple(info, rule);
6522 	}
6523 
6524 	if (fs->flow_type & FLOW_MAC_EXT) {
6525 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6526 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6527 	}
6528 
6529 	return 0;
6530 }
6531 
6532 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6533 				struct hclge_fd_rule *rule)
6534 {
6535 	int ret;
6536 
6537 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6538 	if (ret)
6539 		return ret;
6540 
6541 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6542 }
6543 
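/* Common path for adding a flow director rule: reject rules whose type
 * conflicts with the currently active rule type, clear the aRFS rules,
 * write the rule to the hardware and track it in the fd rule list.
 */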
6544 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6545 				     struct hclge_fd_rule *rule)
6546 {
6547 	int ret;
6548 
6549 	spin_lock_bh(&hdev->fd_rule_lock);
6550 
6551 	if (hdev->fd_active_type != rule->rule_type &&
6552 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6553 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6554 		dev_err(&hdev->pdev->dev,
			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
6556 			rule->rule_type, hdev->fd_active_type);
6557 		spin_unlock_bh(&hdev->fd_rule_lock);
6558 		return -EINVAL;
6559 	}
6560 
6561 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6562 	if (ret)
6563 		goto out;
6564 
6565 	ret = hclge_clear_arfs_rules(hdev);
6566 	if (ret)
6567 		goto out;
6568 
6569 	ret = hclge_fd_config_rule(hdev, rule);
6570 	if (ret)
6571 		goto out;
6572 
6573 	rule->state = HCLGE_FD_ACTIVE;
6574 	hdev->fd_active_type = rule->rule_type;
6575 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6576 
6577 out:
6578 	spin_unlock_bh(&hdev->fd_rule_lock);
6579 	return ret;
6580 }
6581 
6582 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6583 {
6584 	struct hclge_vport *vport = hclge_get_vport(handle);
6585 	struct hclge_dev *hdev = vport->back;
6586 
6587 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6588 }
6589 
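/* Decode the ethtool ring cookie into an action and destination:
 * RX_CLS_FLOW_DISC means drop the packet, otherwise the cookie selects a
 * function (vf index 0 refers to the PF itself) and a queue within it.
 */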
6590 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6591 				      u16 *vport_id, u8 *action, u16 *queue_id)
6592 {
6593 	struct hclge_vport *vport = hdev->vport;
6594 
6595 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6596 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6597 	} else {
6598 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6599 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6600 		u16 tqps;
6601 
6602 		if (vf > hdev->num_req_vfs) {
6603 			dev_err(&hdev->pdev->dev,
6604 				"Error: vf id (%u) > max vf num (%u)\n",
6605 				vf, hdev->num_req_vfs);
6606 			return -EINVAL;
6607 		}
6608 
6609 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6610 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6611 
6612 		if (ring >= tqps) {
6613 			dev_err(&hdev->pdev->dev,
6614 				"Error: queue id (%u) > max tqp num (%u)\n",
6615 				ring, tqps - 1);
6616 			return -EINVAL;
6617 		}
6618 
6619 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6620 		*queue_id = ring;
6621 	}
6622 
6623 	return 0;
6624 }
6625 
6626 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6627 			      struct ethtool_rxnfc *cmd)
6628 {
6629 	struct hclge_vport *vport = hclge_get_vport(handle);
6630 	struct hclge_dev *hdev = vport->back;
6631 	struct hclge_fd_user_def_info info;
6632 	u16 dst_vport_id = 0, q_index = 0;
6633 	struct ethtool_rx_flow_spec *fs;
6634 	struct hclge_fd_rule *rule;
6635 	u32 unused = 0;
6636 	u8 action;
6637 	int ret;
6638 
6639 	if (!hnae3_dev_fd_supported(hdev)) {
6640 		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
6642 		return -EOPNOTSUPP;
6643 	}
6644 
6645 	if (!hdev->fd_en) {
6646 		dev_err(&hdev->pdev->dev,
6647 			"please enable flow director first\n");
6648 		return -EOPNOTSUPP;
6649 	}
6650 
6651 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6652 
6653 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6654 	if (ret)
6655 		return ret;
6656 
6657 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6658 					 &action, &q_index);
6659 	if (ret)
6660 		return ret;
6661 
6662 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6663 	if (!rule)
6664 		return -ENOMEM;
6665 
6666 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6667 	if (ret) {
6668 		kfree(rule);
6669 		return ret;
6670 	}
6671 
6672 	rule->flow_type = fs->flow_type;
6673 	rule->location = fs->location;
6674 	rule->unused_tuple = unused;
6675 	rule->vf_id = dst_vport_id;
6676 	rule->queue_id = q_index;
6677 	rule->action = action;
6678 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6679 
6680 	ret = hclge_add_fd_entry_common(hdev, rule);
6681 	if (ret)
6682 		kfree(rule);
6683 
6684 	return ret;
6685 }
6686 
6687 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6688 			      struct ethtool_rxnfc *cmd)
6689 {
6690 	struct hclge_vport *vport = hclge_get_vport(handle);
6691 	struct hclge_dev *hdev = vport->back;
6692 	struct ethtool_rx_flow_spec *fs;
6693 	int ret;
6694 
6695 	if (!hnae3_dev_fd_supported(hdev))
6696 		return -EOPNOTSUPP;
6697 
6698 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6699 
6700 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6701 		return -EINVAL;
6702 
6703 	spin_lock_bh(&hdev->fd_rule_lock);
6704 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6705 	    !test_bit(fs->location, hdev->fd_bmap)) {
6706 		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
6708 		spin_unlock_bh(&hdev->fd_rule_lock);
6709 		return -ENOENT;
6710 	}
6711 
6712 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6713 				   NULL, false);
6714 	if (ret)
6715 		goto out;
6716 
6717 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6718 
6719 out:
6720 	spin_unlock_bh(&hdev->fd_rule_lock);
6721 	return ret;
6722 }
6723 
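/* Invalidate every flow director TCAM entry tracked in fd_bmap. When
 * @clear_list is true, also free all rule nodes and reset the rule counter,
 * the bitmap and the active rule type.
 */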
6724 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6725 					 bool clear_list)
6726 {
6727 	struct hclge_fd_rule *rule;
6728 	struct hlist_node *node;
6729 	u16 location;
6730 
6731 	if (!hnae3_dev_fd_supported(hdev))
6732 		return;
6733 
6734 	spin_lock_bh(&hdev->fd_rule_lock);
6735 
6736 	for_each_set_bit(location, hdev->fd_bmap,
6737 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6738 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6739 				     NULL, false);
6740 
6741 	if (clear_list) {
6742 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6743 					  rule_node) {
6744 			hlist_del(&rule->rule_node);
6745 			kfree(rule);
6746 		}
6747 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6748 		hdev->hclge_fd_rule_num = 0;
6749 		bitmap_zero(hdev->fd_bmap,
6750 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6751 	}
6752 
6753 	spin_unlock_bh(&hdev->fd_rule_lock);
6754 }
6755 
6756 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6757 {
6758 	hclge_clear_fd_rules_in_list(hdev, true);
6759 	hclge_fd_disable_user_def(hdev);
6760 }
6761 
6762 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6763 {
6764 	struct hclge_vport *vport = hclge_get_vport(handle);
6765 	struct hclge_dev *hdev = vport->back;
6766 	struct hclge_fd_rule *rule;
6767 	struct hlist_node *node;
6768 
	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
6773 	if (!hnae3_dev_fd_supported(hdev))
6774 		return 0;
6775 
	/* if fd is disabled, the rules should not be restored during reset */
6777 	if (!hdev->fd_en)
6778 		return 0;
6779 
6780 	spin_lock_bh(&hdev->fd_rule_lock);
6781 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6782 		if (rule->state == HCLGE_FD_ACTIVE)
6783 			rule->state = HCLGE_FD_TO_ADD;
6784 	}
6785 	spin_unlock_bh(&hdev->fd_rule_lock);
6786 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6787 
6788 	return 0;
6789 }
6790 
6791 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6792 				 struct ethtool_rxnfc *cmd)
6793 {
6794 	struct hclge_vport *vport = hclge_get_vport(handle);
6795 	struct hclge_dev *hdev = vport->back;
6796 
6797 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6798 		return -EOPNOTSUPP;
6799 
6800 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6801 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6802 
6803 	return 0;
6804 }
6805 
6806 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6807 				     struct ethtool_tcpip4_spec *spec,
6808 				     struct ethtool_tcpip4_spec *spec_mask)
6809 {
6810 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6811 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6812 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6813 
6814 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6815 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6816 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6817 
6818 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6819 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6820 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6821 
6822 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6823 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6824 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6825 
6826 	spec->tos = rule->tuples.ip_tos;
6827 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6828 			0 : rule->tuples_mask.ip_tos;
6829 }
6830 
6831 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6832 				  struct ethtool_usrip4_spec *spec,
6833 				  struct ethtool_usrip4_spec *spec_mask)
6834 {
6835 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6836 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6837 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6838 
6839 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6840 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6841 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6842 
6843 	spec->tos = rule->tuples.ip_tos;
6844 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6845 			0 : rule->tuples_mask.ip_tos;
6846 
6847 	spec->proto = rule->tuples.ip_proto;
6848 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6849 			0 : rule->tuples_mask.ip_proto;
6850 
6851 	spec->ip_ver = ETH_RX_NFC_IP4;
6852 }
6853 
6854 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6855 				     struct ethtool_tcpip6_spec *spec,
6856 				     struct ethtool_tcpip6_spec *spec_mask)
6857 {
6858 	cpu_to_be32_array(spec->ip6src,
6859 			  rule->tuples.src_ip, IPV6_SIZE);
6860 	cpu_to_be32_array(spec->ip6dst,
6861 			  rule->tuples.dst_ip, IPV6_SIZE);
6862 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6863 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6864 	else
6865 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6866 				  IPV6_SIZE);
6867 
6868 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6869 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6870 	else
6871 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6872 				  IPV6_SIZE);
6873 
6874 	spec->tclass = rule->tuples.ip_tos;
6875 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6876 			0 : rule->tuples_mask.ip_tos;
6877 
6878 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6879 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6880 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6881 
6882 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6883 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6884 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6885 }
6886 
6887 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6888 				  struct ethtool_usrip6_spec *spec,
6889 				  struct ethtool_usrip6_spec *spec_mask)
6890 {
6891 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6892 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6893 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6894 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6895 	else
6896 		cpu_to_be32_array(spec_mask->ip6src,
6897 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6898 
6899 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6900 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6901 	else
6902 		cpu_to_be32_array(spec_mask->ip6dst,
6903 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6904 
6905 	spec->tclass = rule->tuples.ip_tos;
6906 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6907 			0 : rule->tuples_mask.ip_tos;
6908 
6909 	spec->l4_proto = rule->tuples.ip_proto;
6910 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6911 			0 : rule->tuples_mask.ip_proto;
6912 }
6913 
6914 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6915 				    struct ethhdr *spec,
6916 				    struct ethhdr *spec_mask)
6917 {
6918 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6919 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6920 
6921 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6922 		eth_zero_addr(spec_mask->h_source);
6923 	else
6924 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6925 
6926 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6927 		eth_zero_addr(spec_mask->h_dest);
6928 	else
6929 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6930 
6931 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6932 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6933 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6934 }
6935 
6936 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6937 				       struct hclge_fd_rule *rule)
6938 {
6939 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6940 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6941 		fs->h_ext.data[0] = 0;
6942 		fs->h_ext.data[1] = 0;
6943 		fs->m_ext.data[0] = 0;
6944 		fs->m_ext.data[1] = 0;
6945 	} else {
6946 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6947 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6948 		fs->m_ext.data[0] =
6949 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6950 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6951 	}
6952 }
6953 
6954 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6955 				  struct hclge_fd_rule *rule)
6956 {
6957 	if (fs->flow_type & FLOW_EXT) {
6958 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6959 		fs->m_ext.vlan_tci =
6960 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6961 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6962 
6963 		hclge_fd_get_user_def_info(fs, rule);
6964 	}
6965 
6966 	if (fs->flow_type & FLOW_MAC_EXT) {
6967 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6968 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6969 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6970 		else
6971 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6972 					rule->tuples_mask.dst_mac);
6973 	}
6974 }
6975 
6976 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6977 				  struct ethtool_rxnfc *cmd)
6978 {
6979 	struct hclge_vport *vport = hclge_get_vport(handle);
6980 	struct hclge_fd_rule *rule = NULL;
6981 	struct hclge_dev *hdev = vport->back;
6982 	struct ethtool_rx_flow_spec *fs;
6983 	struct hlist_node *node2;
6984 
6985 	if (!hnae3_dev_fd_supported(hdev))
6986 		return -EOPNOTSUPP;
6987 
6988 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6989 
6990 	spin_lock_bh(&hdev->fd_rule_lock);
6991 
6992 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6993 		if (rule->location >= fs->location)
6994 			break;
6995 	}
6996 
6997 	if (!rule || fs->location != rule->location) {
6998 		spin_unlock_bh(&hdev->fd_rule_lock);
6999 
7000 		return -ENOENT;
7001 	}
7002 
7003 	fs->flow_type = rule->flow_type;
7004 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7005 	case SCTP_V4_FLOW:
7006 	case TCP_V4_FLOW:
7007 	case UDP_V4_FLOW:
7008 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7009 					 &fs->m_u.tcp_ip4_spec);
7010 		break;
7011 	case IP_USER_FLOW:
7012 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7013 				      &fs->m_u.usr_ip4_spec);
7014 		break;
7015 	case SCTP_V6_FLOW:
7016 	case TCP_V6_FLOW:
7017 	case UDP_V6_FLOW:
7018 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7019 					 &fs->m_u.tcp_ip6_spec);
7020 		break;
7021 	case IPV6_USER_FLOW:
7022 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7023 				      &fs->m_u.usr_ip6_spec);
7024 		break;
7025 	/* The flow type of the fd rule has been checked before adding it to
7026 	 * the rule list. As the other flow types have been handled above, the
7027 	 * default case must be ETHER_FLOW.
7028 	 */
7029 	default:
7030 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7031 					&fs->m_u.ether_spec);
7032 		break;
7033 	}
7034 
7035 	hclge_fd_get_ext_info(fs, rule);
7036 
7037 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7038 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7039 	} else {
7040 		u64 vf_id;
7041 
7042 		fs->ring_cookie = rule->queue_id;
7043 		vf_id = rule->vf_id;
7044 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7045 		fs->ring_cookie |= vf_id;
7046 	}
7047 
7048 	spin_unlock_bh(&hdev->fd_rule_lock);
7049 
7050 	return 0;
7051 }
7052 
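/* Fill rule_locs with the locations of the rules in the flow director rule
 * list, skipping rules that are pending deletion. Returns -EMSGSIZE if there
 * are more rules than cmd->rule_cnt can hold.
 */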
7053 static int hclge_get_all_rules(struct hnae3_handle *handle,
7054 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7055 {
7056 	struct hclge_vport *vport = hclge_get_vport(handle);
7057 	struct hclge_dev *hdev = vport->back;
7058 	struct hclge_fd_rule *rule;
7059 	struct hlist_node *node2;
7060 	int cnt = 0;
7061 
7062 	if (!hnae3_dev_fd_supported(hdev))
7063 		return -EOPNOTSUPP;
7064 
7065 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7066 
7067 	spin_lock_bh(&hdev->fd_rule_lock);
7068 	hlist_for_each_entry_safe(rule, node2,
7069 				  &hdev->fd_rule_list, rule_node) {
7070 		if (cnt == cmd->rule_cnt) {
7071 			spin_unlock_bh(&hdev->fd_rule_lock);
7072 			return -EMSGSIZE;
7073 		}
7074 
7075 		if (rule->state == HCLGE_FD_TO_DEL)
7076 			continue;
7077 
7078 		rule_locs[cnt] = rule->location;
7079 		cnt++;
7080 	}
7081 
7082 	spin_unlock_bh(&hdev->fd_rule_lock);
7083 
7084 	cmd->rule_cnt = cnt;
7085 
7086 	return 0;
7087 }
7088 
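/* Extract the flow tuples used by aRFS from the dissected flow keys,
 * converting them to CPU byte order. IPv4 addresses are stored in the last
 * element of the IPv6-sized address arrays.
 */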
7089 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7090 				     struct hclge_fd_rule_tuples *tuples)
7091 {
7092 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7093 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7094 
7095 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7096 	tuples->ip_proto = fkeys->basic.ip_proto;
7097 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7098 
7099 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7100 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7101 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7102 	} else {
7103 		int i;
7104 
7105 		for (i = 0; i < IPV6_SIZE; i++) {
7106 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7107 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7108 		}
7109 	}
7110 }
7111 
7112 /* traverse all rules and check whether an existing rule has the same tuples */
7113 static struct hclge_fd_rule *
7114 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7115 			  const struct hclge_fd_rule_tuples *tuples)
7116 {
7117 	struct hclge_fd_rule *rule = NULL;
7118 	struct hlist_node *node;
7119 
7120 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7121 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7122 			return rule;
7123 	}
7124 
7125 	return NULL;
7126 }
7127 
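/* Initialize an aRFS rule from the extracted flow tuples: mark the tuples not
 * used by aRFS as unused, select the flow type from the ether/ip protocol,
 * and fully mask the remaining tuples.
 */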
7128 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7129 				     struct hclge_fd_rule *rule)
7130 {
7131 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7132 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7133 			     BIT(INNER_SRC_PORT);
7134 	rule->action = 0;
7135 	rule->vf_id = 0;
7136 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7137 	rule->state = HCLGE_FD_TO_ADD;
7138 	if (tuples->ether_proto == ETH_P_IP) {
7139 		if (tuples->ip_proto == IPPROTO_TCP)
7140 			rule->flow_type = TCP_V4_FLOW;
7141 		else
7142 			rule->flow_type = UDP_V4_FLOW;
7143 	} else {
7144 		if (tuples->ip_proto == IPPROTO_TCP)
7145 			rule->flow_type = TCP_V6_FLOW;
7146 		else
7147 			rule->flow_type = UDP_V6_FLOW;
7148 	}
7149 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7150 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7151 }
7152 
7153 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7154 				      u16 flow_id, struct flow_keys *fkeys)
7155 {
7156 	struct hclge_vport *vport = hclge_get_vport(handle);
7157 	struct hclge_fd_rule_tuples new_tuples = {};
7158 	struct hclge_dev *hdev = vport->back;
7159 	struct hclge_fd_rule *rule;
7160 	u16 bit_id;
7161 
7162 	if (!hnae3_dev_fd_supported(hdev))
7163 		return -EOPNOTSUPP;
7164 
7165 	/* when an fd rule added by the user already exists,
7166 	 * arfs should not work
7167 	 */
7168 	spin_lock_bh(&hdev->fd_rule_lock);
7169 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7170 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7171 		spin_unlock_bh(&hdev->fd_rule_lock);
7172 		return -EOPNOTSUPP;
7173 	}
7174 
7175 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7176 
7177 	/* check whether a flow director filter already exists for this flow:
7178 	 * if not, create a new filter for it;
7179 	 * if a filter exists with a different queue id, modify the filter;
7180 	 * if a filter exists with the same queue id, do nothing
7181 	 */
7182 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7183 	if (!rule) {
7184 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7185 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7186 			spin_unlock_bh(&hdev->fd_rule_lock);
7187 			return -ENOSPC;
7188 		}
7189 
7190 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7191 		if (!rule) {
7192 			spin_unlock_bh(&hdev->fd_rule_lock);
7193 			return -ENOMEM;
7194 		}
7195 
7196 		rule->location = bit_id;
7197 		rule->arfs.flow_id = flow_id;
7198 		rule->queue_id = queue_id;
7199 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7200 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7201 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7202 	} else if (rule->queue_id != queue_id) {
7203 		rule->queue_id = queue_id;
7204 		rule->state = HCLGE_FD_TO_ADD;
7205 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7206 		hclge_task_schedule(hdev, 0);
7207 	}
7208 	spin_unlock_bh(&hdev->fd_rule_lock);
7209 	return rule->location;
7210 }
7211 
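/* Walk the aRFS rule list and mark rules whose flows may have expired as
 * TO_DEL, so that the service task removes them from hardware later.
 */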
7212 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7213 {
7214 #ifdef CONFIG_RFS_ACCEL
7215 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7216 	struct hclge_fd_rule *rule;
7217 	struct hlist_node *node;
7218 
7219 	spin_lock_bh(&hdev->fd_rule_lock);
7220 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7221 		spin_unlock_bh(&hdev->fd_rule_lock);
7222 		return;
7223 	}
7224 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7225 		if (rule->state != HCLGE_FD_ACTIVE)
7226 			continue;
7227 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7228 					rule->arfs.flow_id, rule->location)) {
7229 			rule->state = HCLGE_FD_TO_DEL;
7230 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7231 		}
7232 	}
7233 	spin_unlock_bh(&hdev->fd_rule_lock);
7234 #endif
7235 }
7236 
7237 /* must be called with fd_rule_lock held */
7238 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7239 {
7240 #ifdef CONFIG_RFS_ACCEL
7241 	struct hclge_fd_rule *rule;
7242 	struct hlist_node *node;
7243 	int ret;
7244 
7245 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7246 		return 0;
7247 
7248 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7249 		switch (rule->state) {
7250 		case HCLGE_FD_TO_DEL:
7251 		case HCLGE_FD_ACTIVE:
7252 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7253 						   rule->location, NULL, false);
7254 			if (ret)
7255 				return ret;
7256 			fallthrough;
7257 		case HCLGE_FD_TO_ADD:
7258 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7259 			hlist_del(&rule->rule_node);
7260 			kfree(rule);
7261 			break;
7262 		default:
7263 			break;
7264 		}
7265 	}
7266 	hclge_sync_fd_state(hdev);
7267 
7268 #endif
7269 	return 0;
7270 }
7271 
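/* The hclge_get_cls_key_* helpers below each copy one dissector key set from
 * a tc flower match into the rule tuples, or mark the corresponding tuples as
 * unused when that key is not present in the match.
 */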
7272 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7273 				    struct hclge_fd_rule *rule)
7274 {
7275 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7276 		struct flow_match_basic match;
7277 		u16 ethtype_key, ethtype_mask;
7278 
7279 		flow_rule_match_basic(flow, &match);
7280 		ethtype_key = ntohs(match.key->n_proto);
7281 		ethtype_mask = ntohs(match.mask->n_proto);
7282 
7283 		if (ethtype_key == ETH_P_ALL) {
7284 			ethtype_key = 0;
7285 			ethtype_mask = 0;
7286 		}
7287 		rule->tuples.ether_proto = ethtype_key;
7288 		rule->tuples_mask.ether_proto = ethtype_mask;
7289 		rule->tuples.ip_proto = match.key->ip_proto;
7290 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7291 	} else {
7292 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7293 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7294 	}
7295 }
7296 
7297 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7298 				  struct hclge_fd_rule *rule)
7299 {
7300 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7301 		struct flow_match_eth_addrs match;
7302 
7303 		flow_rule_match_eth_addrs(flow, &match);
7304 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7305 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7306 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7307 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7308 	} else {
7309 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7310 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7311 	}
7312 }
7313 
7314 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7315 				   struct hclge_fd_rule *rule)
7316 {
7317 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7318 		struct flow_match_vlan match;
7319 
7320 		flow_rule_match_vlan(flow, &match);
7321 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7322 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7323 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7324 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7325 	} else {
7326 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7327 	}
7328 }
7329 
7330 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7331 				 struct hclge_fd_rule *rule)
7332 {
7333 	u16 addr_type = 0;
7334 
7335 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7336 		struct flow_match_control match;
7337 
7338 		flow_rule_match_control(flow, &match);
7339 		addr_type = match.key->addr_type;
7340 	}
7341 
7342 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7343 		struct flow_match_ipv4_addrs match;
7344 
7345 		flow_rule_match_ipv4_addrs(flow, &match);
7346 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7347 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7348 						be32_to_cpu(match.mask->src);
7349 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7350 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7351 						be32_to_cpu(match.mask->dst);
7352 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7353 		struct flow_match_ipv6_addrs match;
7354 
7355 		flow_rule_match_ipv6_addrs(flow, &match);
7356 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7357 				  IPV6_SIZE);
7358 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7359 				  match.mask->src.s6_addr32, IPV6_SIZE);
7360 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7361 				  IPV6_SIZE);
7362 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7363 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7364 	} else {
7365 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7366 		rule->unused_tuple |= BIT(INNER_DST_IP);
7367 	}
7368 }
7369 
7370 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7371 				   struct hclge_fd_rule *rule)
7372 {
7373 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7374 		struct flow_match_ports match;
7375 
7376 		flow_rule_match_ports(flow, &match);
7377 
7378 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7379 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7380 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7381 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7382 	} else {
7383 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7384 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7385 	}
7386 }
7387 
7388 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7389 				  struct flow_cls_offload *cls_flower,
7390 				  struct hclge_fd_rule *rule)
7391 {
7392 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7393 	struct flow_dissector *dissector = flow->match.dissector;
7394 
7395 	if (dissector->used_keys &
7396 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7397 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7398 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7399 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7400 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7401 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7402 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7403 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7404 			dissector->used_keys);
7405 		return -EOPNOTSUPP;
7406 	}
7407 
7408 	hclge_get_cls_key_basic(flow, rule);
7409 	hclge_get_cls_key_mac(flow, rule);
7410 	hclge_get_cls_key_vlan(flow, rule);
7411 	hclge_get_cls_key_ip(flow, rule);
7412 	hclge_get_cls_key_port(flow, rule);
7413 
7414 	return 0;
7415 }
7416 
7417 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7418 				  struct flow_cls_offload *cls_flower, int tc)
7419 {
7420 	u32 prio = cls_flower->common.prio;
7421 
7422 	if (tc < 0 || tc > hdev->tc_max) {
7423 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7424 		return -EINVAL;
7425 	}
7426 
7427 	if (prio == 0 ||
7428 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7429 		dev_err(&hdev->pdev->dev,
7430 			"prio %u should be in range[1, %u]\n",
7431 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7432 		return -EINVAL;
7433 	}
7434 
7435 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7436 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7437 		return -EINVAL;
7438 	}
7439 	return 0;
7440 }
7441 
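/* Add a tc flower rule: the rule location in the flow director table is
 * derived from the tc filter priority (prio - 1), and matched packets are
 * directed to the given traffic class.
 */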
7442 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7443 				struct flow_cls_offload *cls_flower,
7444 				int tc)
7445 {
7446 	struct hclge_vport *vport = hclge_get_vport(handle);
7447 	struct hclge_dev *hdev = vport->back;
7448 	struct hclge_fd_rule *rule;
7449 	int ret;
7450 
7451 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7452 	if (ret) {
7453 		dev_err(&hdev->pdev->dev,
7454 			"failed to check cls flower params, ret = %d\n", ret);
7455 		return ret;
7456 	}
7457 
7458 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7459 	if (!rule)
7460 		return -ENOMEM;
7461 
7462 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7463 	if (ret) {
7464 		kfree(rule);
7465 		return ret;
7466 	}
7467 
7468 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7469 	rule->cls_flower.tc = tc;
7470 	rule->location = cls_flower->common.prio - 1;
7471 	rule->vf_id = 0;
7472 	rule->cls_flower.cookie = cls_flower->cookie;
7473 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7474 
7475 	ret = hclge_add_fd_entry_common(hdev, rule);
7476 	if (ret)
7477 		kfree(rule);
7478 
7479 	return ret;
7480 }
7481 
7482 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7483 						   unsigned long cookie)
7484 {
7485 	struct hclge_fd_rule *rule;
7486 	struct hlist_node *node;
7487 
7488 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7489 		if (rule->cls_flower.cookie == cookie)
7490 			return rule;
7491 	}
7492 
7493 	return NULL;
7494 }
7495 
7496 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7497 				struct flow_cls_offload *cls_flower)
7498 {
7499 	struct hclge_vport *vport = hclge_get_vport(handle);
7500 	struct hclge_dev *hdev = vport->back;
7501 	struct hclge_fd_rule *rule;
7502 	int ret;
7503 
7504 	spin_lock_bh(&hdev->fd_rule_lock);
7505 
7506 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7507 	if (!rule) {
7508 		spin_unlock_bh(&hdev->fd_rule_lock);
7509 		return -EINVAL;
7510 	}
7511 
7512 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7513 				   NULL, false);
7514 	if (ret) {
7515 		spin_unlock_bh(&hdev->fd_rule_lock);
7516 		return ret;
7517 	}
7518 
7519 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7520 	spin_unlock_bh(&hdev->fd_rule_lock);
7521 
7522 	return 0;
7523 }
7524 
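/* Apply pending rule changes to hardware: program TO_ADD rules and remove
 * TO_DEL rules. On failure the changed flag is set again so the service task
 * retries later.
 */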
7525 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7526 {
7527 	struct hclge_fd_rule *rule;
7528 	struct hlist_node *node;
7529 	int ret = 0;
7530 
7531 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7532 		return;
7533 
7534 	spin_lock_bh(&hdev->fd_rule_lock);
7535 
7536 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7537 		switch (rule->state) {
7538 		case HCLGE_FD_TO_ADD:
7539 			ret = hclge_fd_config_rule(hdev, rule);
7540 			if (ret)
7541 				goto out;
7542 			rule->state = HCLGE_FD_ACTIVE;
7543 			break;
7544 		case HCLGE_FD_TO_DEL:
7545 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7546 						   rule->location, NULL, false);
7547 			if (ret)
7548 				goto out;
7549 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7550 			hclge_fd_free_node(hdev, rule);
7551 			break;
7552 		default:
7553 			break;
7554 		}
7555 	}
7556 
7557 out:
7558 	if (ret)
7559 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7560 
7561 	spin_unlock_bh(&hdev->fd_rule_lock);
7562 }
7563 
7564 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7565 {
7566 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7567 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7568 
7569 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7570 	}
7571 
7572 	hclge_sync_fd_user_def_cfg(hdev, false);
7573 
7574 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7575 }
7576 
7577 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7578 {
7579 	struct hclge_vport *vport = hclge_get_vport(handle);
7580 	struct hclge_dev *hdev = vport->back;
7581 
7582 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7583 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7584 }
7585 
7586 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7587 {
7588 	struct hclge_vport *vport = hclge_get_vport(handle);
7589 	struct hclge_dev *hdev = vport->back;
7590 
7591 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7592 }
7593 
7594 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7595 {
7596 	struct hclge_vport *vport = hclge_get_vport(handle);
7597 	struct hclge_dev *hdev = vport->back;
7598 
7599 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7600 }
7601 
7602 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7603 {
7604 	struct hclge_vport *vport = hclge_get_vport(handle);
7605 	struct hclge_dev *hdev = vport->back;
7606 
7607 	return hdev->rst_stats.hw_reset_done_cnt;
7608 }
7609 
7610 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7611 {
7612 	struct hclge_vport *vport = hclge_get_vport(handle);
7613 	struct hclge_dev *hdev = vport->back;
7614 
7615 	hdev->fd_en = enable;
7616 
7617 	if (!enable)
7618 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7619 	else
7620 		hclge_restore_fd_entries(handle);
7621 
7622 	hclge_task_schedule(hdev, 0);
7623 }
7624 
7625 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7626 {
7627 	struct hclge_desc desc;
7628 	struct hclge_config_mac_mode_cmd *req =
7629 		(struct hclge_config_mac_mode_cmd *)desc.data;
7630 	u32 loop_en = 0;
7631 	int ret;
7632 
7633 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7634 
7635 	if (enable) {
7636 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7637 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7638 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7639 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7640 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7641 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7642 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7643 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7644 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7645 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7646 	}
7647 
7648 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7649 
7650 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7651 	if (ret)
7652 		dev_err(&hdev->pdev->dev,
7653 			"mac enable fail, ret =%d.\n", ret);
7654 }
7655 
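/* Read-modify-write the mac vlan switch parameter of the given function:
 * read the current configuration first, then merge in switch_param under
 * param_mask and write it back.
 */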
7656 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7657 				     u8 switch_param, u8 param_mask)
7658 {
7659 	struct hclge_mac_vlan_switch_cmd *req;
7660 	struct hclge_desc desc;
7661 	u32 func_id;
7662 	int ret;
7663 
7664 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7665 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7666 
7667 	/* read current config parameter */
7668 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7669 				   true);
7670 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7671 	req->func_id = cpu_to_le32(func_id);
7672 
7673 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7674 	if (ret) {
7675 		dev_err(&hdev->pdev->dev,
7676 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7677 		return ret;
7678 	}
7679 
7680 	/* modify and write new config parameter */
7681 	hclge_cmd_reuse_desc(&desc, false);
7682 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7683 	req->param_mask = param_mask;
7684 
7685 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7686 	if (ret)
7687 		dev_err(&hdev->pdev->dev,
7688 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7689 	return ret;
7690 }
7691 
7692 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7693 				       int link_ret)
7694 {
7695 #define HCLGE_PHY_LINK_STATUS_NUM  200
7696 
7697 	struct phy_device *phydev = hdev->hw.mac.phydev;
7698 	int i = 0;
7699 	int ret;
7700 
7701 	do {
7702 		ret = phy_read_status(phydev);
7703 		if (ret) {
7704 			dev_err(&hdev->pdev->dev,
7705 				"phy update link status fail, ret = %d\n", ret);
7706 			return;
7707 		}
7708 
7709 		if (phydev->link == link_ret)
7710 			break;
7711 
7712 		msleep(HCLGE_LINK_STATUS_MS);
7713 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7714 }
7715 
7716 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7717 {
7718 #define HCLGE_MAC_LINK_STATUS_NUM  100
7719 
7720 	int link_status;
7721 	int i = 0;
7722 	int ret;
7723 
7724 	do {
7725 		ret = hclge_get_mac_link_status(hdev, &link_status);
7726 		if (ret)
7727 			return ret;
7728 		if (link_status == link_ret)
7729 			return 0;
7730 
7731 		msleep(HCLGE_LINK_STATUS_MS);
7732 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7733 	return -EBUSY;
7734 }
7735 
7736 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7737 					  bool is_phy)
7738 {
7739 	int link_ret;
7740 
7741 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7742 
7743 	if (is_phy)
7744 		hclge_phy_link_status_wait(hdev, link_ret);
7745 
7746 	return hclge_mac_link_status_wait(hdev, link_ret);
7747 }
7748 
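/* Enable or disable MAC (app) loopback by reading the current MAC mode
 * configuration, toggling the loopback bit and writing it back.
 */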
7749 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7750 {
7751 	struct hclge_config_mac_mode_cmd *req;
7752 	struct hclge_desc desc;
7753 	u32 loop_en;
7754 	int ret;
7755 
7756 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7757 	/* 1 Read out the MAC mode config at first */
7758 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7759 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7760 	if (ret) {
7761 		dev_err(&hdev->pdev->dev,
7762 			"mac loopback get fail, ret =%d.\n", ret);
7763 		return ret;
7764 	}
7765 
7766 	/* 2 Then setup the loopback flag */
7767 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7768 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7769 
7770 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7771 
7772 	/* 3 Config mac work mode with loopback flag
7773 	 * and its original configuration parameters
7774 	 */
7775 	hclge_cmd_reuse_desc(&desc, false);
7776 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7777 	if (ret)
7778 		dev_err(&hdev->pdev->dev,
7779 			"mac loopback set fail, ret =%d.\n", ret);
7780 	return ret;
7781 }
7782 
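/* Configure common loopback (serdes serial/parallel or phy), then poll the
 * command result until the firmware reports completion or the retry limit is
 * reached.
 */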
7783 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7784 				     enum hnae3_loop loop_mode)
7785 {
7786 #define HCLGE_COMMON_LB_RETRY_MS	10
7787 #define HCLGE_COMMON_LB_RETRY_NUM	100
7788 
7789 	struct hclge_common_lb_cmd *req;
7790 	struct hclge_desc desc;
7791 	int ret, i = 0;
7792 	u8 loop_mode_b;
7793 
7794 	req = (struct hclge_common_lb_cmd *)desc.data;
7795 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7796 
7797 	switch (loop_mode) {
7798 	case HNAE3_LOOP_SERIAL_SERDES:
7799 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7800 		break;
7801 	case HNAE3_LOOP_PARALLEL_SERDES:
7802 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7803 		break;
7804 	case HNAE3_LOOP_PHY:
7805 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7806 		break;
7807 	default:
7808 		dev_err(&hdev->pdev->dev,
7809 			"unsupported common loopback mode %d\n", loop_mode);
7810 		return -ENOTSUPP;
7811 	}
7812 
7813 	if (en) {
7814 		req->enable = loop_mode_b;
7815 		req->mask = loop_mode_b;
7816 	} else {
7817 		req->mask = loop_mode_b;
7818 	}
7819 
7820 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7821 	if (ret) {
7822 		dev_err(&hdev->pdev->dev,
7823 			"common loopback set fail, ret = %d\n", ret);
7824 		return ret;
7825 	}
7826 
7827 	do {
7828 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7829 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7830 					   true);
7831 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7832 		if (ret) {
7833 			dev_err(&hdev->pdev->dev,
7834 				"common loopback get fail, ret = %d\n", ret);
7835 			return ret;
7836 		}
7837 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7838 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7839 
7840 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7841 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7842 		return -EBUSY;
7843 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7844 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7845 		return -EIO;
7846 	}
7847 	return ret;
7848 }
7849 
7850 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7851 				     enum hnae3_loop loop_mode)
7852 {
7853 	int ret;
7854 
7855 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7856 	if (ret)
7857 		return ret;
7858 
7859 	hclge_cfg_mac_mode(hdev, en);
7860 
7861 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7862 	if (ret)
7863 		dev_err(&hdev->pdev->dev,
7864 			"serdes loopback config mac mode timeout\n");
7865 
7866 	return ret;
7867 }
7868 
7869 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7870 				     struct phy_device *phydev)
7871 {
7872 	int ret;
7873 
7874 	if (!phydev->suspended) {
7875 		ret = phy_suspend(phydev);
7876 		if (ret)
7877 			return ret;
7878 	}
7879 
7880 	ret = phy_resume(phydev);
7881 	if (ret)
7882 		return ret;
7883 
7884 	return phy_loopback(phydev, true);
7885 }
7886 
7887 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7888 				      struct phy_device *phydev)
7889 {
7890 	int ret;
7891 
7892 	ret = phy_loopback(phydev, false);
7893 	if (ret)
7894 		return ret;
7895 
7896 	return phy_suspend(phydev);
7897 }
7898 
7899 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7900 {
7901 	struct phy_device *phydev = hdev->hw.mac.phydev;
7902 	int ret;
7903 
7904 	if (!phydev) {
7905 		if (hnae3_dev_phy_imp_supported(hdev))
7906 			return hclge_set_common_loopback(hdev, en,
7907 							 HNAE3_LOOP_PHY);
7908 		return -ENOTSUPP;
7909 	}
7910 
7911 	if (en)
7912 		ret = hclge_enable_phy_loopback(hdev, phydev);
7913 	else
7914 		ret = hclge_disable_phy_loopback(hdev, phydev);
7915 	if (ret) {
7916 		dev_err(&hdev->pdev->dev,
7917 			"set phy loopback fail, ret = %d\n", ret);
7918 		return ret;
7919 	}
7920 
7921 	hclge_cfg_mac_mode(hdev, en);
7922 
7923 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7924 	if (ret)
7925 		dev_err(&hdev->pdev->dev,
7926 			"phy loopback config mac mode timeout\n");
7927 
7928 	return ret;
7929 }
7930 
7931 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7932 				     u16 stream_id, bool enable)
7933 {
7934 	struct hclge_desc desc;
7935 	struct hclge_cfg_com_tqp_queue_cmd *req =
7936 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7937 
7938 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7939 	req->tqp_id = cpu_to_le16(tqp_id);
7940 	req->stream_id = cpu_to_le16(stream_id);
7941 	if (enable)
7942 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7943 
7944 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7945 }
7946 
7947 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7948 {
7949 	struct hclge_vport *vport = hclge_get_vport(handle);
7950 	struct hclge_dev *hdev = vport->back;
7951 	int ret;
7952 	u16 i;
7953 
7954 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7955 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7956 		if (ret)
7957 			return ret;
7958 	}
7959 	return 0;
7960 }
7961 
7962 static int hclge_set_loopback(struct hnae3_handle *handle,
7963 			      enum hnae3_loop loop_mode, bool en)
7964 {
7965 	struct hclge_vport *vport = hclge_get_vport(handle);
7966 	struct hclge_dev *hdev = vport->back;
7967 	int ret;
7968 
7969 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7970 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7971 	 * the same, the packets are looped back in the SSU. If SSU loopback
7972 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7973 	 */
7974 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7975 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7976 
7977 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7978 						HCLGE_SWITCH_ALW_LPBK_MASK);
7979 		if (ret)
7980 			return ret;
7981 	}
7982 
7983 	switch (loop_mode) {
7984 	case HNAE3_LOOP_APP:
7985 		ret = hclge_set_app_loopback(hdev, en);
7986 		break;
7987 	case HNAE3_LOOP_SERIAL_SERDES:
7988 	case HNAE3_LOOP_PARALLEL_SERDES:
7989 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7990 		break;
7991 	case HNAE3_LOOP_PHY:
7992 		ret = hclge_set_phy_loopback(hdev, en);
7993 		break;
7994 	default:
7995 		ret = -ENOTSUPP;
7996 		dev_err(&hdev->pdev->dev,
7997 			"loop_mode %d is not supported\n", loop_mode);
7998 		break;
7999 	}
8000 
8001 	if (ret)
8002 		return ret;
8003 
8004 	ret = hclge_tqp_enable(handle, en);
8005 	if (ret)
8006 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8007 			en ? "enable" : "disable", ret);
8008 
8009 	return ret;
8010 }
8011 
8012 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8013 {
8014 	int ret;
8015 
8016 	ret = hclge_set_app_loopback(hdev, false);
8017 	if (ret)
8018 		return ret;
8019 
8020 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8021 	if (ret)
8022 		return ret;
8023 
8024 	return hclge_cfg_common_loopback(hdev, false,
8025 					 HNAE3_LOOP_PARALLEL_SERDES);
8026 }
8027 
8028 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8029 {
8030 	struct hclge_vport *vport = hclge_get_vport(handle);
8031 	struct hnae3_knic_private_info *kinfo;
8032 	struct hnae3_queue *queue;
8033 	struct hclge_tqp *tqp;
8034 	int i;
8035 
8036 	kinfo = &vport->nic.kinfo;
8037 	for (i = 0; i < kinfo->num_tqps; i++) {
8038 		queue = handle->kinfo.tqp[i];
8039 		tqp = container_of(queue, struct hclge_tqp, q);
8040 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8041 	}
8042 }
8043 
8044 static void hclge_flush_link_update(struct hclge_dev *hdev)
8045 {
8046 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8047 
8048 	unsigned long last = hdev->serv_processed_cnt;
8049 	int i = 0;
8050 
8051 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8052 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8053 	       last == hdev->serv_processed_cnt)
8054 		usleep_range(1, 1);
8055 }
8056 
8057 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8058 {
8059 	struct hclge_vport *vport = hclge_get_vport(handle);
8060 	struct hclge_dev *hdev = vport->back;
8061 
8062 	if (enable) {
8063 		hclge_task_schedule(hdev, 0);
8064 	} else {
8065 		/* Set the DOWN flag here to disable link updating */
8066 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8067 
8068 		/* flush memory to make sure DOWN is seen by service task */
8069 		smp_mb__before_atomic();
8070 		hclge_flush_link_update(hdev);
8071 	}
8072 }
8073 
8074 static int hclge_ae_start(struct hnae3_handle *handle)
8075 {
8076 	struct hclge_vport *vport = hclge_get_vport(handle);
8077 	struct hclge_dev *hdev = vport->back;
8078 
8079 	/* mac enable */
8080 	hclge_cfg_mac_mode(hdev, true);
8081 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8082 	hdev->hw.mac.link = 0;
8083 
8084 	/* reset tqp stats */
8085 	hclge_reset_tqp_stats(handle);
8086 
8087 	hclge_mac_start_phy(hdev);
8088 
8089 	return 0;
8090 }
8091 
8092 static void hclge_ae_stop(struct hnae3_handle *handle)
8093 {
8094 	struct hclge_vport *vport = hclge_get_vport(handle);
8095 	struct hclge_dev *hdev = vport->back;
8096 
8097 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8098 	spin_lock_bh(&hdev->fd_rule_lock);
8099 	hclge_clear_arfs_rules(hdev);
8100 	spin_unlock_bh(&hdev->fd_rule_lock);
8101 
8102 	/* If it is not a PF reset, the firmware will disable the MAC,
8103 	 * so it only needs to stop the phy here.
8104 	 */
8105 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8106 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8107 		hclge_mac_stop_phy(hdev);
8108 		hclge_update_link_status(hdev);
8109 		return;
8110 	}
8111 
8112 	hclge_reset_tqp(handle);
8113 
8114 	hclge_config_mac_tnl_int(hdev, false);
8115 
8116 	/* Mac disable */
8117 	hclge_cfg_mac_mode(hdev, false);
8118 
8119 	hclge_mac_stop_phy(hdev);
8120 
8121 	/* reset tqp stats */
8122 	hclge_reset_tqp_stats(handle);
8123 	hclge_update_link_status(hdev);
8124 }
8125 
8126 int hclge_vport_start(struct hclge_vport *vport)
8127 {
8128 	struct hclge_dev *hdev = vport->back;
8129 
8130 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8131 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8132 	vport->last_active_jiffies = jiffies;
8133 
8134 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8135 		if (vport->vport_id) {
8136 			hclge_restore_mac_table_common(vport);
8137 			hclge_restore_vport_vlan_table(vport);
8138 		} else {
8139 			hclge_restore_hw_table(hdev);
8140 		}
8141 	}
8142 
8143 	clear_bit(vport->vport_id, hdev->vport_config_block);
8144 
8145 	return 0;
8146 }
8147 
8148 void hclge_vport_stop(struct hclge_vport *vport)
8149 {
8150 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8151 }
8152 
8153 static int hclge_client_start(struct hnae3_handle *handle)
8154 {
8155 	struct hclge_vport *vport = hclge_get_vport(handle);
8156 
8157 	return hclge_vport_start(vport);
8158 }
8159 
8160 static void hclge_client_stop(struct hnae3_handle *handle)
8161 {
8162 	struct hclge_vport *vport = hclge_get_vport(handle);
8163 
8164 	hclge_vport_stop(vport);
8165 }
8166 
8167 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8168 					 u16 cmdq_resp, u8  resp_code,
8169 					 enum hclge_mac_vlan_tbl_opcode op)
8170 {
8171 	struct hclge_dev *hdev = vport->back;
8172 
8173 	if (cmdq_resp) {
8174 		dev_err(&hdev->pdev->dev,
8175 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8176 			cmdq_resp);
8177 		return -EIO;
8178 	}
8179 
8180 	if (op == HCLGE_MAC_VLAN_ADD) {
8181 		if (!resp_code || resp_code == 1)
8182 			return 0;
8183 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8184 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8185 			return -ENOSPC;
8186 
8187 		dev_err(&hdev->pdev->dev,
8188 			"add mac addr failed for undefined, code=%u.\n",
8189 			resp_code);
8190 		return -EIO;
8191 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8192 		if (!resp_code) {
8193 			return 0;
8194 		} else if (resp_code == 1) {
8195 			dev_dbg(&hdev->pdev->dev,
8196 				"remove mac addr failed for miss.\n");
8197 			return -ENOENT;
8198 		}
8199 
8200 		dev_err(&hdev->pdev->dev,
8201 			"remove mac addr failed for undefined, code=%u.\n",
8202 			resp_code);
8203 		return -EIO;
8204 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8205 		if (!resp_code) {
8206 			return 0;
8207 		} else if (resp_code == 1) {
8208 			dev_dbg(&hdev->pdev->dev,
8209 				"lookup mac addr failed for miss.\n");
8210 			return -ENOENT;
8211 		}
8212 
8213 		dev_err(&hdev->pdev->dev,
8214 			"lookup mac addr failed for undefined, code=%u.\n",
8215 			resp_code);
8216 		return -EIO;
8217 	}
8218 
8219 	dev_err(&hdev->pdev->dev,
8220 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8221 
8222 	return -EINVAL;
8223 }
8224 
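/* Set or clear the bit for vfid in the function bitmap of a multicast mac
 * entry: desc[1] covers the first 192 function ids, desc[2] the rest.
 */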
8225 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8226 {
8227 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8228 
8229 	unsigned int word_num;
8230 	unsigned int bit_num;
8231 
8232 	if (vfid > 255 || vfid < 0)
8233 		return -EIO;
8234 
8235 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8236 		word_num = vfid / 32;
8237 		bit_num  = vfid % 32;
8238 		if (clr)
8239 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8240 		else
8241 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8242 	} else {
8243 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8244 		bit_num  = vfid % 32;
8245 		if (clr)
8246 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8247 		else
8248 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8249 	}
8250 
8251 	return 0;
8252 }
8253 
8254 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8255 {
8256 #define HCLGE_DESC_NUMBER 3
8257 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8258 	int i, j;
8259 
8260 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8261 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8262 			if (desc[i].data[j])
8263 				return false;
8264 
8265 	return true;
8266 }
8267 
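/* Pack the 6-byte mac address into the hi32/lo16 fields of a mac vlan table
 * entry and set the entry flags; multicast entries also get the entry type
 * and mc enable bits set.
 */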
8268 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8269 				   const u8 *addr, bool is_mc)
8270 {
8271 	const unsigned char *mac_addr = addr;
8272 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8273 		       (mac_addr[0]) | (mac_addr[1] << 8);
8274 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8275 
8276 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8277 	if (is_mc) {
8278 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8279 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8280 	}
8281 
8282 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8283 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8284 }
8285 
8286 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8287 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8288 {
8289 	struct hclge_dev *hdev = vport->back;
8290 	struct hclge_desc desc;
8291 	u8 resp_code;
8292 	u16 retval;
8293 	int ret;
8294 
8295 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8296 
8297 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8298 
8299 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8300 	if (ret) {
8301 		dev_err(&hdev->pdev->dev,
8302 			"del mac addr failed for cmd_send, ret =%d.\n",
8303 			ret);
8304 		return ret;
8305 	}
8306 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8307 	retval = le16_to_cpu(desc.retval);
8308 
8309 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8310 					     HCLGE_MAC_VLAN_REMOVE);
8311 }
8312 
8313 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8314 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8315 				     struct hclge_desc *desc,
8316 				     bool is_mc)
8317 {
8318 	struct hclge_dev *hdev = vport->back;
8319 	u8 resp_code;
8320 	u16 retval;
8321 	int ret;
8322 
8323 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8324 	if (is_mc) {
8325 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8326 		memcpy(desc[0].data,
8327 		       req,
8328 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8329 		hclge_cmd_setup_basic_desc(&desc[1],
8330 					   HCLGE_OPC_MAC_VLAN_ADD,
8331 					   true);
8332 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8333 		hclge_cmd_setup_basic_desc(&desc[2],
8334 					   HCLGE_OPC_MAC_VLAN_ADD,
8335 					   true);
8336 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8337 	} else {
8338 		memcpy(desc[0].data,
8339 		       req,
8340 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8341 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8342 	}
8343 	if (ret) {
8344 		dev_err(&hdev->pdev->dev,
8345 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8346 			ret);
8347 		return ret;
8348 	}
8349 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8350 	retval = le16_to_cpu(desc[0].retval);
8351 
8352 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8353 					     HCLGE_MAC_VLAN_LKUP);
8354 }
8355 
8356 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8357 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8358 				  struct hclge_desc *mc_desc)
8359 {
8360 	struct hclge_dev *hdev = vport->back;
8361 	int cfg_status;
8362 	u8 resp_code;
8363 	u16 retval;
8364 	int ret;
8365 
8366 	if (!mc_desc) {
8367 		struct hclge_desc desc;
8368 
8369 		hclge_cmd_setup_basic_desc(&desc,
8370 					   HCLGE_OPC_MAC_VLAN_ADD,
8371 					   false);
8372 		memcpy(desc.data, req,
8373 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8374 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8375 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8376 		retval = le16_to_cpu(desc.retval);
8377 
8378 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8379 							   resp_code,
8380 							   HCLGE_MAC_VLAN_ADD);
8381 	} else {
8382 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8383 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8384 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8385 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8386 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8387 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8388 		memcpy(mc_desc[0].data, req,
8389 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8390 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8391 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8392 		retval = le16_to_cpu(mc_desc[0].retval);
8393 
8394 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8395 							   resp_code,
8396 							   HCLGE_MAC_VLAN_ADD);
8397 	}
8398 
8399 	if (ret) {
8400 		dev_err(&hdev->pdev->dev,
8401 			"add mac addr failed for cmd_send, ret =%d.\n",
8402 			ret);
8403 		return ret;
8404 	}
8405 
8406 	return cfg_status;
8407 }
8408 
8409 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8410 			       u16 *allocated_size)
8411 {
8412 	struct hclge_umv_spc_alc_cmd *req;
8413 	struct hclge_desc desc;
8414 	int ret;
8415 
8416 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8417 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8418 
8419 	req->space_size = cpu_to_le32(space_size);
8420 
8421 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8422 	if (ret) {
8423 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8424 			ret);
8425 		return ret;
8426 	}
8427 
8428 	*allocated_size = le32_to_cpu(desc.data[1]);
8429 
8430 	return 0;
8431 }
8432 
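/* Request the wanted unicast mac vlan (UMV) space from firmware and split
 * the allocated space into a private quota per vport plus a shared pool.
 */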
8433 static int hclge_init_umv_space(struct hclge_dev *hdev)
8434 {
8435 	u16 allocated_size = 0;
8436 	int ret;
8437 
8438 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8439 	if (ret)
8440 		return ret;
8441 
8442 	if (allocated_size < hdev->wanted_umv_size)
8443 		dev_warn(&hdev->pdev->dev,
8444 			 "failed to alloc umv space, want %u, get %u\n",
8445 			 hdev->wanted_umv_size, allocated_size);
8446 
8447 	hdev->max_umv_size = allocated_size;
8448 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8449 	hdev->share_umv_size = hdev->priv_umv_size +
8450 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8451 
8452 	return 0;
8453 }
8454 
8455 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8456 {
8457 	struct hclge_vport *vport;
8458 	int i;
8459 
8460 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8461 		vport = &hdev->vport[i];
8462 		vport->used_umv_num = 0;
8463 	}
8464 
8465 	mutex_lock(&hdev->vport_lock);
8466 	hdev->share_umv_size = hdev->priv_umv_size +
8467 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8468 	mutex_unlock(&hdev->vport_lock);
8469 }
8470 
8471 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8472 {
8473 	struct hclge_dev *hdev = vport->back;
8474 	bool is_full;
8475 
8476 	if (need_lock)
8477 		mutex_lock(&hdev->vport_lock);
8478 
8479 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8480 		   hdev->share_umv_size == 0);
8481 
8482 	if (need_lock)
8483 		mutex_unlock(&hdev->vport_lock);
8484 
8485 	return is_full;
8486 }
8487 
8488 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8489 {
8490 	struct hclge_dev *hdev = vport->back;
8491 
8492 	if (is_free) {
8493 		if (vport->used_umv_num > hdev->priv_umv_size)
8494 			hdev->share_umv_size++;
8495 
8496 		if (vport->used_umv_num > 0)
8497 			vport->used_umv_num--;
8498 	} else {
8499 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8500 		    hdev->share_umv_size > 0)
8501 			hdev->share_umv_size--;
8502 		vport->used_umv_num++;
8503 	}
8504 }
8505 
8506 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8507 						  const u8 *mac_addr)
8508 {
8509 	struct hclge_mac_node *mac_node, *tmp;
8510 
8511 	list_for_each_entry_safe(mac_node, tmp, list, node)
8512 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8513 			return mac_node;
8514 
8515 	return NULL;
8516 }
8517 
8518 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8519 				  enum HCLGE_MAC_NODE_STATE state)
8520 {
8521 	switch (state) {
8522 	/* from set_rx_mode or tmp_add_list */
8523 	case HCLGE_MAC_TO_ADD:
8524 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8525 			mac_node->state = HCLGE_MAC_ACTIVE;
8526 		break;
8527 	/* only from set_rx_mode */
8528 	case HCLGE_MAC_TO_DEL:
8529 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8530 			list_del(&mac_node->node);
8531 			kfree(mac_node);
8532 		} else {
8533 			mac_node->state = HCLGE_MAC_TO_DEL;
8534 		}
8535 		break;
8536 	/* only from tmp_add_list; in this path the existing
8537 	 * mac_node->state won't be ACTIVE
8538 	 */
8539 	case HCLGE_MAC_ACTIVE:
8540 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8541 			mac_node->state = HCLGE_MAC_ACTIVE;
8542 
8543 		break;
8544 	}
8545 }
8546 
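/* Record a requested mac address state change (TO_ADD or TO_DEL) in the
 * vport's unicast or multicast mac list; the hardware table is updated later
 * when the mac list is synced.
 */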
8547 int hclge_update_mac_list(struct hclge_vport *vport,
8548 			  enum HCLGE_MAC_NODE_STATE state,
8549 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8550 			  const unsigned char *addr)
8551 {
8552 	struct hclge_dev *hdev = vport->back;
8553 	struct hclge_mac_node *mac_node;
8554 	struct list_head *list;
8555 
8556 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8557 		&vport->uc_mac_list : &vport->mc_mac_list;
8558 
8559 	spin_lock_bh(&vport->mac_list_lock);
8560 
8561 	/* if the mac addr is already in the mac list, there is no need to add
8562 	 * a new node for it; just check the current state and either convert
8563 	 * it to a new state, remove it, or do nothing.
8564 	 */
8565 	mac_node = hclge_find_mac_node(list, addr);
8566 	if (mac_node) {
8567 		hclge_update_mac_node(mac_node, state);
8568 		spin_unlock_bh(&vport->mac_list_lock);
8569 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8570 		return 0;
8571 	}
8572 
8573 	/* if this address was never added, there is nothing to delete */
8574 	if (state == HCLGE_MAC_TO_DEL) {
8575 		spin_unlock_bh(&vport->mac_list_lock);
8576 		dev_err(&hdev->pdev->dev,
8577 			"failed to delete address %pM from mac list\n",
8578 			addr);
8579 		return -ENOENT;
8580 	}
8581 
8582 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8583 	if (!mac_node) {
8584 		spin_unlock_bh(&vport->mac_list_lock);
8585 		return -ENOMEM;
8586 	}
8587 
8588 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8589 
8590 	mac_node->state = state;
8591 	ether_addr_copy(mac_node->mac_addr, addr);
8592 	list_add_tail(&mac_node->node, list);
8593 
8594 	spin_unlock_bh(&vport->mac_list_lock);
8595 
8596 	return 0;
8597 }
8598 
8599 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8600 			     const unsigned char *addr)
8601 {
8602 	struct hclge_vport *vport = hclge_get_vport(handle);
8603 
8604 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8605 				     addr);
8606 }
8607 
8608 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8609 			     const unsigned char *addr)
8610 {
8611 	struct hclge_dev *hdev = vport->back;
8612 	struct hclge_mac_vlan_tbl_entry_cmd req;
8613 	struct hclge_desc desc;
8614 	u16 egress_port = 0;
8615 	int ret;
8616 
8617 	/* mac addr check */
8618 	if (is_zero_ether_addr(addr) ||
8619 	    is_broadcast_ether_addr(addr) ||
8620 	    is_multicast_ether_addr(addr)) {
8621 		dev_err(&hdev->pdev->dev,
8622 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8623 			 addr, is_zero_ether_addr(addr),
8624 			 is_broadcast_ether_addr(addr),
8625 			 is_multicast_ether_addr(addr));
8626 		return -EINVAL;
8627 	}
8628 
8629 	memset(&req, 0, sizeof(req));
8630 
8631 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8632 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8633 
8634 	req.egress_port = cpu_to_le16(egress_port);
8635 
8636 	hclge_prepare_mac_addr(&req, addr, false);
8637 
8638 	/* Look up the mac address in the mac_vlan table, and add
8639 	 * it if the entry does not exist. Duplicate unicast entries
8640 	 * are not allowed in the mac vlan table.
8641 	 */
8642 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8643 	if (ret == -ENOENT) {
8644 		mutex_lock(&hdev->vport_lock);
8645 		if (!hclge_is_umv_space_full(vport, false)) {
8646 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8647 			if (!ret)
8648 				hclge_update_umv_space(vport, false);
8649 			mutex_unlock(&hdev->vport_lock);
8650 			return ret;
8651 		}
8652 		mutex_unlock(&hdev->vport_lock);
8653 
8654 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8655 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8656 				hdev->priv_umv_size);
8657 
8658 		return -ENOSPC;
8659 	}
8660 
8661 	/* check if we just hit the duplicate */
8662 	if (!ret) {
8663 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8664 			 vport->vport_id, addr);
8665 		return 0;
8666 	}
8667 
8668 	dev_err(&hdev->pdev->dev,
8669 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8670 		addr);
8671 
8672 	return ret;
8673 }
8674 
8675 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8676 			    const unsigned char *addr)
8677 {
8678 	struct hclge_vport *vport = hclge_get_vport(handle);
8679 
8680 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8681 				     addr);
8682 }
8683 
8684 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8685 			    const unsigned char *addr)
8686 {
8687 	struct hclge_dev *hdev = vport->back;
8688 	struct hclge_mac_vlan_tbl_entry_cmd req;
8689 	int ret;
8690 
8691 	/* mac addr check */
8692 	if (is_zero_ether_addr(addr) ||
8693 	    is_broadcast_ether_addr(addr) ||
8694 	    is_multicast_ether_addr(addr)) {
8695 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8696 			addr);
8697 		return -EINVAL;
8698 	}
8699 
8700 	memset(&req, 0, sizeof(req));
8701 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8702 	hclge_prepare_mac_addr(&req, addr, false);
8703 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8704 	if (!ret) {
8705 		mutex_lock(&hdev->vport_lock);
8706 		hclge_update_umv_space(vport, true);
8707 		mutex_unlock(&hdev->vport_lock);
8708 	} else if (ret == -ENOENT) {
8709 		ret = 0;
8710 	}
8711 
8712 	return ret;
8713 }
8714 
8715 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8716 			     const unsigned char *addr)
8717 {
8718 	struct hclge_vport *vport = hclge_get_vport(handle);
8719 
8720 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8721 				     addr);
8722 }
8723 
8724 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8725 			     const unsigned char *addr)
8726 {
8727 	struct hclge_dev *hdev = vport->back;
8728 	struct hclge_mac_vlan_tbl_entry_cmd req;
8729 	struct hclge_desc desc[3];
8730 	int status;
8731 
8732 	/* mac addr check */
8733 	if (!is_multicast_ether_addr(addr)) {
8734 		dev_err(&hdev->pdev->dev,
8735 			"Add mc mac err! invalid mac:%pM.\n",
8736 			 addr);
8737 		return -EINVAL;
8738 	}
8739 	memset(&req, 0, sizeof(req));
8740 	hclge_prepare_mac_addr(&req, addr, true);
8741 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8742 	if (status) {
8743 		/* This mac addr does not exist; add a new entry for it */
8744 		memset(desc[0].data, 0, sizeof(desc[0].data));
8745 		memset(desc[1].data, 0, sizeof(desc[1].data));
8746 		memset(desc[2].data, 0, sizeof(desc[2].data));
8747 	}
8748 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8749 	if (status)
8750 		return status;
8751 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8752 	/* if the table has already overflowed, do not print every time */
8753 	if (status == -ENOSPC &&
8754 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8755 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8756 
8757 	return status;
8758 }
8759 
8760 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8761 			    const unsigned char *addr)
8762 {
8763 	struct hclge_vport *vport = hclge_get_vport(handle);
8764 
8765 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8766 				     addr);
8767 }
8768 
8769 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8770 			    const unsigned char *addr)
8771 {
8772 	struct hclge_dev *hdev = vport->back;
8773 	struct hclge_mac_vlan_tbl_entry_cmd req;
8774 	enum hclge_cmd_status status;
8775 	struct hclge_desc desc[3];
8776 
8777 	/* mac addr check */
8778 	if (!is_multicast_ether_addr(addr)) {
8779 		dev_dbg(&hdev->pdev->dev,
8780 			"Remove mc mac err! invalid mac:%pM.\n",
8781 			 addr);
8782 		return -EINVAL;
8783 	}
8784 
8785 	memset(&req, 0, sizeof(req));
8786 	hclge_prepare_mac_addr(&req, addr, true);
8787 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8788 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
8790 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8791 		if (status)
8792 			return status;
8793 
8794 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
8796 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8797 		else
			/* Not all the vfids are zero, so just update the vfids */
8799 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8800 	} else if (status == -ENOENT) {
8801 		status = 0;
8802 	}
8803 
8804 	return status;
8805 }
8806 
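/* Write the mac addresses on @list to hardware with @sync. Nodes that are
 * configured successfully become ACTIVE; on the first failure the vport is
 * marked for a later retry and the walk stops.
 */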
8807 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8808 				      struct list_head *list,
8809 				      int (*sync)(struct hclge_vport *,
8810 						  const unsigned char *))
8811 {
8812 	struct hclge_mac_node *mac_node, *tmp;
8813 	int ret;
8814 
8815 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8816 		ret = sync(vport, mac_node->mac_addr);
8817 		if (!ret) {
8818 			mac_node->state = HCLGE_MAC_ACTIVE;
8819 		} else {
8820 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8821 				&vport->state);
8822 			break;
8823 		}
8824 	}
8825 }
8826 
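/* Remove the mac addresses on @list from hardware with @unsync. Nodes that
 * are removed, or already absent (-ENOENT), are freed; on any other failure
 * the vport is marked for a later retry and the walk stops.
 */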
8827 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8828 					struct list_head *list,
8829 					int (*unsync)(struct hclge_vport *,
8830 						      const unsigned char *))
8831 {
8832 	struct hclge_mac_node *mac_node, *tmp;
8833 	int ret;
8834 
8835 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8836 		ret = unsync(vport, mac_node->mac_addr);
8837 		if (!ret || ret == -ENOENT) {
8838 			list_del(&mac_node->node);
8839 			kfree(mac_node);
8840 		} else {
8841 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8842 				&vport->state);
8843 			break;
8844 		}
8845 	}
8846 }
8847 
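/* Merge tmp_add_list back into the vport mac list after the hardware sync.
 * Returns true only if every address reached the ACTIVE state.
 */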
8848 static bool hclge_sync_from_add_list(struct list_head *add_list,
8849 				     struct list_head *mac_list)
8850 {
8851 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8852 	bool all_added = true;
8853 
8854 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8855 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8856 			all_added = false;
8857 
		/* If the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request was received during the
		 * time window of adding the mac address into the mac table.
		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
		 * will be removed next time; otherwise it must be TO_ADD,
		 * meaning this address hasn't been added into the mac table
		 * yet, so just remove the mac node.
		 */
8866 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8867 		if (new_node) {
8868 			hclge_update_mac_node(new_node, mac_node->state);
8869 			list_del(&mac_node->node);
8870 			kfree(mac_node);
8871 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8872 			mac_node->state = HCLGE_MAC_TO_DEL;
8873 			list_move_tail(&mac_node->node, mac_list);
8874 		} else {
8875 			list_del(&mac_node->node);
8876 			kfree(mac_node);
8877 		}
8878 	}
8879 
8880 	return all_added;
8881 }
8882 
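/* Merge tmp_del_list back into the vport mac list after the hardware sync.
 * Addresses that could not be deleted are moved back so the deletion can be
 * retried, unless a new TO_ADD request arrived for them in the meantime.
 */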
8883 static void hclge_sync_from_del_list(struct list_head *del_list,
8884 				     struct list_head *mac_list)
8885 {
8886 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8887 
8888 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8889 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8890 		if (new_node) {
			/* If the mac addr exists in the mac list, a new
			 * TO_ADD request was received during the time window
			 * of configuring the mac address. Since the mac node
			 * state is TO_ADD and the address is already in the
			 * hardware (because the delete failed), just change
			 * the mac node state to ACTIVE.
			 */
8898 			new_node->state = HCLGE_MAC_ACTIVE;
8899 			list_del(&mac_node->node);
8900 			kfree(mac_node);
8901 		} else {
8902 			list_move_tail(&mac_node->node, mac_list);
8903 		}
8904 	}
8905 }
8906 
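/* Track whether the uc/mc mac table has overflowed for @vport: set the
 * matching UPE/MPE overflow flag when not all addresses could be added,
 * clear it otherwise.
 */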
8907 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8908 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8909 					bool is_all_added)
8910 {
8911 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8912 		if (is_all_added)
8913 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8914 		else
8915 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8916 	} else {
8917 		if (is_all_added)
8918 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8919 		else
8920 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8921 	}
8922 }
8923 
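/* Synchronize the uc or mc mac list of @vport with the hardware mac table:
 * split the list into delete and add batches under the list lock, apply the
 * batches to hardware outside the lock, then merge the results back and
 * update the overflow flags.
 */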
8924 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8925 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8926 {
8927 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8928 	struct list_head tmp_add_list, tmp_del_list;
8929 	struct list_head *list;
8930 	bool all_added;
8931 
8932 	INIT_LIST_HEAD(&tmp_add_list);
8933 	INIT_LIST_HEAD(&tmp_del_list);
8934 
	/* move the mac addrs to the tmp_add_list and tmp_del_list, so they
	 * can be added/deleted outside the spin lock
	 */
8938 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8939 		&vport->uc_mac_list : &vport->mc_mac_list;
8940 
8941 	spin_lock_bh(&vport->mac_list_lock);
8942 
8943 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8944 		switch (mac_node->state) {
8945 		case HCLGE_MAC_TO_DEL:
8946 			list_move_tail(&mac_node->node, &tmp_del_list);
8947 			break;
8948 		case HCLGE_MAC_TO_ADD:
8949 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8950 			if (!new_node)
8951 				goto stop_traverse;
8952 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8953 			new_node->state = mac_node->state;
8954 			list_add_tail(&new_node->node, &tmp_add_list);
8955 			break;
8956 		default:
8957 			break;
8958 		}
8959 	}
8960 
8961 stop_traverse:
8962 	spin_unlock_bh(&vport->mac_list_lock);
8963 
	/* delete first, to free up the maximum mac table space for adding */
8965 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8966 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8967 					    hclge_rm_uc_addr_common);
8968 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8969 					  hclge_add_uc_addr_common);
8970 	} else {
8971 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8972 					    hclge_rm_mc_addr_common);
8973 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8974 					  hclge_add_mc_addr_common);
8975 	}
8976 
	/* if adding/deleting some mac addresses failed, move them back to
	 * the mac_list and retry next time.
	 */
8980 	spin_lock_bh(&vport->mac_list_lock);
8981 
8982 	hclge_sync_from_del_list(&tmp_del_list, list);
8983 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8984 
8985 	spin_unlock_bh(&vport->mac_list_lock);
8986 
8987 	hclge_update_overflow_flags(vport, mac_type, all_added);
8988 }
8989 
8990 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8991 {
8992 	struct hclge_dev *hdev = vport->back;
8993 
8994 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8995 		return false;
8996 
8997 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8998 		return true;
8999 
9000 	return false;
9001 }
9002 
9003 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9004 {
9005 	int i;
9006 
9007 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9008 		struct hclge_vport *vport = &hdev->vport[i];
9009 
9010 		if (!hclge_need_sync_mac_table(vport))
9011 			continue;
9012 
9013 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9014 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9015 	}
9016 }
9017 
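/* Collect the entries of @list that have to be removed from hardware into
 * @tmp_del_list. When @is_del_list is set, pending TO_ADD entries (not yet
 * written to hardware) are simply dropped.
 */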
9018 static void hclge_build_del_list(struct list_head *list,
9019 				 bool is_del_list,
9020 				 struct list_head *tmp_del_list)
9021 {
9022 	struct hclge_mac_node *mac_cfg, *tmp;
9023 
9024 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9025 		switch (mac_cfg->state) {
9026 		case HCLGE_MAC_TO_DEL:
9027 		case HCLGE_MAC_ACTIVE:
9028 			list_move_tail(&mac_cfg->node, tmp_del_list);
9029 			break;
9030 		case HCLGE_MAC_TO_ADD:
9031 			if (is_del_list) {
9032 				list_del(&mac_cfg->node);
9033 				kfree(mac_cfg);
9034 			}
9035 			break;
9036 		}
9037 	}
9038 }
9039 
9040 static void hclge_unsync_del_list(struct hclge_vport *vport,
9041 				  int (*unsync)(struct hclge_vport *vport,
9042 						const unsigned char *addr),
9043 				  bool is_del_list,
9044 				  struct list_head *tmp_del_list)
9045 {
9046 	struct hclge_mac_node *mac_cfg, *tmp;
9047 	int ret;
9048 
9049 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9050 		ret = unsync(vport, mac_cfg->mac_addr);
9051 		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep them in
			 * the mac list so they can be restored after the vf
			 * reset finishes.
			 */
9056 			if (!is_del_list &&
9057 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9058 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9059 			} else {
9060 				list_del(&mac_cfg->node);
9061 				kfree(mac_cfg);
9062 			}
9063 		} else if (is_del_list) {
9064 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9065 		}
9066 	}
9067 }
9068 
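/* Remove all uc or mc mac addresses of @vport from the hardware mac table.
 * When @is_del_list is false the addresses stay in the vport mac list as
 * TO_ADD so they can be restored after a VF reset, and syncing of this
 * vport is blocked via vport_config_block.
 */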
9069 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9070 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9071 {
9072 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9073 	struct hclge_dev *hdev = vport->back;
9074 	struct list_head tmp_del_list, *list;
9075 
9076 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9077 		list = &vport->uc_mac_list;
9078 		unsync = hclge_rm_uc_addr_common;
9079 	} else {
9080 		list = &vport->mc_mac_list;
9081 		unsync = hclge_rm_mc_addr_common;
9082 	}
9083 
9084 	INIT_LIST_HEAD(&tmp_del_list);
9085 
9086 	if (!is_del_list)
9087 		set_bit(vport->vport_id, hdev->vport_config_block);
9088 
9089 	spin_lock_bh(&vport->mac_list_lock);
9090 
9091 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9092 
9093 	spin_unlock_bh(&vport->mac_list_lock);
9094 
9095 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9096 
9097 	spin_lock_bh(&vport->mac_list_lock);
9098 
9099 	hclge_sync_from_del_list(&tmp_del_list, list);
9100 
9101 	spin_unlock_bh(&vport->mac_list_lock);
9102 }
9103 
/* remove all mac addresses when uninitializing */
9105 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9106 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9107 {
9108 	struct hclge_mac_node *mac_node, *tmp;
9109 	struct hclge_dev *hdev = vport->back;
9110 	struct list_head tmp_del_list, *list;
9111 
9112 	INIT_LIST_HEAD(&tmp_del_list);
9113 
9114 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9115 		&vport->uc_mac_list : &vport->mc_mac_list;
9116 
9117 	spin_lock_bh(&vport->mac_list_lock);
9118 
9119 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9120 		switch (mac_node->state) {
9121 		case HCLGE_MAC_TO_DEL:
9122 		case HCLGE_MAC_ACTIVE:
9123 			list_move_tail(&mac_node->node, &tmp_del_list);
9124 			break;
9125 		case HCLGE_MAC_TO_ADD:
9126 			list_del(&mac_node->node);
9127 			kfree(mac_node);
9128 			break;
9129 		}
9130 	}
9131 
9132 	spin_unlock_bh(&vport->mac_list_lock);
9133 
9134 	if (mac_type == HCLGE_MAC_ADDR_UC)
9135 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9136 					    hclge_rm_uc_addr_common);
9137 	else
9138 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9139 					    hclge_rm_mc_addr_common);
9140 
9141 	if (!list_empty(&tmp_del_list))
9142 		dev_warn(&hdev->pdev->dev,
			 "failed to completely uninit %s mac list for vport %u\n",
9144 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9145 			 vport->vport_id);
9146 
9147 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9148 		list_del(&mac_node->node);
9149 		kfree(mac_node);
9150 	}
9151 }
9152 
9153 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9154 {
9155 	struct hclge_vport *vport;
9156 	int i;
9157 
9158 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9159 		vport = &hdev->vport[i];
9160 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9161 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9162 	}
9163 }
9164 
9165 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9166 					      u16 cmdq_resp, u8 resp_code)
9167 {
9168 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9169 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9170 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9171 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9172 
9173 	int return_status;
9174 
9175 	if (cmdq_resp) {
9176 		dev_err(&hdev->pdev->dev,
9177 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9178 			cmdq_resp);
9179 		return -EIO;
9180 	}
9181 
9182 	switch (resp_code) {
9183 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9184 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9185 		return_status = 0;
9186 		break;
9187 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9188 		dev_err(&hdev->pdev->dev,
9189 			"add mac ethertype failed for manager table overflow.\n");
9190 		return_status = -EIO;
9191 		break;
9192 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9193 		dev_err(&hdev->pdev->dev,
9194 			"add mac ethertype failed for key conflict.\n");
9195 		return_status = -EIO;
9196 		break;
9197 	default:
9198 		dev_err(&hdev->pdev->dev,
9199 			"add mac ethertype failed for undefined, code=%u.\n",
9200 			resp_code);
9201 		return_status = -EIO;
9202 	}
9203 
9204 	return return_status;
9205 }
9206 
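/* Check whether @mac_addr is already in use, either in the hardware uc
 * mac_vlan table or as the configured mac address of another VF.
 */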
9207 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9208 				     u8 *mac_addr)
9209 {
9210 	struct hclge_mac_vlan_tbl_entry_cmd req;
9211 	struct hclge_dev *hdev = vport->back;
9212 	struct hclge_desc desc;
9213 	u16 egress_port = 0;
9214 	int i;
9215 
9216 	if (is_zero_ether_addr(mac_addr))
9217 		return false;
9218 
9219 	memset(&req, 0, sizeof(req));
9220 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9221 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9222 	req.egress_port = cpu_to_le16(egress_port);
9223 	hclge_prepare_mac_addr(&req, mac_addr, false);
9224 
9225 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9226 		return true;
9227 
9228 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9229 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9230 		if (i != vf_idx &&
9231 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9232 			return true;
9233 
9234 	return false;
9235 }
9236 
9237 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9238 			    u8 *mac_addr)
9239 {
9240 	struct hclge_vport *vport = hclge_get_vport(handle);
9241 	struct hclge_dev *hdev = vport->back;
9242 
9243 	vport = hclge_get_vf_vport(hdev, vf);
9244 	if (!vport)
9245 		return -EINVAL;
9246 
9247 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9248 		dev_info(&hdev->pdev->dev,
9249 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9250 			 mac_addr);
9251 		return 0;
9252 	}
9253 
9254 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9255 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9256 			mac_addr);
9257 		return -EEXIST;
9258 	}
9259 
9260 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9261 
9262 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9263 		dev_info(&hdev->pdev->dev,
9264 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9265 			 vf, mac_addr);
9266 		return hclge_inform_reset_assert_to_vf(vport);
9267 	}
9268 
9269 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9270 		 vf, mac_addr);
9271 	return 0;
9272 }
9273 
9274 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9275 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9276 {
9277 	struct hclge_desc desc;
9278 	u8 resp_code;
9279 	u16 retval;
9280 	int ret;
9281 
9282 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9283 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9284 
9285 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9286 	if (ret) {
9287 		dev_err(&hdev->pdev->dev,
9288 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9289 			ret);
9290 		return ret;
9291 	}
9292 
9293 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9294 	retval = le16_to_cpu(desc.retval);
9295 
9296 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9297 }
9298 
9299 static int init_mgr_tbl(struct hclge_dev *hdev)
9300 {
9301 	int ret;
9302 	int i;
9303 
9304 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9305 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9306 		if (ret) {
9307 			dev_err(&hdev->pdev->dev,
9308 				"add mac ethertype failed, ret =%d.\n",
9309 				ret);
9310 			return ret;
9311 		}
9312 	}
9313 
9314 	return 0;
9315 }
9316 
9317 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9318 {
9319 	struct hclge_vport *vport = hclge_get_vport(handle);
9320 	struct hclge_dev *hdev = vport->back;
9321 
9322 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9323 }
9324 
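/* Update the uc mac list of @vport when the device address changes: queue
 * @new_addr for adding (kept at the list head) and, if it differs, queue
 * @old_addr for deletion. Called with mac_list_lock held, hence GFP_ATOMIC.
 */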
9325 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9326 				       const u8 *old_addr, const u8 *new_addr)
9327 {
9328 	struct list_head *list = &vport->uc_mac_list;
9329 	struct hclge_mac_node *old_node, *new_node;
9330 
9331 	new_node = hclge_find_mac_node(list, new_addr);
9332 	if (!new_node) {
9333 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9334 		if (!new_node)
9335 			return -ENOMEM;
9336 
9337 		new_node->state = HCLGE_MAC_TO_ADD;
9338 		ether_addr_copy(new_node->mac_addr, new_addr);
9339 		list_add(&new_node->node, list);
9340 	} else {
9341 		if (new_node->state == HCLGE_MAC_TO_DEL)
9342 			new_node->state = HCLGE_MAC_ACTIVE;
9343 
		/* make sure the new addr is at the list head, so the dev
		 * addr is not left out of the mac table due to the umv space
		 * limitation after a global/imp reset, which clears the mac
		 * table in hardware.
		 */
9349 		list_move(&new_node->node, list);
9350 	}
9351 
9352 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9353 		old_node = hclge_find_mac_node(list, old_addr);
9354 		if (old_node) {
9355 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9356 				list_del(&old_node->node);
9357 				kfree(old_node);
9358 			} else {
9359 				old_node->state = HCLGE_MAC_TO_DEL;
9360 			}
9361 		}
9362 	}
9363 
9364 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9365 
9366 	return 0;
9367 }
9368 
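/* Change the device mac address: reconfigure the mac pause address first,
 * then queue the old and new addresses in the uc mac list so the periodic
 * sync task updates the hardware mac_vlan table.
 */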
9369 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9370 			      bool is_first)
9371 {
9372 	const unsigned char *new_addr = (const unsigned char *)p;
9373 	struct hclge_vport *vport = hclge_get_vport(handle);
9374 	struct hclge_dev *hdev = vport->back;
9375 	unsigned char *old_addr = NULL;
9376 	int ret;
9377 
9378 	/* mac addr check */
9379 	if (is_zero_ether_addr(new_addr) ||
9380 	    is_broadcast_ether_addr(new_addr) ||
9381 	    is_multicast_ether_addr(new_addr)) {
9382 		dev_err(&hdev->pdev->dev,
9383 			"change uc mac err! invalid mac: %pM.\n",
9384 			 new_addr);
9385 		return -EINVAL;
9386 	}
9387 
9388 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9389 	if (ret) {
9390 		dev_err(&hdev->pdev->dev,
9391 			"failed to configure mac pause address, ret = %d\n",
9392 			ret);
9393 		return ret;
9394 	}
9395 
9396 	if (!is_first)
9397 		old_addr = hdev->hw.mac.mac_addr;
9398 
9399 	spin_lock_bh(&vport->mac_list_lock);
9400 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9401 	if (ret) {
9402 		dev_err(&hdev->pdev->dev,
9403 			"failed to change the mac addr:%pM, ret = %d\n",
9404 			new_addr, ret);
9405 		spin_unlock_bh(&vport->mac_list_lock);
9406 
9407 		if (!is_first)
9408 			hclge_pause_addr_cfg(hdev, old_addr);
9409 
9410 		return ret;
9411 	}
	/* the dev addr must be updated under the spin lock to prevent it
	 * from being removed by the set_rx_mode path.
	 */
9415 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9416 	spin_unlock_bh(&vport->mac_list_lock);
9417 
9418 	hclge_task_schedule(hdev, 0);
9419 
9420 	return 0;
9421 }
9422 
9423 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9424 {
9425 	struct mii_ioctl_data *data = if_mii(ifr);
9426 
9427 	if (!hnae3_dev_phy_imp_supported(hdev))
9428 		return -EOPNOTSUPP;
9429 
9430 	switch (cmd) {
9431 	case SIOCGMIIPHY:
9432 		data->phy_id = hdev->hw.mac.phy_addr;
9433 		/* this command reads phy id and register at the same time */
9434 		fallthrough;
9435 	case SIOCGMIIREG:
9436 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9437 		return 0;
9438 
9439 	case SIOCSMIIREG:
9440 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9441 	default:
9442 		return -EOPNOTSUPP;
9443 	}
9444 }
9445 
9446 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9447 			  int cmd)
9448 {
9449 	struct hclge_vport *vport = hclge_get_vport(handle);
9450 	struct hclge_dev *hdev = vport->back;
9451 
9452 	switch (cmd) {
9453 	case SIOCGHWTSTAMP:
9454 		return hclge_ptp_get_cfg(hdev, ifr);
9455 	case SIOCSHWTSTAMP:
9456 		return hclge_ptp_set_cfg(hdev, ifr);
9457 	default:
9458 		if (!hdev->hw.mac.phydev)
9459 			return hclge_mii_ioctl(hdev, ifr, cmd);
9460 	}
9461 
9462 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9463 }
9464 
9465 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9466 					     bool bypass_en)
9467 {
9468 	struct hclge_port_vlan_filter_bypass_cmd *req;
9469 	struct hclge_desc desc;
9470 	int ret;
9471 
9472 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9473 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9474 	req->vf_id = vf_id;
9475 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9476 		      bypass_en ? 1 : 0);
9477 
9478 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9479 	if (ret)
9480 		dev_err(&hdev->pdev->dev,
9481 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9482 			vf_id, ret);
9483 
9484 	return ret;
9485 }
9486 
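/* Enable or disable the @fe_type bits of the vlan filter for @vf_id by a
 * read-modify-write of the vlan filter control configuration.
 */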
9487 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9488 				      u8 fe_type, bool filter_en, u8 vf_id)
9489 {
9490 	struct hclge_vlan_filter_ctrl_cmd *req;
9491 	struct hclge_desc desc;
9492 	int ret;
9493 
9494 	/* read current vlan filter parameter */
9495 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9496 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9497 	req->vlan_type = vlan_type;
9498 	req->vf_id = vf_id;
9499 
9500 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9501 	if (ret) {
9502 		dev_err(&hdev->pdev->dev,
9503 			"failed to get vlan filter config, ret = %d.\n", ret);
9504 		return ret;
9505 	}
9506 
9507 	/* modify and write new config parameter */
9508 	hclge_cmd_reuse_desc(&desc, false);
9509 	req->vlan_fe = filter_en ?
9510 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9511 
9512 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9513 	if (ret)
9514 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9515 			ret);
9516 
9517 	return ret;
9518 }
9519 
9520 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9521 {
9522 	struct hclge_dev *hdev = vport->back;
9523 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9524 	int ret;
9525 
9526 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9527 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9528 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9529 						  enable, vport->vport_id);
9530 
9531 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9532 					 HCLGE_FILTER_FE_EGRESS, enable,
9533 					 vport->vport_id);
9534 	if (ret)
9535 		return ret;
9536 
9537 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
9538 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9539 							!enable);
9540 	else if (!vport->vport_id)
9541 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9542 						 HCLGE_FILTER_FE_INGRESS,
9543 						 enable, 0);
9544 
9545 	return ret;
9546 }
9547 
9548 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9549 {
9550 	struct hnae3_handle *handle = &vport->nic;
9551 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9552 	struct hclge_dev *hdev = vport->back;
9553 
9554 	if (vport->vport_id) {
9555 		if (vport->port_base_vlan_cfg.state !=
9556 			HNAE3_PORT_BASE_VLAN_DISABLE)
9557 			return true;
9558 
9559 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9560 			return false;
9561 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9562 		return false;
9563 	}
9564 
9565 	if (!vport->req_vlan_fltr_en)
9566 		return false;
9567 
	/* for compatibility with older devices, always enable vlan filter */
9569 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9570 		return true;
9571 
9572 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9573 		if (vlan->vlan_id != 0)
9574 			return true;
9575 
9576 	return false;
9577 }
9578 
9579 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9580 {
9581 	struct hclge_dev *hdev = vport->back;
9582 	bool need_en;
9583 	int ret;
9584 
9585 	mutex_lock(&hdev->vport_lock);
9586 
9587 	vport->req_vlan_fltr_en = request_en;
9588 
9589 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9590 	if (need_en == vport->cur_vlan_fltr_en) {
9591 		mutex_unlock(&hdev->vport_lock);
9592 		return 0;
9593 	}
9594 
9595 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9596 	if (ret) {
9597 		mutex_unlock(&hdev->vport_lock);
9598 		return ret;
9599 	}
9600 
9601 	vport->cur_vlan_fltr_en = need_en;
9602 
9603 	mutex_unlock(&hdev->vport_lock);
9604 
9605 	return 0;
9606 }
9607 
9608 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9609 {
9610 	struct hclge_vport *vport = hclge_get_vport(handle);
9611 
9612 	return hclge_enable_vport_vlan_filter(vport, enable);
9613 }
9614 
9615 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9616 					bool is_kill, u16 vlan,
9617 					struct hclge_desc *desc)
9618 {
9619 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9620 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9621 	u8 vf_byte_val;
9622 	u8 vf_byte_off;
9623 	int ret;
9624 
9625 	hclge_cmd_setup_basic_desc(&desc[0],
9626 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9627 	hclge_cmd_setup_basic_desc(&desc[1],
9628 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9629 
9630 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9631 
9632 	vf_byte_off = vfid / 8;
9633 	vf_byte_val = 1 << (vfid % 8);
9634 
9635 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9636 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9637 
9638 	req0->vlan_id  = cpu_to_le16(vlan);
9639 	req0->vlan_cfg = is_kill;
9640 
9641 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9642 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9643 	else
9644 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9645 
9646 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9647 	if (ret) {
9648 		dev_err(&hdev->pdev->dev,
9649 			"Send vf vlan command fail, ret =%d.\n",
9650 			ret);
9651 		return ret;
9652 	}
9653 
9654 	return 0;
9655 }
9656 
9657 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9658 					  bool is_kill, struct hclge_desc *desc)
9659 {
9660 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9661 
9662 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9663 
9664 	if (!is_kill) {
9665 #define HCLGE_VF_VLAN_NO_ENTRY	2
9666 		if (!req->resp_code || req->resp_code == 1)
9667 			return 0;
9668 
9669 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9670 			set_bit(vfid, hdev->vf_vlan_full);
9671 			dev_warn(&hdev->pdev->dev,
9672 				 "vf vlan table is full, vf vlan filter is disabled\n");
9673 			return 0;
9674 		}
9675 
9676 		dev_err(&hdev->pdev->dev,
9677 			"Add vf vlan filter fail, ret =%u.\n",
9678 			req->resp_code);
9679 	} else {
9680 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9681 		if (!req->resp_code)
9682 			return 0;
9683 
		/* vf vlan filter is disabled when the vf vlan table is full,
		 * so new vlan ids are not added into the vf vlan table.
		 * Just return 0 without warning, to avoid a flood of verbose
		 * logs on unload.
		 */
9689 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9690 			return 0;
9691 
9692 		dev_err(&hdev->pdev->dev,
9693 			"Kill vf vlan filter fail, ret =%u.\n",
9694 			req->resp_code);
9695 	}
9696 
9697 	return -EIO;
9698 }
9699 
9700 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9701 				    bool is_kill, u16 vlan)
9702 {
9703 	struct hclge_vport *vport = &hdev->vport[vfid];
9704 	struct hclge_desc desc[2];
9705 	int ret;
9706 
	/* if the vf vlan table is full, firmware disables the vf vlan
	 * filter, so it is neither possible nor necessary to add a new vlan
	 * id to it. However, if spoof check is enabled while the vf vlan
	 * table is full, a new vlan must not be added, because tx packets
	 * with that vlan id would be dropped.
	 */
9712 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9713 		if (vport->vf_info.spoofchk && vlan) {
9714 			dev_err(&hdev->pdev->dev,
9715 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9716 			return -EPERM;
9717 		}
9718 		return 0;
9719 	}
9720 
9721 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9722 	if (ret)
9723 		return ret;
9724 
9725 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9726 }
9727 
9728 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9729 				      u16 vlan_id, bool is_kill)
9730 {
9731 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9732 	struct hclge_desc desc;
9733 	u8 vlan_offset_byte_val;
9734 	u8 vlan_offset_byte;
9735 	u8 vlan_offset_160;
9736 	int ret;
9737 
9738 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9739 
9740 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9741 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9742 			   HCLGE_VLAN_BYTE_SIZE;
9743 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9744 
9745 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9746 	req->vlan_offset = vlan_offset_160;
9747 	req->vlan_cfg = is_kill;
9748 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9749 
9750 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9751 	if (ret)
9752 		dev_err(&hdev->pdev->dev,
9753 			"port vlan command, send fail, ret =%d.\n", ret);
9754 	return ret;
9755 }
9756 
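/* Program a vlan filter entry for @vport_id. The VF vlan table entry for
 * @vport_id is updated first; the shared port vlan filter is only written
 * when the first vport joins the vlan or the last one leaves it, as tracked
 * by the hdev->vlan_table bitmaps.
 */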
9757 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9758 				    u16 vport_id, u16 vlan_id,
9759 				    bool is_kill)
9760 {
9761 	u16 vport_idx, vport_num = 0;
9762 	int ret;
9763 
9764 	if (is_kill && !vlan_id)
9765 		return 0;
9766 
9767 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9768 	if (ret) {
9769 		dev_err(&hdev->pdev->dev,
9770 			"Set %u vport vlan filter config fail, ret =%d.\n",
9771 			vport_id, ret);
9772 		return ret;
9773 	}
9774 
9775 	/* vlan 0 may be added twice when 8021q module is enabled */
9776 	if (!is_kill && !vlan_id &&
9777 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9778 		return 0;
9779 
9780 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9781 		dev_err(&hdev->pdev->dev,
9782 			"Add port vlan failed, vport %u is already in vlan %u\n",
9783 			vport_id, vlan_id);
9784 		return -EINVAL;
9785 	}
9786 
9787 	if (is_kill &&
9788 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9789 		dev_err(&hdev->pdev->dev,
9790 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9791 			vport_id, vlan_id);
9792 		return -EINVAL;
9793 	}
9794 
9795 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9796 		vport_num++;
9797 
9798 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9799 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9800 						 is_kill);
9801 
9802 	return ret;
9803 }
9804 
9805 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9806 {
9807 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9808 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9809 	struct hclge_dev *hdev = vport->back;
9810 	struct hclge_desc desc;
9811 	u16 bmap_index;
9812 	int status;
9813 
9814 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9815 
9816 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9817 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9818 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9819 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9820 		      vcfg->accept_tag1 ? 1 : 0);
9821 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9822 		      vcfg->accept_untag1 ? 1 : 0);
9823 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9824 		      vcfg->accept_tag2 ? 1 : 0);
9825 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9826 		      vcfg->accept_untag2 ? 1 : 0);
9827 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9828 		      vcfg->insert_tag1_en ? 1 : 0);
9829 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9830 		      vcfg->insert_tag2_en ? 1 : 0);
9831 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9832 		      vcfg->tag_shift_mode_en ? 1 : 0);
9833 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9834 
9835 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9836 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9837 			HCLGE_VF_NUM_PER_BYTE;
9838 	req->vf_bitmap[bmap_index] =
9839 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9840 
9841 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9842 	if (status)
9843 		dev_err(&hdev->pdev->dev,
9844 			"Send port txvlan cfg command fail, ret =%d\n",
9845 			status);
9846 
9847 	return status;
9848 }
9849 
9850 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9851 {
9852 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9853 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9854 	struct hclge_dev *hdev = vport->back;
9855 	struct hclge_desc desc;
9856 	u16 bmap_index;
9857 	int status;
9858 
9859 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9860 
9861 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9862 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9863 		      vcfg->strip_tag1_en ? 1 : 0);
9864 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9865 		      vcfg->strip_tag2_en ? 1 : 0);
9866 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9867 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9868 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9869 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9870 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9871 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9872 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9873 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9874 
9875 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9876 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9877 			HCLGE_VF_NUM_PER_BYTE;
9878 	req->vf_bitmap[bmap_index] =
9879 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9880 
9881 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9882 	if (status)
9883 		dev_err(&hdev->pdev->dev,
9884 			"Send port rxvlan cfg command fail, ret =%d\n",
9885 			status);
9886 
9887 	return status;
9888 }
9889 
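/* Configure tx/rx vlan tag offload for @vport according to the port based
 * vlan state: when a port based vlan is in use, tag1 is inserted on tx from
 * @vlan_tag/@qos, and tag1 stripping on rx follows the rx vlan offload
 * setting.
 */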
9890 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9891 				  u16 port_base_vlan_state,
9892 				  u16 vlan_tag, u8 qos)
9893 {
9894 	int ret;
9895 
9896 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9897 		vport->txvlan_cfg.accept_tag1 = true;
9898 		vport->txvlan_cfg.insert_tag1_en = false;
9899 		vport->txvlan_cfg.default_tag1 = 0;
9900 	} else {
9901 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9902 
9903 		vport->txvlan_cfg.accept_tag1 =
9904 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9905 		vport->txvlan_cfg.insert_tag1_en = true;
9906 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9907 						 vlan_tag;
9908 	}
9909 
9910 	vport->txvlan_cfg.accept_untag1 = true;
9911 
	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
9916 	vport->txvlan_cfg.accept_tag2 = true;
9917 	vport->txvlan_cfg.accept_untag2 = true;
9918 	vport->txvlan_cfg.insert_tag2_en = false;
9919 	vport->txvlan_cfg.default_tag2 = 0;
9920 	vport->txvlan_cfg.tag_shift_mode_en = true;
9921 
9922 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9923 		vport->rxvlan_cfg.strip_tag1_en = false;
9924 		vport->rxvlan_cfg.strip_tag2_en =
9925 				vport->rxvlan_cfg.rx_vlan_offload_en;
9926 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9927 	} else {
9928 		vport->rxvlan_cfg.strip_tag1_en =
9929 				vport->rxvlan_cfg.rx_vlan_offload_en;
9930 		vport->rxvlan_cfg.strip_tag2_en = true;
9931 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9932 	}
9933 
9934 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9935 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9936 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9937 
9938 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9939 	if (ret)
9940 		return ret;
9941 
9942 	return hclge_set_vlan_rx_offload_cfg(vport);
9943 }
9944 
9945 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9946 {
9947 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9948 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9949 	struct hclge_desc desc;
9950 	int status;
9951 
9952 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9953 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9954 	rx_req->ot_fst_vlan_type =
9955 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9956 	rx_req->ot_sec_vlan_type =
9957 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9958 	rx_req->in_fst_vlan_type =
9959 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9960 	rx_req->in_sec_vlan_type =
9961 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9962 
9963 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9964 	if (status) {
9965 		dev_err(&hdev->pdev->dev,
9966 			"Send rxvlan protocol type command fail, ret =%d\n",
9967 			status);
9968 		return status;
9969 	}
9970 
9971 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9972 
9973 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9974 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9975 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9976 
9977 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9978 	if (status)
9979 		dev_err(&hdev->pdev->dev,
9980 			"Send txvlan protocol type command fail, ret =%d\n",
9981 			status);
9982 
9983 	return status;
9984 }
9985 
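/* Initialize the vlan configuration: enable the vlan filters supported by
 * the device revision, program the default 802.1Q protocol types, apply
 * each vport's vlan tag offload settings and finally add vlan 0.
 */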
9986 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9987 {
9988 #define HCLGE_DEF_VLAN_TYPE		0x8100
9989 
9990 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9991 	struct hclge_vport *vport;
9992 	int ret;
9993 	int i;
9994 
9995 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9996 		/* for revision 0x21, vf vlan filter is per function */
9997 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9998 			vport = &hdev->vport[i];
9999 			ret = hclge_set_vlan_filter_ctrl(hdev,
10000 							 HCLGE_FILTER_TYPE_VF,
10001 							 HCLGE_FILTER_FE_EGRESS,
10002 							 true,
10003 							 vport->vport_id);
10004 			if (ret)
10005 				return ret;
10006 			vport->cur_vlan_fltr_en = true;
10007 		}
10008 
10009 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10010 						 HCLGE_FILTER_FE_INGRESS, true,
10011 						 0);
10012 		if (ret)
10013 			return ret;
10014 	} else {
10015 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10016 						 HCLGE_FILTER_FE_EGRESS_V1_B,
10017 						 true, 0);
10018 		if (ret)
10019 			return ret;
10020 	}
10021 
10022 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10023 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10024 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10025 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10026 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10027 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10028 
10029 	ret = hclge_set_vlan_protocol_type(hdev);
10030 	if (ret)
10031 		return ret;
10032 
10033 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10034 		u16 vlan_tag;
10035 		u8 qos;
10036 
10037 		vport = &hdev->vport[i];
10038 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10039 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10040 
10041 		ret = hclge_vlan_offload_cfg(vport,
10042 					     vport->port_base_vlan_cfg.state,
10043 					     vlan_tag, qos);
10044 		if (ret)
10045 			return ret;
10046 	}
10047 
10048 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10049 }
10050 
10051 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10052 				       bool writen_to_tbl)
10053 {
10054 	struct hclge_vport_vlan_cfg *vlan;
10055 
10056 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10057 	if (!vlan)
10058 		return;
10059 
10060 	vlan->hd_tbl_status = writen_to_tbl;
10061 	vlan->vlan_id = vlan_id;
10062 
10063 	list_add_tail(&vlan->node, &vport->vlan_list);
10064 }
10065 
10066 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10067 {
10068 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10069 	struct hclge_dev *hdev = vport->back;
10070 	int ret;
10071 
10072 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10073 		if (!vlan->hd_tbl_status) {
10074 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10075 						       vport->vport_id,
10076 						       vlan->vlan_id, false);
10077 			if (ret) {
10078 				dev_err(&hdev->pdev->dev,
10079 					"restore vport vlan list failed, ret=%d\n",
10080 					ret);
10081 				return ret;
10082 			}
10083 		}
10084 		vlan->hd_tbl_status = true;
10085 	}
10086 
10087 	return 0;
10088 }
10089 
10090 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10091 				      bool is_write_tbl)
10092 {
10093 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10094 	struct hclge_dev *hdev = vport->back;
10095 
10096 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10097 		if (vlan->vlan_id == vlan_id) {
10098 			if (is_write_tbl && vlan->hd_tbl_status)
10099 				hclge_set_vlan_filter_hw(hdev,
10100 							 htons(ETH_P_8021Q),
10101 							 vport->vport_id,
10102 							 vlan_id,
10103 							 true);
10104 
10105 			list_del(&vlan->node);
10106 			kfree(vlan);
10107 			break;
10108 		}
10109 	}
10110 }
10111 
10112 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10113 {
10114 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10115 	struct hclge_dev *hdev = vport->back;
10116 
10117 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10118 		if (vlan->hd_tbl_status)
10119 			hclge_set_vlan_filter_hw(hdev,
10120 						 htons(ETH_P_8021Q),
10121 						 vport->vport_id,
10122 						 vlan->vlan_id,
10123 						 true);
10124 
10125 		vlan->hd_tbl_status = false;
10126 		if (is_del_list) {
10127 			list_del(&vlan->node);
10128 			kfree(vlan);
10129 		}
10130 	}
10131 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10132 }
10133 
10134 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10135 {
10136 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10137 	struct hclge_vport *vport;
10138 	int i;
10139 
10140 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10141 		vport = &hdev->vport[i];
10142 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10143 			list_del(&vlan->node);
10144 			kfree(vlan);
10145 		}
10146 	}
10147 }
10148 
10149 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10150 {
10151 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10152 	struct hclge_dev *hdev = vport->back;
10153 	u16 vlan_proto;
10154 	u16 vlan_id;
10155 	u16 state;
10156 	int ret;
10157 
10158 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10159 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10160 	state = vport->port_base_vlan_cfg.state;
10161 
10162 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10163 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10164 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10165 					 vport->vport_id, vlan_id,
10166 					 false);
10167 		return;
10168 	}
10169 
10170 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10171 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10172 					       vport->vport_id,
10173 					       vlan->vlan_id, false);
10174 		if (ret)
10175 			break;
10176 		vlan->hd_tbl_status = true;
10177 	}
10178 }
10179 
/* For global reset and imp reset, hardware will clear the mac table, so we
 * change the mac address state from ACTIVE to TO_ADD, then they can be
 * restored in the service task after the reset completes. Furthermore, the
 * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
 * after reset, so just remove these mac nodes from the mac_list.
 */
10186 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10187 {
10188 	struct hclge_mac_node *mac_node, *tmp;
10189 
10190 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10191 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10192 			mac_node->state = HCLGE_MAC_TO_ADD;
10193 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10194 			list_del(&mac_node->node);
10195 			kfree(mac_node);
10196 		}
10197 	}
10198 }
10199 
10200 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10201 {
10202 	spin_lock_bh(&vport->mac_list_lock);
10203 
10204 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10205 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10206 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10207 
10208 	spin_unlock_bh(&vport->mac_list_lock);
10209 }
10210 
10211 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10212 {
10213 	struct hclge_vport *vport = &hdev->vport[0];
10214 	struct hnae3_handle *handle = &vport->nic;
10215 
10216 	hclge_restore_mac_table_common(vport);
10217 	hclge_restore_vport_vlan_table(vport);
10218 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10219 	hclge_restore_fd_entries(handle);
10220 }
10221 
10222 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10223 {
10224 	struct hclge_vport *vport = hclge_get_vport(handle);
10225 
10226 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10227 		vport->rxvlan_cfg.strip_tag1_en = false;
10228 		vport->rxvlan_cfg.strip_tag2_en = enable;
10229 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10230 	} else {
10231 		vport->rxvlan_cfg.strip_tag1_en = enable;
10232 		vport->rxvlan_cfg.strip_tag2_en = true;
10233 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10234 	}
10235 
10236 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10237 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10238 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10239 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10240 
10241 	return hclge_set_vlan_rx_offload_cfg(vport);
10242 }
10243 
10244 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10245 {
10246 	struct hclge_dev *hdev = vport->back;
10247 
10248 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10249 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10250 }
10251 
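/* Switch the hardware vlan filter entries of @vport between port based vlan
 * mode and normal mode: when enabling, drop the per-vlan entries and
 * install the port based vlan; when disabling, remove the old port based
 * vlan and restore the entries from the vport vlan list.
 */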
10252 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10253 					    u16 port_base_vlan_state,
10254 					    struct hclge_vlan_info *new_info,
10255 					    struct hclge_vlan_info *old_info)
10256 {
10257 	struct hclge_dev *hdev = vport->back;
10258 	int ret;
10259 
10260 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10261 		hclge_rm_vport_all_vlan_table(vport, false);
10262 		/* force clear VLAN 0 */
10263 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10264 		if (ret)
10265 			return ret;
10266 		return hclge_set_vlan_filter_hw(hdev,
10267 						 htons(new_info->vlan_proto),
10268 						 vport->vport_id,
10269 						 new_info->vlan_tag,
10270 						 false);
10271 	}
10272 
10273 	/* force add VLAN 0 */
10274 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10275 	if (ret)
10276 		return ret;
10277 
10278 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10279 				       vport->vport_id, old_info->vlan_tag,
10280 				       true);
10281 	if (ret)
10282 		return ret;
10283 
10284 	return hclge_add_vport_all_vlan_table(vport);
10285 }
10286 
10287 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10288 					  const struct hclge_vlan_info *old_cfg)
10289 {
10290 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10291 		return true;
10292 
10293 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10294 		return true;
10295 
10296 	return false;
10297 }
10298 
10299 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10300 				    struct hclge_vlan_info *vlan_info)
10301 {
10302 	struct hnae3_handle *nic = &vport->nic;
10303 	struct hclge_vlan_info *old_vlan_info;
10304 	struct hclge_dev *hdev = vport->back;
10305 	int ret;
10306 
10307 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10308 
10309 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10310 				     vlan_info->qos);
10311 	if (ret)
10312 		return ret;
10313 
10314 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10315 		goto out;
10316 
10317 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10318 		/* add new VLAN tag */
10319 		ret = hclge_set_vlan_filter_hw(hdev,
10320 					       htons(vlan_info->vlan_proto),
10321 					       vport->vport_id,
10322 					       vlan_info->vlan_tag,
10323 					       false);
10324 		if (ret)
10325 			return ret;
10326 
10327 		/* remove old VLAN tag */
10328 		if (old_vlan_info->vlan_tag == 0)
10329 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10330 						       true, 0);
10331 		else
10332 			ret = hclge_set_vlan_filter_hw(hdev,
10333 						       htons(ETH_P_8021Q),
10334 						       vport->vport_id,
10335 						       old_vlan_info->vlan_tag,
10336 						       true);
10337 		if (ret) {
10338 			dev_err(&hdev->pdev->dev,
10339 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10340 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10341 			return ret;
10342 		}
10343 
10344 		goto out;
10345 	}
10346 
10347 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10348 					       old_vlan_info);
10349 	if (ret)
10350 		return ret;
10351 
10352 out:
10353 	vport->port_base_vlan_cfg.state = state;
10354 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10355 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10356 	else
10357 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10358 
10359 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10360 	hclge_set_vport_vlan_fltr_change(vport);
10361 
10362 	return 0;
10363 }
10364 
10365 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10366 					  enum hnae3_port_base_vlan_state state,
10367 					  u16 vlan, u8 qos)
10368 {
10369 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10370 		if (!vlan && !qos)
10371 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10372 
10373 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10374 	}
10375 
10376 	if (!vlan && !qos)
10377 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10378 
10379 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10380 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10381 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10382 
10383 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10384 }
10385 
10386 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10387 				    u16 vlan, u8 qos, __be16 proto)
10388 {
10389 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10390 	struct hclge_vport *vport = hclge_get_vport(handle);
10391 	struct hclge_dev *hdev = vport->back;
10392 	struct hclge_vlan_info vlan_info;
10393 	u16 state;
10394 	int ret;
10395 
10396 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10397 		return -EOPNOTSUPP;
10398 
10399 	vport = hclge_get_vf_vport(hdev, vfid);
10400 	if (!vport)
10401 		return -EINVAL;
10402 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10404 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10405 		return -EINVAL;
10406 	if (proto != htons(ETH_P_8021Q))
10407 		return -EPROTONOSUPPORT;
10408 
10409 	state = hclge_get_port_base_vlan_state(vport,
10410 					       vport->port_base_vlan_cfg.state,
10411 					       vlan, qos);
10412 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10413 		return 0;
10414 
10415 	vlan_info.vlan_tag = vlan;
10416 	vlan_info.qos = qos;
10417 	vlan_info.vlan_proto = ntohs(proto);
10418 
10419 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10420 	if (ret) {
10421 		dev_err(&hdev->pdev->dev,
10422 			"failed to update port base vlan for vf %d, ret = %d\n",
10423 			vfid, ret);
10424 		return ret;
10425 	}
10426 
10427 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10428 	 * VLAN state.
10429 	 */
10430 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10431 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10432 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10433 						  vport->vport_id, state,
10434 						  &vlan_info);
10435 
10436 	return 0;
10437 }
10438 
10439 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10440 {
10441 	struct hclge_vlan_info *vlan_info;
10442 	struct hclge_vport *vport;
10443 	int ret;
10444 	int vf;
10445 
	/* clear port base vlan for all vfs */
10447 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10448 		vport = &hdev->vport[vf];
10449 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10450 
10451 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10452 					       vport->vport_id,
10453 					       vlan_info->vlan_tag, true);
10454 		if (ret)
10455 			dev_err(&hdev->pdev->dev,
10456 				"failed to clear vf vlan for vf%d, ret = %d\n",
10457 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10458 	}
10459 }
10460 
10461 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10462 			  u16 vlan_id, bool is_kill)
10463 {
10464 	struct hclge_vport *vport = hclge_get_vport(handle);
10465 	struct hclge_dev *hdev = vport->back;
10466 	bool writen_to_tbl = false;
10467 	int ret = 0;
10468 
	/* When the device is resetting or the reset has failed, firmware is
	 * unable to handle the mailbox. Just record the vlan id, and remove
	 * it after the reset finishes.
	 */
10473 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10474 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10475 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10476 		return -EBUSY;
10477 	}
10478 
	/* when port base vlan is enabled, it is used as the vlan filter
	 * entry. In this case, the vlan filter table is not updated when the
	 * user adds a new vlan or removes an existing one; only the vport
	 * vlan list is updated. The vlan ids in the vlan list are written
	 * into the vlan filter table once port base vlan is disabled.
	 */
10485 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10486 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10487 					       vlan_id, is_kill);
10488 		writen_to_tbl = true;
10489 	}
10490 
10491 	if (!ret) {
10492 		if (is_kill)
10493 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10494 		else
10495 			hclge_add_vport_vlan_table(vport, vlan_id,
10496 						   writen_to_tbl);
10497 	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan
		 * id, and try to remove it from hw later, to stay consistent
		 * with the stack
		 */
10501 		 */
10502 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10503 	}
10504 
10505 	hclge_set_vport_vlan_fltr_change(vport);
10506 
10507 	return ret;
10508 }
10509 
10510 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10511 {
10512 	struct hclge_vport *vport;
10513 	int ret;
10514 	u16 i;
10515 
10516 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10517 		vport = &hdev->vport[i];
10518 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10519 					&vport->state))
10520 			continue;
10521 
10522 		ret = hclge_enable_vport_vlan_filter(vport,
10523 						     vport->req_vlan_fltr_en);
10524 		if (ret) {
10525 			dev_err(&hdev->pdev->dev,
10526 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10527 				vport->vport_id, ret);
10528 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10529 				&vport->state);
10530 			return;
10531 		}
10532 	}
10533 }
10534 
10535 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10536 {
10537 #define HCLGE_MAX_SYNC_COUNT	60
10538 
10539 	int i, ret, sync_cnt = 0;
10540 	u16 vlan_id;
10541 
10542 	/* walk all vports; vport 0 is the PF and is always alive */
10543 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10544 		struct hclge_vport *vport = &hdev->vport[i];
10545 
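		/* Retry the hardware deletion for each vlan id recorded in
		 * vlan_del_fail_bmap, limited to HCLGE_MAX_SYNC_COUNT
		 * deletions per invocation.
		 */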
10546 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10547 					 VLAN_N_VID);
10548 		while (vlan_id != VLAN_N_VID) {
10549 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10550 						       vport->vport_id, vlan_id,
10551 						       true);
10552 			if (ret && ret != -EINVAL)
10553 				return;
10554 
10555 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10556 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10557 			hclge_set_vport_vlan_fltr_change(vport);
10558 
10559 			sync_cnt++;
10560 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10561 				return;
10562 
10563 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10564 						 VLAN_N_VID);
10565 		}
10566 	}
10567 
10568 	hclge_sync_vlan_fltr_state(hdev);
10569 }
10570 
10571 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10572 {
10573 	struct hclge_config_max_frm_size_cmd *req;
10574 	struct hclge_desc desc;
10575 
10576 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10577 
10578 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10579 	req->max_frm_size = cpu_to_le16(new_mps);
10580 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10581 
10582 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10583 }
10584 
10585 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10586 {
10587 	struct hclge_vport *vport = hclge_get_vport(handle);
10588 
10589 	return hclge_set_vport_mtu(vport, new_mtu);
10590 }
10591 
10592 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10593 {
10594 	struct hclge_dev *hdev = vport->back;
10595 	int i, max_frm_size, ret;
10596 
10597 	/* HW supports 2 layers of VLAN tags */
10598 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10599 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10600 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10601 		return -EINVAL;
10602 
10603 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10604 	mutex_lock(&hdev->vport_lock);
10605 	/* VF's mps must fit within hdev->mps */
10606 	if (vport->vport_id && max_frm_size > hdev->mps) {
10607 		mutex_unlock(&hdev->vport_lock);
10608 		return -EINVAL;
10609 	} else if (vport->vport_id) {
10610 		vport->mps = max_frm_size;
10611 		mutex_unlock(&hdev->vport_lock);
10612 		return 0;
10613 	}
10614 
10615 	/* PF's mps must not be less than any VF's mps */
10616 	for (i = 1; i < hdev->num_alloc_vport; i++)
10617 		if (max_frm_size < hdev->vport[i].mps) {
10618 			mutex_unlock(&hdev->vport_lock);
10619 			return -EINVAL;
10620 		}
10621 
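	/* For the PF, take the client down while the MAC max frame size and
	 * the packet buffers are reconfigured, then bring it back up.
	 */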
10622 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10623 
10624 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10625 	if (ret) {
10626 		dev_err(&hdev->pdev->dev,
10627 			"Change mtu fail, ret =%d\n", ret);
10628 		goto out;
10629 	}
10630 
10631 	hdev->mps = max_frm_size;
10632 	vport->mps = max_frm_size;
10633 
10634 	ret = hclge_buffer_alloc(hdev);
10635 	if (ret)
10636 		dev_err(&hdev->pdev->dev,
10637 			"Allocate buffer fail, ret =%d\n", ret);
10638 
10639 out:
10640 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10641 	mutex_unlock(&hdev->vport_lock);
10642 	return ret;
10643 }
10644 
10645 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10646 				    bool enable)
10647 {
10648 	struct hclge_reset_tqp_queue_cmd *req;
10649 	struct hclge_desc desc;
10650 	int ret;
10651 
10652 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10653 
10654 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10655 	req->tqp_id = cpu_to_le16(queue_id);
10656 	if (enable)
10657 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10658 
10659 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10660 	if (ret) {
10661 		dev_err(&hdev->pdev->dev,
10662 			"Send tqp reset cmd error, status =%d\n", ret);
10663 		return ret;
10664 	}
10665 
10666 	return 0;
10667 }
10668 
10669 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10670 {
10671 	struct hclge_reset_tqp_queue_cmd *req;
10672 	struct hclge_desc desc;
10673 	int ret;
10674 
10675 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10676 
10677 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10678 	req->tqp_id = cpu_to_le16(queue_id);
10679 
10680 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10681 	if (ret) {
10682 		dev_err(&hdev->pdev->dev,
10683 			"Get reset status error, status =%d\n", ret);
10684 		return ret;
10685 	}
10686 
10687 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10688 }
10689 
10690 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10691 {
10692 	struct hnae3_queue *queue;
10693 	struct hclge_tqp *tqp;
10694 
10695 	queue = handle->kinfo.tqp[queue_id];
10696 	tqp = container_of(queue, struct hclge_tqp, q);
10697 
10698 	return tqp->index;
10699 }
10700 
10701 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10702 {
10703 	struct hclge_vport *vport = hclge_get_vport(handle);
10704 	struct hclge_dev *hdev = vport->back;
10705 	u16 reset_try_times = 0;
10706 	int reset_status;
10707 	u16 queue_gid;
10708 	int ret;
10709 	u16 i;
10710 
10711 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10712 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10713 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10714 		if (ret) {
10715 			dev_err(&hdev->pdev->dev,
10716 				"failed to send reset tqp cmd, ret = %d\n",
10717 				ret);
10718 			return ret;
10719 		}
10720 
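		/* Poll the reset status until the queue reports ready, giving
		 * up after HCLGE_TQP_RESET_TRY_TIMES attempts.
		 */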
10721 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10722 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10723 			if (reset_status)
10724 				break;
10725 
10726 			/* Wait for tqp hw reset */
10727 			usleep_range(1000, 1200);
10728 		}
10729 
10730 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10731 			dev_err(&hdev->pdev->dev,
10732 				"wait for tqp hw reset timeout\n");
10733 			return -ETIME;
10734 		}
10735 
10736 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10737 		if (ret) {
10738 			dev_err(&hdev->pdev->dev,
10739 				"failed to deassert soft reset, ret = %d\n",
10740 				ret);
10741 			return ret;
10742 		}
10743 		reset_try_times = 0;
10744 	}
10745 	return 0;
10746 }
10747 
10748 static int hclge_reset_rcb(struct hnae3_handle *handle)
10749 {
10750 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10751 #define HCLGE_RESET_RCB_SUCCESS		1U
10752 
10753 	struct hclge_vport *vport = hclge_get_vport(handle);
10754 	struct hclge_dev *hdev = vport->back;
10755 	struct hclge_reset_cmd *req;
10756 	struct hclge_desc desc;
10757 	u8 return_status;
10758 	u16 queue_gid;
10759 	int ret;
10760 
10761 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10762 
10763 	req = (struct hclge_reset_cmd *)desc.data;
10764 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10765 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10766 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10767 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10768 
10769 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10770 	if (ret) {
10771 		dev_err(&hdev->pdev->dev,
10772 			"failed to send rcb reset cmd, ret = %d\n", ret);
10773 		return ret;
10774 	}
10775 
10776 	return_status = req->fun_reset_rcb_return_status;
10777 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10778 		return 0;
10779 
10780 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10781 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10782 			return_status);
10783 		return -EIO;
10784 	}
10785 
10786 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10787 	 * again to reset all tqps
10788 	 */
10789 	return hclge_reset_tqp_cmd(handle);
10790 }
10791 
10792 int hclge_reset_tqp(struct hnae3_handle *handle)
10793 {
10794 	struct hclge_vport *vport = hclge_get_vport(handle);
10795 	struct hclge_dev *hdev = vport->back;
10796 	int ret;
10797 
10798 	/* only need to disable PF's tqp */
10799 	if (!vport->vport_id) {
10800 		ret = hclge_tqp_enable(handle, false);
10801 		if (ret) {
10802 			dev_err(&hdev->pdev->dev,
10803 				"failed to disable tqp, ret = %d\n", ret);
10804 			return ret;
10805 		}
10806 	}
10807 
10808 	return hclge_reset_rcb(handle);
10809 }
10810 
10811 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10812 {
10813 	struct hclge_vport *vport = hclge_get_vport(handle);
10814 	struct hclge_dev *hdev = vport->back;
10815 
10816 	return hdev->fw_version;
10817 }
10818 
10819 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10820 {
10821 	struct phy_device *phydev = hdev->hw.mac.phydev;
10822 
10823 	if (!phydev)
10824 		return;
10825 
10826 	phy_set_asym_pause(phydev, rx_en, tx_en);
10827 }
10828 
10829 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10830 {
10831 	int ret;
10832 
10833 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10834 		return 0;
10835 
10836 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10837 	if (ret)
10838 		dev_err(&hdev->pdev->dev,
10839 			"configure pauseparam error, ret = %d.\n", ret);
10840 
10841 	return ret;
10842 }
10843 
10844 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10845 {
10846 	struct phy_device *phydev = hdev->hw.mac.phydev;
10847 	u16 remote_advertising = 0;
10848 	u16 local_advertising;
10849 	u32 rx_pause, tx_pause;
10850 	u8 flowctl;
10851 
10852 	if (!phydev->link || !phydev->autoneg)
10853 		return 0;
10854 
10855 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10856 
10857 	if (phydev->pause)
10858 		remote_advertising = LPA_PAUSE_CAP;
10859 
10860 	if (phydev->asym_pause)
10861 		remote_advertising |= LPA_PAUSE_ASYM;
10862 
10863 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10864 					   remote_advertising);
10865 	tx_pause = flowctl & FLOW_CTRL_TX;
10866 	rx_pause = flowctl & FLOW_CTRL_RX;
10867 
10868 	if (phydev->duplex == HCLGE_MAC_HALF) {
10869 		tx_pause = 0;
10870 		rx_pause = 0;
10871 	}
10872 
10873 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10874 }
10875 
10876 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10877 				 u32 *rx_en, u32 *tx_en)
10878 {
10879 	struct hclge_vport *vport = hclge_get_vport(handle);
10880 	struct hclge_dev *hdev = vport->back;
10881 	u8 media_type = hdev->hw.mac.media_type;
10882 
10883 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10884 		    hclge_get_autoneg(handle) : 0;
10885 
10886 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10887 		*rx_en = 0;
10888 		*tx_en = 0;
10889 		return;
10890 	}
10891 
10892 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10893 		*rx_en = 1;
10894 		*tx_en = 0;
10895 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10896 		*tx_en = 1;
10897 		*rx_en = 0;
10898 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10899 		*rx_en = 1;
10900 		*tx_en = 1;
10901 	} else {
10902 		*rx_en = 0;
10903 		*tx_en = 0;
10904 	}
10905 }
10906 
10907 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10908 					 u32 rx_en, u32 tx_en)
10909 {
10910 	if (rx_en && tx_en)
10911 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10912 	else if (rx_en && !tx_en)
10913 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10914 	else if (!rx_en && tx_en)
10915 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10916 	else
10917 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10918 
10919 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10920 }
10921 
10922 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10923 				u32 rx_en, u32 tx_en)
10924 {
10925 	struct hclge_vport *vport = hclge_get_vport(handle);
10926 	struct hclge_dev *hdev = vport->back;
10927 	struct phy_device *phydev = hdev->hw.mac.phydev;
10928 	u32 fc_autoneg;
10929 
10930 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10931 		fc_autoneg = hclge_get_autoneg(handle);
10932 		if (auto_neg != fc_autoneg) {
10933 			dev_info(&hdev->pdev->dev,
10934 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10935 			return -EOPNOTSUPP;
10936 		}
10937 	}
10938 
10939 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10940 		dev_info(&hdev->pdev->dev,
10941 			 "Priority flow control enabled. Cannot set link flow control.\n");
10942 		return -EOPNOTSUPP;
10943 	}
10944 
10945 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10946 
10947 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10948 
10949 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10950 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10951 
10952 	if (phydev)
10953 		return phy_start_aneg(phydev);
10954 
10955 	return -EOPNOTSUPP;
10956 }
10957 
10958 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10959 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10960 {
10961 	struct hclge_vport *vport = hclge_get_vport(handle);
10962 	struct hclge_dev *hdev = vport->back;
10963 
10964 	if (speed)
10965 		*speed = hdev->hw.mac.speed;
10966 	if (duplex)
10967 		*duplex = hdev->hw.mac.duplex;
10968 	if (auto_neg)
10969 		*auto_neg = hdev->hw.mac.autoneg;
10970 }
10971 
10972 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10973 				 u8 *module_type)
10974 {
10975 	struct hclge_vport *vport = hclge_get_vport(handle);
10976 	struct hclge_dev *hdev = vport->back;
10977 
10978 	/* When the nic is down, the service task is not running and does not
10979 	 * update the port information every second. Query the port information
10980 	 * before returning the media type, to ensure it is correct.
10981 	 */
10982 	hclge_update_port_info(hdev);
10983 
10984 	if (media_type)
10985 		*media_type = hdev->hw.mac.media_type;
10986 
10987 	if (module_type)
10988 		*module_type = hdev->hw.mac.module_type;
10989 }
10990 
10991 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10992 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10993 {
10994 	struct hclge_vport *vport = hclge_get_vport(handle);
10995 	struct hclge_dev *hdev = vport->back;
10996 	struct phy_device *phydev = hdev->hw.mac.phydev;
10997 	int mdix_ctrl, mdix, is_resolved;
10998 	unsigned int retval;
10999 
11000 	if (!phydev) {
11001 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11002 		*tp_mdix = ETH_TP_MDI_INVALID;
11003 		return;
11004 	}
11005 
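	/* Switch the PHY register page to the MDI/MDIX page, read the
	 * control and status registers, then restore the copper page.
	 */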
11006 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11007 
11008 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11009 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11010 				    HCLGE_PHY_MDIX_CTRL_S);
11011 
11012 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11013 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11014 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11015 
11016 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11017 
11018 	switch (mdix_ctrl) {
11019 	case 0x0:
11020 		*tp_mdix_ctrl = ETH_TP_MDI;
11021 		break;
11022 	case 0x1:
11023 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11024 		break;
11025 	case 0x3:
11026 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11027 		break;
11028 	default:
11029 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11030 		break;
11031 	}
11032 
11033 	if (!is_resolved)
11034 		*tp_mdix = ETH_TP_MDI_INVALID;
11035 	else if (mdix)
11036 		*tp_mdix = ETH_TP_MDI_X;
11037 	else
11038 		*tp_mdix = ETH_TP_MDI;
11039 }
11040 
11041 static void hclge_info_show(struct hclge_dev *hdev)
11042 {
11043 	struct device *dev = &hdev->pdev->dev;
11044 
11045 	dev_info(dev, "PF info begin:\n");
11046 
11047 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11048 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11049 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11050 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11051 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11052 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11053 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11054 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11055 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11056 	dev_info(dev, "This is %s PF\n",
11057 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11058 	dev_info(dev, "DCB %s\n",
11059 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11060 	dev_info(dev, "MQPRIO %s\n",
11061 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11062 
11063 	dev_info(dev, "PF info end.\n");
11064 }
11065 
11066 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11067 					  struct hclge_vport *vport)
11068 {
11069 	struct hnae3_client *client = vport->nic.client;
11070 	struct hclge_dev *hdev = ae_dev->priv;
11071 	int rst_cnt = hdev->rst_stats.reset_cnt;
11072 	int ret;
11073 
11074 	ret = client->ops->init_instance(&vport->nic);
11075 	if (ret)
11076 		return ret;
11077 
11078 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
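	/* If a reset started or completed while the instance was being
	 * initialized, treat it as busy and unwind the instance.
	 */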
11079 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11080 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11081 		ret = -EBUSY;
11082 		goto init_nic_err;
11083 	}
11084 
11085 	/* Enable nic hw error interrupts */
11086 	ret = hclge_config_nic_hw_error(hdev, true);
11087 	if (ret) {
11088 		dev_err(&ae_dev->pdev->dev,
11089 			"fail(%d) to enable hw error interrupts\n", ret);
11090 		goto init_nic_err;
11091 	}
11092 
11093 	hnae3_set_client_init_flag(client, ae_dev, 1);
11094 
11095 	if (netif_msg_drv(&hdev->vport->nic))
11096 		hclge_info_show(hdev);
11097 
11098 	return ret;
11099 
11100 init_nic_err:
11101 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11102 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11103 		msleep(HCLGE_WAIT_RESET_DONE);
11104 
11105 	client->ops->uninit_instance(&vport->nic, 0);
11106 
11107 	return ret;
11108 }
11109 
11110 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11111 					   struct hclge_vport *vport)
11112 {
11113 	struct hclge_dev *hdev = ae_dev->priv;
11114 	struct hnae3_client *client;
11115 	int rst_cnt;
11116 	int ret;
11117 
11118 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11119 	    !hdev->nic_client)
11120 		return 0;
11121 
11122 	client = hdev->roce_client;
11123 	ret = hclge_init_roce_base_info(vport);
11124 	if (ret)
11125 		return ret;
11126 
11127 	rst_cnt = hdev->rst_stats.reset_cnt;
11128 	ret = client->ops->init_instance(&vport->roce);
11129 	if (ret)
11130 		return ret;
11131 
11132 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11133 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11134 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11135 		ret = -EBUSY;
11136 		goto init_roce_err;
11137 	}
11138 
11139 	/* Enable roce ras interrupts */
11140 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11141 	if (ret) {
11142 		dev_err(&ae_dev->pdev->dev,
11143 			"fail(%d) to enable roce ras interrupts\n", ret);
11144 		goto init_roce_err;
11145 	}
11146 
11147 	hnae3_set_client_init_flag(client, ae_dev, 1);
11148 
11149 	return 0;
11150 
11151 init_roce_err:
11152 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11153 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11154 		msleep(HCLGE_WAIT_RESET_DONE);
11155 
11156 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11157 
11158 	return ret;
11159 }
11160 
11161 static int hclge_init_client_instance(struct hnae3_client *client,
11162 				      struct hnae3_ae_dev *ae_dev)
11163 {
11164 	struct hclge_dev *hdev = ae_dev->priv;
11165 	struct hclge_vport *vport = &hdev->vport[0];
11166 	int ret;
11167 
11168 	switch (client->type) {
11169 	case HNAE3_CLIENT_KNIC:
11170 		hdev->nic_client = client;
11171 		vport->nic.client = client;
11172 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11173 		if (ret)
11174 			goto clear_nic;
11175 
11176 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11177 		if (ret)
11178 			goto clear_roce;
11179 
11180 		break;
11181 	case HNAE3_CLIENT_ROCE:
11182 		if (hnae3_dev_roce_supported(hdev)) {
11183 			hdev->roce_client = client;
11184 			vport->roce.client = client;
11185 		}
11186 
11187 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11188 		if (ret)
11189 			goto clear_roce;
11190 
11191 		break;
11192 	default:
11193 		return -EINVAL;
11194 	}
11195 
11196 	return 0;
11197 
11198 clear_nic:
11199 	hdev->nic_client = NULL;
11200 	vport->nic.client = NULL;
11201 	return ret;
11202 clear_roce:
11203 	hdev->roce_client = NULL;
11204 	vport->roce.client = NULL;
11205 	return ret;
11206 }
11207 
11208 static void hclge_uninit_client_instance(struct hnae3_client *client,
11209 					 struct hnae3_ae_dev *ae_dev)
11210 {
11211 	struct hclge_dev *hdev = ae_dev->priv;
11212 	struct hclge_vport *vport = &hdev->vport[0];
11213 
11214 	if (hdev->roce_client) {
11215 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11216 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11217 			msleep(HCLGE_WAIT_RESET_DONE);
11218 
11219 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11220 		hdev->roce_client = NULL;
11221 		vport->roce.client = NULL;
11222 	}
11223 	if (client->type == HNAE3_CLIENT_ROCE)
11224 		return;
11225 	if (hdev->nic_client && client->ops->uninit_instance) {
11226 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11227 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11228 			msleep(HCLGE_WAIT_RESET_DONE);
11229 
11230 		client->ops->uninit_instance(&vport->nic, 0);
11231 		hdev->nic_client = NULL;
11232 		vport->nic.client = NULL;
11233 	}
11234 }
11235 
11236 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11237 {
11238 #define HCLGE_MEM_BAR		4
11239 
11240 	struct pci_dev *pdev = hdev->pdev;
11241 	struct hclge_hw *hw = &hdev->hw;
11242 
11243 	/* if the device does not have device memory, return directly */
11244 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11245 		return 0;
11246 
11247 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11248 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11249 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11250 	if (!hw->mem_base) {
11251 		dev_err(&pdev->dev, "failed to map device memory\n");
11252 		return -EFAULT;
11253 	}
11254 
11255 	return 0;
11256 }
11257 
11258 static int hclge_pci_init(struct hclge_dev *hdev)
11259 {
11260 	struct pci_dev *pdev = hdev->pdev;
11261 	struct hclge_hw *hw;
11262 	int ret;
11263 
11264 	ret = pci_enable_device(pdev);
11265 	if (ret) {
11266 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11267 		return ret;
11268 	}
11269 
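	/* Prefer 64-bit DMA addressing; if the platform cannot provide it,
	 * fall back to a 32-bit DMA mask.
	 */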
11270 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11271 	if (ret) {
11272 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11273 		if (ret) {
11274 			dev_err(&pdev->dev,
11275 				"can't set consistent PCI DMA");
11276 			goto err_disable_device;
11277 		}
11278 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11279 	}
11280 
11281 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11282 	if (ret) {
11283 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11284 		goto err_disable_device;
11285 	}
11286 
11287 	pci_set_master(pdev);
11288 	hw = &hdev->hw;
11289 	hw->io_base = pcim_iomap(pdev, 2, 0);
11290 	if (!hw->io_base) {
11291 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11292 		ret = -ENOMEM;
11293 		goto err_clr_master;
11294 	}
11295 
11296 	ret = hclge_dev_mem_map(hdev);
11297 	if (ret)
11298 		goto err_unmap_io_base;
11299 
11300 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11301 
11302 	return 0;
11303 
11304 err_unmap_io_base:
11305 	pcim_iounmap(pdev, hdev->hw.io_base);
11306 err_clr_master:
11307 	pci_clear_master(pdev);
11308 	pci_release_regions(pdev);
11309 err_disable_device:
11310 	pci_disable_device(pdev);
11311 
11312 	return ret;
11313 }
11314 
11315 static void hclge_pci_uninit(struct hclge_dev *hdev)
11316 {
11317 	struct pci_dev *pdev = hdev->pdev;
11318 
11319 	if (hdev->hw.mem_base)
11320 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11321 
11322 	pcim_iounmap(pdev, hdev->hw.io_base);
11323 	pci_free_irq_vectors(pdev);
11324 	pci_clear_master(pdev);
11325 	pci_release_mem_regions(pdev);
11326 	pci_disable_device(pdev);
11327 }
11328 
11329 static void hclge_state_init(struct hclge_dev *hdev)
11330 {
11331 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11332 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11333 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11334 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11335 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11336 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11337 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11338 }
11339 
11340 static void hclge_state_uninit(struct hclge_dev *hdev)
11341 {
11342 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11343 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11344 
11345 	if (hdev->reset_timer.function)
11346 		del_timer_sync(&hdev->reset_timer);
11347 	if (hdev->service_task.work.func)
11348 		cancel_delayed_work_sync(&hdev->service_task);
11349 }
11350 
11351 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11352 					enum hnae3_reset_type rst_type)
11353 {
11354 #define HCLGE_RESET_RETRY_WAIT_MS	500
11355 #define HCLGE_RESET_RETRY_CNT	5
11356 
11357 	struct hclge_dev *hdev = ae_dev->priv;
11358 	int retry_cnt = 0;
11359 	int ret;
11360 
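	/* Take the reset semaphore and prepare the device for reset. If
	 * preparation fails, release the semaphore and retry, bounded by
	 * HCLGE_RESET_RETRY_CNT unless a reset is still pending.
	 */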
11361 retry:
11362 	down(&hdev->reset_sem);
11363 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11364 	hdev->reset_type = rst_type;
11365 	ret = hclge_reset_prepare(hdev);
11366 	if (ret || hdev->reset_pending) {
11367 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11368 			ret);
11369 		if (hdev->reset_pending ||
11370 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11371 			dev_err(&hdev->pdev->dev,
11372 				"reset_pending:0x%lx, retry_cnt:%d\n",
11373 				hdev->reset_pending, retry_cnt);
11374 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11375 			up(&hdev->reset_sem);
11376 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11377 			goto retry;
11378 		}
11379 	}
11380 
11381 	/* disable misc vector before reset done */
11382 	hclge_enable_vector(&hdev->misc_vector, false);
11383 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11384 
11385 	if (hdev->reset_type == HNAE3_FLR_RESET)
11386 		hdev->rst_stats.flr_rst_cnt++;
11387 }
11388 
11389 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11390 {
11391 	struct hclge_dev *hdev = ae_dev->priv;
11392 	int ret;
11393 
11394 	hclge_enable_vector(&hdev->misc_vector, true);
11395 
11396 	ret = hclge_reset_rebuild(hdev);
11397 	if (ret)
11398 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11399 
11400 	hdev->reset_type = HNAE3_NONE_RESET;
11401 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11402 	up(&hdev->reset_sem);
11403 }
11404 
11405 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11406 {
11407 	u16 i;
11408 
11409 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11410 		struct hclge_vport *vport = &hdev->vport[i];
11411 		int ret;
11412 
11413 		/* Send cmd to clear VF's FUNC_RST_ING */
11414 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11415 		if (ret)
11416 			dev_warn(&hdev->pdev->dev,
11417 				 "clear vf(%u) rst failed %d!\n",
11418 				 vport->vport_id, ret);
11419 	}
11420 }
11421 
11422 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11423 {
11424 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11425 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11426 }
11427 
11428 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11429 {
11430 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11431 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11432 }
11433 
11434 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11435 {
11436 	struct pci_dev *pdev = ae_dev->pdev;
11437 	struct hclge_dev *hdev;
11438 	int ret;
11439 
11440 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11441 	if (!hdev)
11442 		return -ENOMEM;
11443 
11444 	hdev->pdev = pdev;
11445 	hdev->ae_dev = ae_dev;
11446 	hdev->reset_type = HNAE3_NONE_RESET;
11447 	hdev->reset_level = HNAE3_FUNC_RESET;
11448 	ae_dev->priv = hdev;
11449 
11450 	/* HW supports 2 layers of VLAN tags */
11451 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11452 
11453 	mutex_init(&hdev->vport_lock);
11454 	spin_lock_init(&hdev->fd_rule_lock);
11455 	sema_init(&hdev->reset_sem, 1);
11456 
11457 	ret = hclge_pci_init(hdev);
11458 	if (ret)
11459 		goto out;
11460 
11461 	/* Initialize the firmware command queue */
11462 	ret = hclge_cmd_queue_init(hdev);
11463 	if (ret)
11464 		goto err_pci_uninit;
11465 
11466 	/* Initialize firmware command handling */
11467 	ret = hclge_cmd_init(hdev);
11468 	if (ret)
11469 		goto err_cmd_uninit;
11470 
11471 	ret = hclge_get_cap(hdev);
11472 	if (ret)
11473 		goto err_cmd_uninit;
11474 
11475 	ret = hclge_query_dev_specs(hdev);
11476 	if (ret) {
11477 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11478 			ret);
11479 		goto err_cmd_uninit;
11480 	}
11481 
11482 	ret = hclge_configure(hdev);
11483 	if (ret) {
11484 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11485 		goto err_cmd_uninit;
11486 	}
11487 
11488 	ret = hclge_init_msi(hdev);
11489 	if (ret) {
11490 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11491 		goto err_cmd_uninit;
11492 	}
11493 
11494 	ret = hclge_misc_irq_init(hdev);
11495 	if (ret)
11496 		goto err_msi_uninit;
11497 
11498 	ret = hclge_alloc_tqps(hdev);
11499 	if (ret) {
11500 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11501 		goto err_msi_irq_uninit;
11502 	}
11503 
11504 	ret = hclge_alloc_vport(hdev);
11505 	if (ret)
11506 		goto err_msi_irq_uninit;
11507 
11508 	ret = hclge_map_tqp(hdev);
11509 	if (ret)
11510 		goto err_msi_irq_uninit;
11511 
11512 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11513 	    !hnae3_dev_phy_imp_supported(hdev)) {
11514 		ret = hclge_mac_mdio_config(hdev);
11515 		if (ret)
11516 			goto err_msi_irq_uninit;
11517 	}
11518 
11519 	ret = hclge_init_umv_space(hdev);
11520 	if (ret)
11521 		goto err_mdiobus_unreg;
11522 
11523 	ret = hclge_mac_init(hdev);
11524 	if (ret) {
11525 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11526 		goto err_mdiobus_unreg;
11527 	}
11528 
11529 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11530 	if (ret) {
11531 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11532 		goto err_mdiobus_unreg;
11533 	}
11534 
11535 	ret = hclge_config_gro(hdev, true);
11536 	if (ret)
11537 		goto err_mdiobus_unreg;
11538 
11539 	ret = hclge_init_vlan_config(hdev);
11540 	if (ret) {
11541 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11542 		goto err_mdiobus_unreg;
11543 	}
11544 
11545 	ret = hclge_tm_schd_init(hdev);
11546 	if (ret) {
11547 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11548 		goto err_mdiobus_unreg;
11549 	}
11550 
11551 	ret = hclge_rss_init_cfg(hdev);
11552 	if (ret) {
11553 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11554 		goto err_mdiobus_unreg;
11555 	}
11556 
11557 	ret = hclge_rss_init_hw(hdev);
11558 	if (ret) {
11559 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11560 		goto err_mdiobus_unreg;
11561 	}
11562 
11563 	ret = init_mgr_tbl(hdev);
11564 	if (ret) {
11565 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11566 		goto err_mdiobus_unreg;
11567 	}
11568 
11569 	ret = hclge_init_fd_config(hdev);
11570 	if (ret) {
11571 		dev_err(&pdev->dev,
11572 			"fd table init fail, ret=%d\n", ret);
11573 		goto err_mdiobus_unreg;
11574 	}
11575 
11576 	ret = hclge_ptp_init(hdev);
11577 	if (ret)
11578 		goto err_mdiobus_unreg;
11579 
11580 	INIT_KFIFO(hdev->mac_tnl_log);
11581 
11582 	hclge_dcb_ops_set(hdev);
11583 
11584 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11585 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11586 
11587 	/* Set up affinity after the service timer setup, because add_timer_on
11588 	 * is called in the affinity notify callback.
11589 	 */
11590 	hclge_misc_affinity_setup(hdev);
11591 
11592 	hclge_clear_all_event_cause(hdev);
11593 	hclge_clear_resetting_state(hdev);
11594 
11595 	/* Log and clear the hw errors that have already occurred */
11596 	if (hnae3_dev_ras_imp_supported(hdev))
11597 		hclge_handle_occurred_error(hdev);
11598 	else
11599 		hclge_handle_all_hns_hw_errors(ae_dev);
11600 
11601 	/* request a delayed reset for error recovery, because an immediate
11602 	 * global reset on a PF would affect pending initialization of other PFs
11603 	 */
11604 	if (ae_dev->hw_err_reset_req) {
11605 		enum hnae3_reset_type reset_level;
11606 
11607 		reset_level = hclge_get_reset_level(ae_dev,
11608 						    &ae_dev->hw_err_reset_req);
11609 		hclge_set_def_reset_request(ae_dev, reset_level);
11610 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11611 	}
11612 
11613 	hclge_init_rxd_adv_layout(hdev);
11614 
11615 	/* Enable MISC vector (vector0) */
11616 	hclge_enable_vector(&hdev->misc_vector, true);
11617 
11618 	hclge_state_init(hdev);
11619 	hdev->last_reset_time = jiffies;
11620 
11621 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11622 		 HCLGE_DRIVER_NAME);
11623 
11624 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11625 
11626 	return 0;
11627 
11628 err_mdiobus_unreg:
11629 	if (hdev->hw.mac.phydev)
11630 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11631 err_msi_irq_uninit:
11632 	hclge_misc_irq_uninit(hdev);
11633 err_msi_uninit:
11634 	pci_free_irq_vectors(pdev);
11635 err_cmd_uninit:
11636 	hclge_cmd_uninit(hdev);
11637 err_pci_uninit:
11638 	pcim_iounmap(pdev, hdev->hw.io_base);
11639 	pci_clear_master(pdev);
11640 	pci_release_regions(pdev);
11641 	pci_disable_device(pdev);
11642 out:
11643 	mutex_destroy(&hdev->vport_lock);
11644 	return ret;
11645 }
11646 
11647 static void hclge_stats_clear(struct hclge_dev *hdev)
11648 {
11649 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11650 }
11651 
11652 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11653 {
11654 	return hclge_config_switch_param(hdev, vf, enable,
11655 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11656 }
11657 
11658 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11659 {
11660 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11661 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11662 					  enable, vf);
11663 }
11664 
11665 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11666 {
11667 	int ret;
11668 
11669 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11670 	if (ret) {
11671 		dev_err(&hdev->pdev->dev,
11672 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11673 			vf, enable ? "on" : "off", ret);
11674 		return ret;
11675 	}
11676 
11677 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11678 	if (ret)
11679 		dev_err(&hdev->pdev->dev,
11680 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11681 			vf, enable ? "on" : "off", ret);
11682 
11683 	return ret;
11684 }
11685 
11686 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11687 				 bool enable)
11688 {
11689 	struct hclge_vport *vport = hclge_get_vport(handle);
11690 	struct hclge_dev *hdev = vport->back;
11691 	u32 new_spoofchk = enable ? 1 : 0;
11692 	int ret;
11693 
11694 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11695 		return -EOPNOTSUPP;
11696 
11697 	vport = hclge_get_vf_vport(hdev, vf);
11698 	if (!vport)
11699 		return -EINVAL;
11700 
11701 	if (vport->vf_info.spoofchk == new_spoofchk)
11702 		return 0;
11703 
11704 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11705 		dev_warn(&hdev->pdev->dev,
11706 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11707 			 vf);
11708 	else if (enable && hclge_is_umv_space_full(vport, true))
11709 		dev_warn(&hdev->pdev->dev,
11710 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11711 			 vf);
11712 
11713 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11714 	if (ret)
11715 		return ret;
11716 
11717 	vport->vf_info.spoofchk = new_spoofchk;
11718 	return 0;
11719 }
11720 
11721 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11722 {
11723 	struct hclge_vport *vport = hdev->vport;
11724 	int ret;
11725 	int i;
11726 
11727 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11728 		return 0;
11729 
11730 	/* resume the vf spoof check state after reset */
11731 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11732 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11733 					       vport->vf_info.spoofchk);
11734 		if (ret)
11735 			return ret;
11736 
11737 		vport++;
11738 	}
11739 
11740 	return 0;
11741 }
11742 
11743 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11744 {
11745 	struct hclge_vport *vport = hclge_get_vport(handle);
11746 	struct hclge_dev *hdev = vport->back;
11747 	u32 new_trusted = enable ? 1 : 0;
11748 
11749 	vport = hclge_get_vf_vport(hdev, vf);
11750 	if (!vport)
11751 		return -EINVAL;
11752 
11753 	if (vport->vf_info.trusted == new_trusted)
11754 		return 0;
11755 
11756 	vport->vf_info.trusted = new_trusted;
11757 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11758 	hclge_task_schedule(hdev, 0);
11759 
11760 	return 0;
11761 }
11762 
11763 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11764 {
11765 	int ret;
11766 	int vf;
11767 
11768 	/* reset vf rate to default value */
11769 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11770 		struct hclge_vport *vport = &hdev->vport[vf];
11771 
11772 		vport->vf_info.max_tx_rate = 0;
11773 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11774 		if (ret)
11775 			dev_err(&hdev->pdev->dev,
11776 				"vf%d failed to reset to default, ret=%d\n",
11777 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11778 	}
11779 }
11780 
11781 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11782 				     int min_tx_rate, int max_tx_rate)
11783 {
11784 	if (min_tx_rate != 0 ||
11785 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11786 		dev_err(&hdev->pdev->dev,
11787 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11788 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11789 		return -EINVAL;
11790 	}
11791 
11792 	return 0;
11793 }
11794 
11795 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11796 			     int min_tx_rate, int max_tx_rate, bool force)
11797 {
11798 	struct hclge_vport *vport = hclge_get_vport(handle);
11799 	struct hclge_dev *hdev = vport->back;
11800 	int ret;
11801 
11802 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11803 	if (ret)
11804 		return ret;
11805 
11806 	vport = hclge_get_vf_vport(hdev, vf);
11807 	if (!vport)
11808 		return -EINVAL;
11809 
11810 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11811 		return 0;
11812 
11813 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11814 	if (ret)
11815 		return ret;
11816 
11817 	vport->vf_info.max_tx_rate = max_tx_rate;
11818 
11819 	return 0;
11820 }
11821 
11822 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11823 {
11824 	struct hnae3_handle *handle = &hdev->vport->nic;
11825 	struct hclge_vport *vport;
11826 	int ret;
11827 	int vf;
11828 
11829 	/* resume the vf max_tx_rate after reset */
11830 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11831 		vport = hclge_get_vf_vport(hdev, vf);
11832 		if (!vport)
11833 			return -EINVAL;
11834 
11835 		/* zero means max rate; after reset, the firmware has already
11836 		 * set it to max rate, so just continue.
11837 		 */
11838 		if (!vport->vf_info.max_tx_rate)
11839 			continue;
11840 
11841 		ret = hclge_set_vf_rate(handle, vf, 0,
11842 					vport->vf_info.max_tx_rate, true);
11843 		if (ret) {
11844 			dev_err(&hdev->pdev->dev,
11845 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11846 				vf, vport->vf_info.max_tx_rate, ret);
11847 			return ret;
11848 		}
11849 	}
11850 
11851 	return 0;
11852 }
11853 
11854 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11855 {
11856 	struct hclge_vport *vport = hdev->vport;
11857 	int i;
11858 
11859 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11860 		hclge_vport_stop(vport);
11861 		vport++;
11862 	}
11863 }
11864 
11865 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11866 {
11867 	struct hclge_dev *hdev = ae_dev->priv;
11868 	struct pci_dev *pdev = ae_dev->pdev;
11869 	int ret;
11870 
11871 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11872 
11873 	hclge_stats_clear(hdev);
11874 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
11875 	 * table entries, so do not clean the tables in memory here.
11876 	 */
11877 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11878 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11879 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11880 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11881 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11882 		hclge_reset_umv_space(hdev);
11883 	}
11884 
11885 	ret = hclge_cmd_init(hdev);
11886 	if (ret) {
11887 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11888 		return ret;
11889 	}
11890 
11891 	ret = hclge_map_tqp(hdev);
11892 	if (ret) {
11893 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11894 		return ret;
11895 	}
11896 
11897 	ret = hclge_mac_init(hdev);
11898 	if (ret) {
11899 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11900 		return ret;
11901 	}
11902 
11903 	ret = hclge_tp_port_init(hdev);
11904 	if (ret) {
11905 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11906 			ret);
11907 		return ret;
11908 	}
11909 
11910 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11911 	if (ret) {
11912 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11913 		return ret;
11914 	}
11915 
11916 	ret = hclge_config_gro(hdev, true);
11917 	if (ret)
11918 		return ret;
11919 
11920 	ret = hclge_init_vlan_config(hdev);
11921 	if (ret) {
11922 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11923 		return ret;
11924 	}
11925 
11926 	ret = hclge_tm_init_hw(hdev, true);
11927 	if (ret) {
11928 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11929 		return ret;
11930 	}
11931 
11932 	ret = hclge_rss_init_hw(hdev);
11933 	if (ret) {
11934 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11935 		return ret;
11936 	}
11937 
11938 	ret = init_mgr_tbl(hdev);
11939 	if (ret) {
11940 		dev_err(&pdev->dev,
11941 			"failed to reinit manager table, ret = %d\n", ret);
11942 		return ret;
11943 	}
11944 
11945 	ret = hclge_init_fd_config(hdev);
11946 	if (ret) {
11947 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11948 		return ret;
11949 	}
11950 
11951 	ret = hclge_ptp_init(hdev);
11952 	if (ret)
11953 		return ret;
11954 
11955 	/* Log and clear the hw errors that have already occurred */
11956 	if (hnae3_dev_ras_imp_supported(hdev))
11957 		hclge_handle_occurred_error(hdev);
11958 	else
11959 		hclge_handle_all_hns_hw_errors(ae_dev);
11960 
11961 	/* Re-enable the hw error interrupts because
11962 	 * the interrupts get disabled on global reset.
11963 	 */
11964 	ret = hclge_config_nic_hw_error(hdev, true);
11965 	if (ret) {
11966 		dev_err(&pdev->dev,
11967 			"fail(%d) to re-enable NIC hw error interrupts\n",
11968 			ret);
11969 		return ret;
11970 	}
11971 
11972 	if (hdev->roce_client) {
11973 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11974 		if (ret) {
11975 			dev_err(&pdev->dev,
11976 				"fail(%d) to re-enable roce ras interrupts\n",
11977 				ret);
11978 			return ret;
11979 		}
11980 	}
11981 
11982 	hclge_reset_vport_state(hdev);
11983 	ret = hclge_reset_vport_spoofchk(hdev);
11984 	if (ret)
11985 		return ret;
11986 
11987 	ret = hclge_resume_vf_rate(hdev);
11988 	if (ret)
11989 		return ret;
11990 
11991 	hclge_init_rxd_adv_layout(hdev);
11992 
11993 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11994 		 HCLGE_DRIVER_NAME);
11995 
11996 	return 0;
11997 }
11998 
11999 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12000 {
12001 	struct hclge_dev *hdev = ae_dev->priv;
12002 	struct hclge_mac *mac = &hdev->hw.mac;
12003 
12004 	hclge_reset_vf_rate(hdev);
12005 	hclge_clear_vf_vlan(hdev);
12006 	hclge_misc_affinity_teardown(hdev);
12007 	hclge_state_uninit(hdev);
12008 	hclge_ptp_uninit(hdev);
12009 	hclge_uninit_rxd_adv_layout(hdev);
12010 	hclge_uninit_mac_table(hdev);
12011 	hclge_del_all_fd_entries(hdev);
12012 
12013 	if (mac->phydev)
12014 		mdiobus_unregister(mac->mdio_bus);
12015 
12016 	/* Disable MISC vector (vector0) */
12017 	hclge_enable_vector(&hdev->misc_vector, false);
12018 	synchronize_irq(hdev->misc_vector.vector_irq);
12019 
12020 	/* Disable all hw interrupts */
12021 	hclge_config_mac_tnl_int(hdev, false);
12022 	hclge_config_nic_hw_error(hdev, false);
12023 	hclge_config_rocee_ras_interrupt(hdev, false);
12024 
12025 	hclge_cmd_uninit(hdev);
12026 	hclge_misc_irq_uninit(hdev);
12027 	hclge_pci_uninit(hdev);
12028 	mutex_destroy(&hdev->vport_lock);
12029 	hclge_uninit_vport_vlan_table(hdev);
12030 	ae_dev->priv = NULL;
12031 }
12032 
12033 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12034 {
12035 	struct hclge_vport *vport = hclge_get_vport(handle);
12036 	struct hclge_dev *hdev = vport->back;
12037 
12038 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12039 }
12040 
12041 static void hclge_get_channels(struct hnae3_handle *handle,
12042 			       struct ethtool_channels *ch)
12043 {
12044 	ch->max_combined = hclge_get_max_channels(handle);
12045 	ch->other_count = 1;
12046 	ch->max_other = 1;
12047 	ch->combined_count = handle->kinfo.rss_size;
12048 }
12049 
12050 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12051 					u16 *alloc_tqps, u16 *max_rss_size)
12052 {
12053 	struct hclge_vport *vport = hclge_get_vport(handle);
12054 	struct hclge_dev *hdev = vport->back;
12055 
12056 	*alloc_tqps = vport->alloc_tqps;
12057 	*max_rss_size = hdev->pf_rss_size_max;
12058 }
12059 
12060 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12061 			      bool rxfh_configured)
12062 {
12063 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12064 	struct hclge_vport *vport = hclge_get_vport(handle);
12065 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12066 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12067 	struct hclge_dev *hdev = vport->back;
12068 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12069 	u16 cur_rss_size = kinfo->rss_size;
12070 	u16 cur_tqps = kinfo->num_tqps;
12071 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12072 	u16 roundup_size;
12073 	u32 *rss_indir;
12074 	unsigned int i;
12075 	int ret;
12076 
12077 	kinfo->req_rss_size = new_tqps_num;
12078 
12079 	ret = hclge_tm_vport_map_update(hdev);
12080 	if (ret) {
12081 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12082 		return ret;
12083 	}
12084 
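	/* tc_size is programmed as the log2 of the per-TC RSS size, so round
	 * rss_size up to a power of two first.
	 */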
12085 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12086 	roundup_size = ilog2(roundup_size);
12087 	/* Set the RSS TC mode according to the new RSS size */
12088 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12089 		tc_valid[i] = 0;
12090 
12091 		if (!(hdev->hw_tc_map & BIT(i)))
12092 			continue;
12093 
12094 		tc_valid[i] = 1;
12095 		tc_size[i] = roundup_size;
12096 		tc_offset[i] = kinfo->rss_size * i;
12097 	}
12098 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12099 	if (ret)
12100 		return ret;
12101 
12102 	/* RSS indirection table has been configured by user */
12103 	if (rxfh_configured)
12104 		goto out;
12105 
12106 	/* Reinitialize the rss indirection table to match the new RSS size */
12107 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12108 			    GFP_KERNEL);
12109 	if (!rss_indir)
12110 		return -ENOMEM;
12111 
12112 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12113 		rss_indir[i] = i % kinfo->rss_size;
12114 
12115 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12116 	if (ret)
12117 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12118 			ret);
12119 
12120 	kfree(rss_indir);
12121 
12122 out:
12123 	if (!ret)
12124 		dev_info(&hdev->pdev->dev,
12125 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12126 			 cur_rss_size, kinfo->rss_size,
12127 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12128 
12129 	return ret;
12130 }
12131 
12132 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12133 			      u32 *regs_num_64_bit)
12134 {
12135 	struct hclge_desc desc;
12136 	u32 total_num;
12137 	int ret;
12138 
12139 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12140 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12141 	if (ret) {
12142 		dev_err(&hdev->pdev->dev,
12143 			"Query register number cmd failed, ret = %d.\n", ret);
12144 		return ret;
12145 	}
12146 
12147 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12148 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12149 
12150 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12151 	if (!total_num)
12152 		return -EINVAL;
12153 
12154 	return 0;
12155 }
12156 
12157 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12158 				 void *data)
12159 {
12160 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12161 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12162 
12163 	struct hclge_desc *desc;
12164 	u32 *reg_val = data;
12165 	__le32 *desc_data;
12166 	int nodata_num;
12167 	int cmd_num;
12168 	int i, k, n;
12169 	int ret;
12170 
12171 	if (regs_num == 0)
12172 		return 0;
12173 
12174 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12175 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12176 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12177 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12178 	if (!desc)
12179 		return -ENOMEM;
12180 
12181 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12182 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12183 	if (ret) {
12184 		dev_err(&hdev->pdev->dev,
12185 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12186 		kfree(desc);
12187 		return ret;
12188 	}
12189 
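	/* The first descriptor carries nodata_num non-register words, so it
	 * yields fewer register values than the following descriptors.
	 */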
12190 	for (i = 0; i < cmd_num; i++) {
12191 		if (i == 0) {
12192 			desc_data = (__le32 *)(&desc[i].data[0]);
12193 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12194 		} else {
12195 			desc_data = (__le32 *)(&desc[i]);
12196 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12197 		}
12198 		for (k = 0; k < n; k++) {
12199 			*reg_val++ = le32_to_cpu(*desc_data++);
12200 
12201 			regs_num--;
12202 			if (!regs_num)
12203 				break;
12204 		}
12205 	}
12206 
12207 	kfree(desc);
12208 	return 0;
12209 }
12210 
12211 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12212 				 void *data)
12213 {
12214 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12215 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12216 
12217 	struct hclge_desc *desc;
12218 	u64 *reg_val = data;
12219 	__le64 *desc_data;
12220 	int nodata_len;
12221 	int cmd_num;
12222 	int i, k, n;
12223 	int ret;
12224 
12225 	if (regs_num == 0)
12226 		return 0;
12227 
12228 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12229 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12230 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12231 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12232 	if (!desc)
12233 		return -ENOMEM;
12234 
12235 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12236 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12237 	if (ret) {
12238 		dev_err(&hdev->pdev->dev,
12239 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12240 		kfree(desc);
12241 		return ret;
12242 	}
12243 
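	/* As with the 32 bit query, the first descriptor carries nodata_len
	 * non-register words and yields fewer register values.
	 */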
12244 	for (i = 0; i < cmd_num; i++) {
12245 		if (i == 0) {
12246 			desc_data = (__le64 *)(&desc[i].data[0]);
12247 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12248 		} else {
12249 			desc_data = (__le64 *)(&desc[i]);
12250 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12251 		}
12252 		for (k = 0; k < n; k++) {
12253 			*reg_val++ = le64_to_cpu(*desc_data++);
12254 
12255 			regs_num--;
12256 			if (!regs_num)
12257 				break;
12258 		}
12259 	}
12260 
12261 	kfree(desc);
12262 	return 0;
12263 }
12264 
12265 #define MAX_SEPARATE_NUM	4
12266 #define SEPARATOR_VALUE		0xFDFCFBFA
12267 #define REG_NUM_PER_LINE	4
12268 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12269 #define REG_SEPARATOR_LINE	1
12270 #define REG_NUM_REMAIN_MASK	3
12271 
12272 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12273 {
12274 	int i;
12275 
12276 	/* initialize the command BDs except the last one */
12277 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12278 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12279 					   true);
12280 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12281 	}
12282 
12283 	/* initialize the last command BD */
12284 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12285 
12286 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12287 }
12288 
12289 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12290 				    int *bd_num_list,
12291 				    u32 type_num)
12292 {
12293 	u32 entries_per_desc, desc_index, index, offset, i;
12294 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12295 	int ret;
12296 
12297 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12298 	if (ret) {
12299 		dev_err(&hdev->pdev->dev,
12300 			"Get dfx bd num fail, status is %d.\n", ret);
12301 		return ret;
12302 	}
12303 
12304 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12305 	for (i = 0; i < type_num; i++) {
12306 		offset = hclge_dfx_bd_offset_list[i];
12307 		index = offset % entries_per_desc;
12308 		desc_index = offset / entries_per_desc;
12309 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12310 	}
12311 
12312 	return ret;
12313 }
12314 
12315 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12316 				  struct hclge_desc *desc_src, int bd_num,
12317 				  enum hclge_opcode_type cmd)
12318 {
12319 	struct hclge_desc *desc = desc_src;
12320 	int i, ret;
12321 
12322 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12323 	for (i = 0; i < bd_num - 1; i++) {
12324 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12325 		desc++;
12326 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12327 	}
12328 
12329 	desc = desc_src;
12330 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12331 	if (ret)
12332 		dev_err(&hdev->pdev->dev,
12333 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12334 			cmd, ret);
12335 
12336 	return ret;
12337 }
12338 
12339 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12340 				    void *data)
12341 {
12342 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12343 	struct hclge_desc *desc = desc_src;
12344 	u32 *reg = data;
12345 
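	/* Copy all data words from the descriptors, then append
	 * SEPARATOR_VALUE entries so the output ends on a REG_NUM_PER_LINE
	 * boundary; the return value is the total number of words written.
	 */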
12346 	entries_per_desc = ARRAY_SIZE(desc->data);
12347 	reg_num = entries_per_desc * bd_num;
12348 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12349 	for (i = 0; i < reg_num; i++) {
12350 		index = i % entries_per_desc;
12351 		desc_index = i / entries_per_desc;
12352 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12353 	}
12354 	for (i = 0; i < separator_num; i++)
12355 		*reg++ = SEPARATOR_VALUE;
12356 
12357 	return reg_num + separator_num;
12358 }
12359 
12360 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12361 {
12362 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12363 	int data_len_per_desc, bd_num, i;
12364 	int *bd_num_list;
12365 	u32 data_len;
12366 	int ret;
12367 
12368 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12369 	if (!bd_num_list)
12370 		return -ENOMEM;
12371 
12372 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12373 	if (ret) {
12374 		dev_err(&hdev->pdev->dev,
12375 			"Get dfx reg bd num fail, status is %d.\n", ret);
12376 		goto out;
12377 	}
12378 
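	/* Reserve (data_len / REG_LEN_PER_LINE + 1) whole lines per register
	 * type, leaving room for the separator words appended by
	 * hclge_dfx_reg_fetch_data().
	 */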
12379 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12380 	*len = 0;
12381 	for (i = 0; i < dfx_reg_type_num; i++) {
12382 		bd_num = bd_num_list[i];
12383 		data_len = data_len_per_desc * bd_num;
12384 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12385 	}
12386 
12387 out:
12388 	kfree(bd_num_list);
12389 	return ret;
12390 }
12391 
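/* read all DFX register types into the output buffer, issuing one query
 * command per type and reusing a descriptor array sized for the largest
 * type
 */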
12392 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12393 {
12394 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12395 	int bd_num, bd_num_max, buf_len, i;
12396 	struct hclge_desc *desc_src;
12397 	int *bd_num_list;
12398 	u32 *reg = data;
12399 	int ret;
12400 
12401 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12402 	if (!bd_num_list)
12403 		return -ENOMEM;
12404 
12405 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12406 	if (ret) {
12407 		dev_err(&hdev->pdev->dev,
12408 			"Get dfx reg bd num fail, status is %d.\n", ret);
12409 		goto out;
12410 	}
12411 
12412 	bd_num_max = bd_num_list[0];
12413 	for (i = 1; i < dfx_reg_type_num; i++)
12414 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12415 
12416 	buf_len = sizeof(*desc_src) * bd_num_max;
12417 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12418 	if (!desc_src) {
12419 		ret = -ENOMEM;
12420 		goto out;
12421 	}
12422 
12423 	for (i = 0; i < dfx_reg_type_num; i++) {
12424 		bd_num = bd_num_list[i];
12425 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12426 					     hclge_dfx_reg_opcode_list[i]);
12427 		if (ret) {
12428 			dev_err(&hdev->pdev->dev,
12429 				"Get dfx reg fail, status is %d.\n", ret);
12430 			break;
12431 		}
12432 
12433 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12434 	}
12435 
12436 	kfree(desc_src);
12437 out:
12438 	kfree(bd_num_list);
12439 	return ret;
12440 }
12441 
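/* dump the cmdq, common, per-ring and per-vector interrupt registers from
 * the PF register space; returns the number of u32 entries written,
 * separators included
 */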
12442 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12443 			      struct hnae3_knic_private_info *kinfo)
12444 {
12445 #define HCLGE_RING_REG_OFFSET		0x200
12446 #define HCLGE_RING_INT_REG_OFFSET	0x4
12447 
12448 	int i, j, reg_num, separator_num;
12449 	int data_num_sum;
12450 	u32 *reg = data;
12451 
12452 	/* fetch per-PF register values from the PF PCIe register space */
12453 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12454 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12455 	for (i = 0; i < reg_num; i++)
12456 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12457 	for (i = 0; i < separator_num; i++)
12458 		*reg++ = SEPARATOR_VALUE;
12459 	data_num_sum = reg_num + separator_num;
12460 
12461 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12462 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12463 	for (i = 0; i < reg_num; i++)
12464 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12465 	for (i = 0; i < separator_num; i++)
12466 		*reg++ = SEPARATOR_VALUE;
12467 	data_num_sum += reg_num + separator_num;
12468 
12469 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12470 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12471 	for (j = 0; j < kinfo->num_tqps; j++) {
12472 		for (i = 0; i < reg_num; i++)
12473 			*reg++ = hclge_read_dev(&hdev->hw,
12474 						ring_reg_addr_list[i] +
12475 						HCLGE_RING_REG_OFFSET * j);
12476 		for (i = 0; i < separator_num; i++)
12477 			*reg++ = SEPARATOR_VALUE;
12478 	}
12479 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12480 
12481 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12482 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12483 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12484 		for (i = 0; i < reg_num; i++)
12485 			*reg++ = hclge_read_dev(&hdev->hw,
12486 						tqp_intr_reg_addr_list[i] +
12487 						HCLGE_RING_INT_REG_OFFSET * j);
12488 		for (i = 0; i < separator_num; i++)
12489 			*reg++ = SEPARATOR_VALUE;
12490 	}
12491 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12492 
12493 	return data_num_sum;
12494 }
12495 
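/* return the total size in bytes of the register dump exposed through
 * ethtool (ethtool -d)
 */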
12496 static int hclge_get_regs_len(struct hnae3_handle *handle)
12497 {
12498 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12499 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12500 	struct hclge_vport *vport = hclge_get_vport(handle);
12501 	struct hclge_dev *hdev = vport->back;
12502 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12503 	int regs_lines_32_bit, regs_lines_64_bit;
12504 	int ret;
12505 
12506 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12507 	if (ret) {
12508 		dev_err(&hdev->pdev->dev,
12509 			"Get register number failed, ret = %d.\n", ret);
12510 		return ret;
12511 	}
12512 
12513 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12514 	if (ret) {
12515 		dev_err(&hdev->pdev->dev,
12516 			"Get dfx reg len failed, ret = %d.\n", ret);
12517 		return ret;
12518 	}
12519 
12520 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12521 		REG_SEPARATOR_LINE;
12522 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12523 		REG_SEPARATOR_LINE;
12524 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12525 		REG_SEPARATOR_LINE;
12526 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12527 		REG_SEPARATOR_LINE;
12528 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12529 		REG_SEPARATOR_LINE;
12530 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12531 		REG_SEPARATOR_LINE;
12532 
12533 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12534 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12535 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12536 }
12537 
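/* fill the ethtool register dump: PF registers, 32 bit and 64 bit
 * firmware registers and DFX registers, each section padded with
 * separator values
 */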
12538 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12539 			   void *data)
12540 {
12541 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12542 	struct hclge_vport *vport = hclge_get_vport(handle);
12543 	struct hclge_dev *hdev = vport->back;
12544 	u32 regs_num_32_bit, regs_num_64_bit;
12545 	int i, reg_num, separator_num, ret;
12546 	u32 *reg = data;
12547 
12548 	*version = hdev->fw_version;
12549 
12550 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12551 	if (ret) {
12552 		dev_err(&hdev->pdev->dev,
12553 			"Get register number failed, ret = %d.\n", ret);
12554 		return;
12555 	}
12556 
12557 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12558 
12559 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12560 	if (ret) {
12561 		dev_err(&hdev->pdev->dev,
12562 			"Get 32 bit register failed, ret = %d.\n", ret);
12563 		return;
12564 	}
12565 	reg_num = regs_num_32_bit;
12566 	reg += reg_num;
12567 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12568 	for (i = 0; i < separator_num; i++)
12569 		*reg++ = SEPARATOR_VALUE;
12570 
12571 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12572 	if (ret) {
12573 		dev_err(&hdev->pdev->dev,
12574 			"Get 64 bit register failed, ret = %d.\n", ret);
12575 		return;
12576 	}
12577 	reg_num = regs_num_64_bit * 2;
12578 	reg += reg_num;
12579 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12580 	for (i = 0; i < separator_num; i++)
12581 		*reg++ = SEPARATOR_VALUE;
12582 
12583 	ret = hclge_get_dfx_reg(hdev, reg);
12584 	if (ret)
12585 		dev_err(&hdev->pdev->dev,
12586 			"Get dfx register failed, ret = %d.\n", ret);
12587 }
12588 
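/* set the locate LED state through the LED status config command */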
12589 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12590 {
12591 	struct hclge_set_led_state_cmd *req;
12592 	struct hclge_desc desc;
12593 	int ret;
12594 
12595 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12596 
12597 	req = (struct hclge_set_led_state_cmd *)desc.data;
12598 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12599 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12600 
12601 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12602 	if (ret)
12603 		dev_err(&hdev->pdev->dev,
12604 			"Send set led state cmd error, ret = %d\n", ret);
12605 
12606 	return ret;
12607 }
12608 
12609 enum hclge_led_status {
12610 	HCLGE_LED_OFF,
12611 	HCLGE_LED_ON,
12612 	HCLGE_LED_NO_CHANGE = 0xFF,
12613 };
12614 
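/* handle ethtool -p (set_phys_id): switch the locate LED on or off */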
12615 static int hclge_set_led_id(struct hnae3_handle *handle,
12616 			    enum ethtool_phys_id_state status)
12617 {
12618 	struct hclge_vport *vport = hclge_get_vport(handle);
12619 	struct hclge_dev *hdev = vport->back;
12620 
12621 	switch (status) {
12622 	case ETHTOOL_ID_ACTIVE:
12623 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12624 	case ETHTOOL_ID_INACTIVE:
12625 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12626 	default:
12627 		return -EINVAL;
12628 	}
12629 }
12630 
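/* copy the supported and advertised link mode bitmaps from the MAC state */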
12631 static void hclge_get_link_mode(struct hnae3_handle *handle,
12632 				unsigned long *supported,
12633 				unsigned long *advertising)
12634 {
12635 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12636 	struct hclge_vport *vport = hclge_get_vport(handle);
12637 	struct hclge_dev *hdev = vport->back;
12638 	unsigned int idx;
12639 
12640 	for (idx = 0; idx < size; idx++) {
12641 		supported[idx] = hdev->hw.mac.supported[idx];
12642 		advertising[idx] = hdev->hw.mac.advertising[idx];
12643 	}
12644 }
12645 
12646 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12647 {
12648 	struct hclge_vport *vport = hclge_get_vport(handle);
12649 	struct hclge_dev *hdev = vport->back;
12650 
12651 	return hclge_config_gro(hdev, enable);
12652 }
12653 
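/* sync pending promiscuous mode changes to hardware: vport 0 (the PF) uses
 * the netdev flags plus the overflow promisc flags, the VF vports use the
 * modes requested by each VF, honouring the trusted setting
 */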
12654 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12655 {
12656 	struct hclge_vport *vport = &hdev->vport[0];
12657 	struct hnae3_handle *handle = &vport->nic;
12658 	u8 tmp_flags;
12659 	int ret;
12660 	u16 i;
12661 
12662 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12663 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12664 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12665 	}
12666 
12667 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12668 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12669 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12670 					     tmp_flags & HNAE3_MPE);
12671 		if (!ret) {
12672 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12673 				  &vport->state);
12674 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12675 				&vport->state);
12676 		}
12677 	}
12678 
12679 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12680 		bool uc_en = false;
12681 		bool mc_en = false;
12682 		bool bc_en;
12683 
12684 		vport = &hdev->vport[i];
12685 
12686 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12687 					&vport->state))
12688 			continue;
12689 
12690 		if (vport->vf_info.trusted) {
12691 			uc_en = vport->vf_info.request_uc_en > 0;
12692 			mc_en = vport->vf_info.request_mc_en > 0;
12693 		}
12694 		bc_en = vport->vf_info.request_bc_en > 0;
12695 
12696 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12697 						 mc_en, bc_en);
12698 		if (ret) {
12699 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12700 				&vport->state);
12701 			return;
12702 		}
12703 		hclge_set_vport_vlan_fltr_change(vport);
12704 	}
12705 }
12706 
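/* check with firmware whether an SFP module is present */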
12707 static bool hclge_module_existed(struct hclge_dev *hdev)
12708 {
12709 	struct hclge_desc desc;
12710 	u32 existed;
12711 	int ret;
12712 
12713 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12714 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12715 	if (ret) {
12716 		dev_err(&hdev->pdev->dev,
12717 			"failed to get SFP exist state, ret = %d\n", ret);
12718 		return false;
12719 	}
12720 
12721 	existed = le32_to_cpu(desc.data[0]);
12722 
12723 	return existed != 0;
12724 }
12725 
12726 /* need 6 BDs (140 bytes in total) in one read,
12727  * return the number of bytes actually read, 0 means read failed.
12728  */
12729 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12730 				     u32 len, u8 *data)
12731 {
12732 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12733 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12734 	u16 read_len;
12735 	u16 copy_len;
12736 	int ret;
12737 	int i;
12738 
12739 	/* setup all 6 bds to read module eeprom info. */
12740 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12741 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12742 					   true);
12743 
12744 		/* bd0~bd4 need next flag */
12745 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12746 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12747 	}
12748 
12749 	/* setup bd0, this bd contains offset and read length. */
12750 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12751 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12752 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12753 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12754 
12755 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12756 	if (ret) {
12757 		dev_err(&hdev->pdev->dev,
12758 			"failed to get SFP eeprom info, ret = %d\n", ret);
12759 		return 0;
12760 	}
12761 
12762 	/* copy sfp info from bd0 to out buffer. */
12763 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12764 	memcpy(data, sfp_info_bd0->data, copy_len);
12765 	read_len = copy_len;
12766 
12767 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12768 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12769 		if (read_len >= len)
12770 			return read_len;
12771 
12772 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12773 		memcpy(data + read_len, desc[i].data, copy_len);
12774 		read_len += copy_len;
12775 	}
12776 
12777 	return read_len;
12778 }
12779 
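/* read module EEPROM data for ethtool -m, fiber media only; the EEPROM is
 * read in chunks of up to HCLGE_SFP_INFO_MAX_LEN bytes until the requested
 * length is filled
 */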
12780 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12781 				   u32 len, u8 *data)
12782 {
12783 	struct hclge_vport *vport = hclge_get_vport(handle);
12784 	struct hclge_dev *hdev = vport->back;
12785 	u32 read_len = 0;
12786 	u16 data_len;
12787 
12788 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12789 		return -EOPNOTSUPP;
12790 
12791 	if (!hclge_module_existed(hdev))
12792 		return -ENXIO;
12793 
12794 	while (read_len < len) {
12795 		data_len = hclge_get_sfp_eeprom_info(hdev,
12796 						     offset + read_len,
12797 						     len - read_len,
12798 						     data + read_len);
12799 		if (!data_len)
12800 			return -EIO;
12801 
12802 		read_len += data_len;
12803 	}
12804 
12805 	return 0;
12806 }
12807 
12808 static const struct hnae3_ae_ops hclge_ops = {
12809 	.init_ae_dev = hclge_init_ae_dev,
12810 	.uninit_ae_dev = hclge_uninit_ae_dev,
12811 	.reset_prepare = hclge_reset_prepare_general,
12812 	.reset_done = hclge_reset_done,
12813 	.init_client_instance = hclge_init_client_instance,
12814 	.uninit_client_instance = hclge_uninit_client_instance,
12815 	.map_ring_to_vector = hclge_map_ring_to_vector,
12816 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12817 	.get_vector = hclge_get_vector,
12818 	.put_vector = hclge_put_vector,
12819 	.set_promisc_mode = hclge_set_promisc_mode,
12820 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12821 	.set_loopback = hclge_set_loopback,
12822 	.start = hclge_ae_start,
12823 	.stop = hclge_ae_stop,
12824 	.client_start = hclge_client_start,
12825 	.client_stop = hclge_client_stop,
12826 	.get_status = hclge_get_status,
12827 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12828 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12829 	.get_media_type = hclge_get_media_type,
12830 	.check_port_speed = hclge_check_port_speed,
12831 	.get_fec = hclge_get_fec,
12832 	.set_fec = hclge_set_fec,
12833 	.get_rss_key_size = hclge_get_rss_key_size,
12834 	.get_rss = hclge_get_rss,
12835 	.set_rss = hclge_set_rss,
12836 	.set_rss_tuple = hclge_set_rss_tuple,
12837 	.get_rss_tuple = hclge_get_rss_tuple,
12838 	.get_tc_size = hclge_get_tc_size,
12839 	.get_mac_addr = hclge_get_mac_addr,
12840 	.set_mac_addr = hclge_set_mac_addr,
12841 	.do_ioctl = hclge_do_ioctl,
12842 	.add_uc_addr = hclge_add_uc_addr,
12843 	.rm_uc_addr = hclge_rm_uc_addr,
12844 	.add_mc_addr = hclge_add_mc_addr,
12845 	.rm_mc_addr = hclge_rm_mc_addr,
12846 	.set_autoneg = hclge_set_autoneg,
12847 	.get_autoneg = hclge_get_autoneg,
12848 	.restart_autoneg = hclge_restart_autoneg,
12849 	.halt_autoneg = hclge_halt_autoneg,
12850 	.get_pauseparam = hclge_get_pauseparam,
12851 	.set_pauseparam = hclge_set_pauseparam,
12852 	.set_mtu = hclge_set_mtu,
12853 	.reset_queue = hclge_reset_tqp,
12854 	.get_stats = hclge_get_stats,
12855 	.get_mac_stats = hclge_get_mac_stat,
12856 	.update_stats = hclge_update_stats,
12857 	.get_strings = hclge_get_strings,
12858 	.get_sset_count = hclge_get_sset_count,
12859 	.get_fw_version = hclge_get_fw_version,
12860 	.get_mdix_mode = hclge_get_mdix_mode,
12861 	.enable_vlan_filter = hclge_enable_vlan_filter,
12862 	.set_vlan_filter = hclge_set_vlan_filter,
12863 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12864 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12865 	.reset_event = hclge_reset_event,
12866 	.get_reset_level = hclge_get_reset_level,
12867 	.set_default_reset_request = hclge_set_def_reset_request,
12868 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12869 	.set_channels = hclge_set_channels,
12870 	.get_channels = hclge_get_channels,
12871 	.get_regs_len = hclge_get_regs_len,
12872 	.get_regs = hclge_get_regs,
12873 	.set_led_id = hclge_set_led_id,
12874 	.get_link_mode = hclge_get_link_mode,
12875 	.add_fd_entry = hclge_add_fd_entry,
12876 	.del_fd_entry = hclge_del_fd_entry,
12877 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12878 	.get_fd_rule_info = hclge_get_fd_rule_info,
12879 	.get_fd_all_rules = hclge_get_all_rules,
12880 	.enable_fd = hclge_enable_fd,
12881 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12882 	.dbg_read_cmd = hclge_dbg_read_cmd,
12883 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12884 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12885 	.ae_dev_resetting = hclge_ae_dev_resetting,
12886 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12887 	.set_gro_en = hclge_gro_en,
12888 	.get_global_queue_id = hclge_covert_handle_qid_global,
12889 	.set_timer_task = hclge_set_timer_task,
12890 	.mac_connect_phy = hclge_mac_connect_phy,
12891 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12892 	.get_vf_config = hclge_get_vf_config,
12893 	.set_vf_link_state = hclge_set_vf_link_state,
12894 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12895 	.set_vf_trust = hclge_set_vf_trust,
12896 	.set_vf_rate = hclge_set_vf_rate,
12897 	.set_vf_mac = hclge_set_vf_mac,
12898 	.get_module_eeprom = hclge_get_module_eeprom,
12899 	.get_cmdq_stat = hclge_get_cmdq_stat,
12900 	.add_cls_flower = hclge_add_cls_flower,
12901 	.del_cls_flower = hclge_del_cls_flower,
12902 	.cls_flower_active = hclge_is_cls_flower_active,
12903 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12904 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12905 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
12906 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
12907 	.get_ts_info = hclge_ptp_get_ts_info,
12908 };
12909 
12910 static struct hnae3_ae_algo ae_algo = {
12911 	.ops = &hclge_ops,
12912 	.pdev_id_table = ae_algo_pci_tbl,
12913 };
12914 
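/* create the driver workqueue and register this algo with the hnae3
 * framework at module load
 */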
12915 static int hclge_init(void)
12916 {
12917 	pr_info("%s is initializing\n", HCLGE_NAME);
12918 
12919 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12920 	if (!hclge_wq) {
12921 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12922 		return -ENOMEM;
12923 	}
12924 
12925 	hnae3_register_ae_algo(&ae_algo);
12926 
12927 	return 0;
12928 }
12929 
12930 static void hclge_exit(void)
12931 {
12932 	hnae3_unregister_ae_algo(&ae_algo);
12933 	destroy_workqueue(hclge_wq);
12934 }
12935 module_init(hclge_init);
12936 module_exit(hclge_exit);
12937 
12938 MODULE_LICENSE("GPL");
12939 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12940 MODULE_DESCRIPTION("HCLGE Driver");
12941 MODULE_VERSION(HCLGE_MOD_VERSION);
12942