1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
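/* Helpers: read a u64 statistic at a byte offset within a stats structure,
 * and get the byte offset of a field inside struct hclge_mac_stats.
 */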
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
445 
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
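	/* Accumulate the returned counters into hdev->mac_stats in field
	 * order: the first descriptor carries the command header plus
	 * HCLGE_RD_FIRST_STATS_NUM stats, the remaining descriptors are
	 * entirely stats data (HCLGE_RD_OTHER_STATS_NUM each).
	 */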
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0032, only the first desc has the head */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
493 	/* This may be called inside atomic sections,
494 	 * so GFP_ATOMIC is more suitable here
495 	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0034, only the first desc has the head */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
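	/* Number of descriptors needed to read reg_num stats registers:
	 * effectively 1 + DIV_ROUND_UP(reg_num - 3, 4), assuming the first
	 * descriptor holds three registers and each of the rest holds four.
	 */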
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
546 
547 	return 0;
548 }
549 
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 	/* The firmware supports the new statistics acquisition method */
557 	if (!ret)
558 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 	else if (ret == -EOPNOTSUPP)
560 		ret = hclge_mac_update_stats_defective(hdev);
561 	else
562 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563 
564 	return ret;
565 }
566 
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 	struct hclge_vport *vport = hclge_get_vport(handle);
571 	struct hclge_dev *hdev = vport->back;
572 	struct hnae3_queue *queue;
573 	struct hclge_desc desc[1];
574 	struct hclge_tqp *tqp;
575 	int ret, i;
576 
577 	for (i = 0; i < kinfo->num_tqps; i++) {
578 		queue = handle->kinfo.tqp[i];
579 		tqp = container_of(queue, struct hclge_tqp, q);
580 		/* command: HCLGE_OPC_QUERY_RX_STATS */
581 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582 					   true);
583 
584 		desc[0].data[0] = cpu_to_le32(tqp->index);
585 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
586 		if (ret) {
587 			dev_err(&hdev->pdev->dev,
588 				"Query tqp stat fail, status = %d, queue = %d\n",
589 				ret, i);
590 			return ret;
591 		}
592 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 			le32_to_cpu(desc[0].data[1]);
594 	}
595 
596 	for (i = 0; i < kinfo->num_tqps; i++) {
597 		queue = handle->kinfo.tqp[i];
598 		tqp = container_of(queue, struct hclge_tqp, q);
599 		/* command: HCLGE_OPC_QUERY_TX_STATS */
600 		hclge_cmd_setup_basic_desc(&desc[0],
601 					   HCLGE_OPC_QUERY_TX_STATS,
602 					   true);
603 
604 		desc[0].data[0] = cpu_to_le32(tqp->index);
605 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
606 		if (ret) {
607 			dev_err(&hdev->pdev->dev,
608 				"Query tqp stat fail, status = %d, queue = %d\n",
609 				ret, i);
610 			return ret;
611 		}
612 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 			le32_to_cpu(desc[0].data[1]);
614 	}
615 
616 	return 0;
617 }
618 
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	struct hclge_tqp *tqp;
623 	u64 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629 	}
630 
631 	for (i = 0; i < kinfo->num_tqps; i++) {
632 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634 	}
635 
636 	return buff;
637 }
638 
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642 
643 	/* each tqp has one TX queue and one RX queue */
644 	return kinfo->num_tqps * 2;
645 }
646 
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650 	u8 *buff = data;
651 	int i;
652 
653 	for (i = 0; i < kinfo->num_tqps; i++) {
654 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 			struct hclge_tqp, q);
656 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657 			 tqp->index);
658 		buff = buff + ETH_GSTRING_LEN;
659 	}
660 
661 	for (i = 0; i < kinfo->num_tqps; i++) {
662 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 			struct hclge_tqp, q);
664 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665 			 tqp->index);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return buff;
670 }
671 
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 				 const struct hclge_comm_stats_str strs[],
674 				 int size, u64 *data)
675 {
676 	u64 *buf = data;
677 	u32 i;
678 
679 	for (i = 0; i < size; i++)
680 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681 
682 	return buf + size;
683 }
684 
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 				  const struct hclge_comm_stats_str strs[],
687 				  int size, u8 *data)
688 {
689 	char *buff = (char *)data;
690 	u32 i;
691 
692 	if (stringset != ETH_SS_STATS)
693 		return buff;
694 
695 	for (i = 0; i < size; i++) {
696 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 		buff = buff + ETH_GSTRING_LEN;
698 	}
699 
700 	return (u8 *)buff;
701 }
702 
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705 	struct hnae3_handle *handle;
706 	int status;
707 
708 	handle = &hdev->vport[0].nic;
709 	if (handle->client) {
710 		status = hclge_tqps_update_stats(handle);
711 		if (status) {
712 			dev_err(&hdev->pdev->dev,
713 				"Update TQPS stats fail, status = %d.\n",
714 				status);
715 		}
716 	}
717 
718 	status = hclge_mac_update_stats(hdev);
719 	if (status)
720 		dev_err(&hdev->pdev->dev,
721 			"Update MAC stats fail, status = %d.\n", status);
722 }
723 
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 			       struct net_device_stats *net_stats)
726 {
727 	struct hclge_vport *vport = hclge_get_vport(handle);
728 	struct hclge_dev *hdev = vport->back;
729 	int status;
730 
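	/* HCLGE_STATE_STATISTICS_UPDATING serializes stats updates; skip
	 * this round if another update is already in progress.
	 */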
731 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732 		return;
733 
734 	status = hclge_mac_update_stats(hdev);
735 	if (status)
736 		dev_err(&hdev->pdev->dev,
737 			"Update MAC stats fail, status = %d.\n",
738 			status);
739 
740 	status = hclge_tqps_update_stats(handle);
741 	if (status)
742 		dev_err(&hdev->pdev->dev,
743 			"Update TQPS stats fail, status = %d.\n",
744 			status);
745 
746 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748 
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 		HNAE3_SUPPORT_PHY_LOOPBACK |\
753 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755 
756 	struct hclge_vport *vport = hclge_get_vport(handle);
757 	struct hclge_dev *hdev = vport->back;
758 	int count = 0;
759 
760 	/* Loopback test support rules:
761 	 * mac: only supported in GE mode
762 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
763 	 * phy: only supported when a phy device exists on the board
764 	 */
765 	if (stringset == ETH_SS_TEST) {
766 		/* clear loopback bit flags at first */
767 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772 			count += 1;
773 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774 		}
775 
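		/* serdes serial and parallel loopbacks are always supported */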
776 		count += 2;
777 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779 
780 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 		     hdev->hw.mac.phydev->drv->set_loopback) ||
782 		    hnae3_dev_phy_imp_supported(hdev)) {
783 			count += 1;
784 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785 		}
786 	} else if (stringset == ETH_SS_STATS) {
787 		count = ARRAY_SIZE(g_mac_stats_string) +
788 			hclge_tqps_get_sset_count(handle, stringset);
789 	}
790 
791 	return count;
792 }
793 
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795 			      u8 *data)
796 {
797 	u8 *p = (char *)data;
798 	int size;
799 
800 	if (stringset == ETH_SS_STATS) {
801 		size = ARRAY_SIZE(g_mac_stats_string);
802 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803 					   size, p);
804 		p = hclge_tqps_get_strings(handle, p);
805 	} else if (stringset == ETH_SS_TEST) {
806 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808 			       ETH_GSTRING_LEN);
809 			p += ETH_GSTRING_LEN;
810 		}
811 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813 			       ETH_GSTRING_LEN);
814 			p += ETH_GSTRING_LEN;
815 		}
816 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817 			memcpy(p,
818 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819 			       ETH_GSTRING_LEN);
820 			p += ETH_GSTRING_LEN;
821 		}
822 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824 			       ETH_GSTRING_LEN);
825 			p += ETH_GSTRING_LEN;
826 		}
827 	}
828 }
829 
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832 	struct hclge_vport *vport = hclge_get_vport(handle);
833 	struct hclge_dev *hdev = vport->back;
834 	u64 *p;
835 
836 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 				 ARRAY_SIZE(g_mac_stats_string), data);
838 	p = hclge_tqps_get_stats(handle, p);
839 }
840 
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 			       struct hns3_mac_stats *mac_stats)
843 {
844 	struct hclge_vport *vport = hclge_get_vport(handle);
845 	struct hclge_dev *hdev = vport->back;
846 
847 	hclge_update_stats(handle, NULL);
848 
849 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852 
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 				   struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK	0xF
857 
858 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859 		return -EINVAL;
860 
861 	/* Record whether this pf is the main pf */
862 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 		hdev->flag |= HCLGE_FLAG_MAIN;
864 	else
865 		hdev->flag &= ~HCLGE_FLAG_MAIN;
866 
867 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868 	return 0;
869 }
870 
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT	5
874 
875 	struct hclge_func_status_cmd *req;
876 	struct hclge_desc desc;
877 	int timeout = 0;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 	req = (struct hclge_func_status_cmd *)desc.data;
882 
883 	do {
884 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 		if (ret) {
886 			dev_err(&hdev->pdev->dev,
887 				"query function status failed %d.\n", ret);
888 			return ret;
889 		}
890 
891 		/* Check pf reset is done */
892 		if (req->pf_state)
893 			break;
894 		usleep_range(1000, 2000);
895 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
896 
897 	return hclge_parse_func_status(hdev, req);
898 }
899 
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902 	struct hclge_pf_res_cmd *req;
903 	struct hclge_desc desc;
904 	int ret;
905 
906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908 	if (ret) {
909 		dev_err(&hdev->pdev->dev,
910 			"query pf resource failed %d.\n", ret);
911 		return ret;
912 	}
913 
914 	req = (struct hclge_pf_res_cmd *)desc.data;
915 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 			 le16_to_cpu(req->ext_tqp_num);
917 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918 
919 	if (req->tx_buf_size)
920 		hdev->tx_buf_size =
921 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922 	else
923 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924 
925 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926 
927 	if (req->dv_buf_size)
928 		hdev->dv_buf_size =
929 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930 	else
931 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932 
933 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934 
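	/* the PF needs at least HNAE3_MIN_VECTOR_NUM NIC MSI vectors to work */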
935 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 		dev_err(&hdev->pdev->dev,
938 			"only %u msi resources available, not enough for pf(min:2).\n",
939 			hdev->num_nic_msi);
940 		return -EINVAL;
941 	}
942 
943 	if (hnae3_dev_roce_supported(hdev)) {
944 		hdev->num_roce_msi =
945 			le16_to_cpu(req->pf_intr_vector_number_roce);
946 
947 		/* PF should have NIC vectors and RoCE vectors;
948 		 * NIC vectors are queued before RoCE vectors.
949 		 */
950 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951 	} else {
952 		hdev->num_msi = hdev->num_nic_msi;
953 	}
954 
955 	return 0;
956 }
957 
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
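	/* map the firmware speed code to a MAC speed: codes 0-5 cover
	 * 1G/10G/25G/40G/50G/100G, 6-7 cover 10M/100M and 8 is 200G
	 */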
960 	switch (speed_cmd) {
961 	case 6:
962 		*speed = HCLGE_MAC_SPEED_10M;
963 		break;
964 	case 7:
965 		*speed = HCLGE_MAC_SPEED_100M;
966 		break;
967 	case 0:
968 		*speed = HCLGE_MAC_SPEED_1G;
969 		break;
970 	case 1:
971 		*speed = HCLGE_MAC_SPEED_10G;
972 		break;
973 	case 2:
974 		*speed = HCLGE_MAC_SPEED_25G;
975 		break;
976 	case 3:
977 		*speed = HCLGE_MAC_SPEED_40G;
978 		break;
979 	case 4:
980 		*speed = HCLGE_MAC_SPEED_50G;
981 		break;
982 	case 5:
983 		*speed = HCLGE_MAC_SPEED_100G;
984 		break;
985 	case 8:
986 		*speed = HCLGE_MAC_SPEED_200G;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997 	struct hclge_vport *vport = hclge_get_vport(handle);
998 	struct hclge_dev *hdev = vport->back;
999 	u32 speed_ability = hdev->hw.mac.speed_ability;
1000 	u32 speed_bit = 0;
1001 
1002 	switch (speed) {
1003 	case HCLGE_MAC_SPEED_10M:
1004 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_100M:
1007 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_1G:
1010 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_10G:
1013 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1014 		break;
1015 	case HCLGE_MAC_SPEED_25G:
1016 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1017 		break;
1018 	case HCLGE_MAC_SPEED_40G:
1019 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1020 		break;
1021 	case HCLGE_MAC_SPEED_50G:
1022 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1023 		break;
1024 	case HCLGE_MAC_SPEED_100G:
1025 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1026 		break;
1027 	case HCLGE_MAC_SPEED_200G:
1028 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
1207 	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define SPEED_ABILITY_EXT_SHIFT			8
1283 
1284 	struct hclge_cfg_param_cmd *req;
1285 	u64 mac_addr_tmp_high;
1286 	u16 speed_ability_ext;
1287 	u64 mac_addr_tmp;
1288 	unsigned int i;
1289 
1290 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1291 
1292 	/* get the configuration */
1293 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1294 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1295 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1296 					    HCLGE_CFG_TQP_DESC_N_M,
1297 					    HCLGE_CFG_TQP_DESC_N_S);
1298 
1299 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1300 					HCLGE_CFG_PHY_ADDR_M,
1301 					HCLGE_CFG_PHY_ADDR_S);
1302 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1303 					  HCLGE_CFG_MEDIA_TP_M,
1304 					  HCLGE_CFG_MEDIA_TP_S);
1305 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 					  HCLGE_CFG_RX_BUF_LEN_M,
1307 					  HCLGE_CFG_RX_BUF_LEN_S);
1308 	/* get mac_address */
1309 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1310 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1311 					    HCLGE_CFG_MAC_ADDR_H_M,
1312 					    HCLGE_CFG_MAC_ADDR_H_S);
1313 
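	/* merge the upper MAC address bits into bits 32-47; the split shift
	 * is equivalent to shifting left by 32
	 */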
1314 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1315 
1316 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1317 					     HCLGE_CFG_DEFAULT_SPEED_M,
1318 					     HCLGE_CFG_DEFAULT_SPEED_S);
1319 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1320 					       HCLGE_CFG_RSS_SIZE_M,
1321 					       HCLGE_CFG_RSS_SIZE_S);
1322 
1323 	for (i = 0; i < ETH_ALEN; i++)
1324 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1325 
1326 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1327 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1328 
1329 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1330 					     HCLGE_CFG_SPEED_ABILITY_M,
1331 					     HCLGE_CFG_SPEED_ABILITY_S);
1332 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1333 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1335 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1336 
1337 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1338 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1339 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1340 	if (!cfg->umv_space)
1341 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1342 
1343 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1344 					       HCLGE_CFG_PF_RSS_SIZE_M,
1345 					       HCLGE_CFG_PF_RSS_SIZE_S);
1346 
1347 	/* HCLGE_CFG_PF_RSS_SIZE_M stores the exponent of the PF max rss size
1348 	 * (a power of 2) instead of the value itself, which is more flexible
1349 	 * for future changes and expansions.
1350 	 * A PF field of 0 is not meaningful here, so in that case the PF
1351 	 * falls back to the VF max rss size field (HCLGE_CFG_RSS_SIZE_S),
1352 	 * i.e. PF and VF share the same max rss size.
1353 	 */
1354 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1355 			       1U << cfg->pf_rss_size_max :
1356 			       cfg->vf_rss_size_max;
1357 }
1358 
1359 /* hclge_get_cfg: query the static parameters from flash
1360  * @hdev: pointer to struct hclge_dev
1361  * @hcfg: the config structure to be filled
1362  */
1363 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1364 {
1365 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1366 	struct hclge_cfg_param_cmd *req;
1367 	unsigned int i;
1368 	int ret;
1369 
1370 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1371 		u32 offset = 0;
1372 
1373 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1374 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1375 					   true);
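		/* each descriptor reads HCLGE_CFG_RD_LEN_BYTES of config data
		 * starting at its own byte offset
		 */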
1376 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1377 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1378 		/* Length must be in units of 4 bytes when sent to hardware */
1379 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1380 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1381 		req->offset = cpu_to_le32(offset);
1382 	}
1383 
1384 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1385 	if (ret) {
1386 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1387 		return ret;
1388 	}
1389 
1390 	hclge_parse_cfg(hcfg, desc);
1391 
1392 	return 0;
1393 }
1394 
1395 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1396 {
1397 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1398 
1399 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1400 
1401 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1402 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1403 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1404 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1405 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1406 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1407 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1408 }
1409 
1410 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1411 				  struct hclge_desc *desc)
1412 {
1413 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414 	struct hclge_dev_specs_0_cmd *req0;
1415 	struct hclge_dev_specs_1_cmd *req1;
1416 
1417 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1418 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1419 
1420 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1421 	ae_dev->dev_specs.rss_ind_tbl_size =
1422 		le16_to_cpu(req0->rss_ind_tbl_size);
1423 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1424 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1425 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1426 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1427 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1428 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1429 }
1430 
1431 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1432 {
1433 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1434 
1435 	if (!dev_specs->max_non_tso_bd_num)
1436 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1437 	if (!dev_specs->rss_ind_tbl_size)
1438 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1439 	if (!dev_specs->rss_key_size)
1440 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1441 	if (!dev_specs->max_tm_rate)
1442 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1443 	if (!dev_specs->max_qset_num)
1444 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1445 	if (!dev_specs->max_int_gl)
1446 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1447 	if (!dev_specs->max_frm_size)
1448 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1449 }
1450 
1451 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1452 {
1453 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1454 	int ret;
1455 	int i;
1456 
1457 	/* set default specifications as devices lower than version V3 do not
1458 	 * support querying specifications from firmware.
1459 	 */
1460 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1461 		hclge_set_default_dev_specs(hdev);
1462 		return 0;
1463 	}
1464 
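	/* chain the descriptors: every descriptor except the last one
	 * carries the NEXT flag
	 */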
1465 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1466 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1467 					   true);
1468 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1469 	}
1470 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1471 
1472 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1473 	if (ret)
1474 		return ret;
1475 
1476 	hclge_parse_dev_specs(hdev, desc);
1477 	hclge_check_dev_specs(hdev);
1478 
1479 	return 0;
1480 }
1481 
1482 static int hclge_get_cap(struct hclge_dev *hdev)
1483 {
1484 	int ret;
1485 
1486 	ret = hclge_query_function_status(hdev);
1487 	if (ret) {
1488 		dev_err(&hdev->pdev->dev,
1489 			"query function status error %d.\n", ret);
1490 		return ret;
1491 	}
1492 
1493 	/* get pf resource */
1494 	return hclge_query_pf_resource(hdev);
1495 }
1496 
1497 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1498 {
1499 #define HCLGE_MIN_TX_DESC	64
1500 #define HCLGE_MIN_RX_DESC	64
1501 
1502 	if (!is_kdump_kernel())
1503 		return;
1504 
1505 	dev_info(&hdev->pdev->dev,
1506 		 "Running kdump kernel. Using minimal resources\n");
1507 
1508 	/* the minimal number of queue pairs equals the number of vports */
1509 	hdev->num_tqps = hdev->num_req_vfs + 1;
1510 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1511 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1512 }
1513 
1514 static int hclge_configure(struct hclge_dev *hdev)
1515 {
1516 	struct hclge_cfg cfg;
1517 	unsigned int i;
1518 	int ret;
1519 
1520 	ret = hclge_get_cfg(hdev, &cfg);
1521 	if (ret)
1522 		return ret;
1523 
1524 	hdev->base_tqp_pid = 0;
1525 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1526 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1527 	hdev->rx_buf_len = cfg.rx_buf_len;
1528 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1529 	hdev->hw.mac.media_type = cfg.media_type;
1530 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1531 	hdev->num_tx_desc = cfg.tqp_desc_num;
1532 	hdev->num_rx_desc = cfg.tqp_desc_num;
1533 	hdev->tm_info.num_pg = 1;
1534 	hdev->tc_max = cfg.tc_num;
1535 	hdev->tm_info.hw_pfc_map = 0;
1536 	hdev->wanted_umv_size = cfg.umv_space;
1537 
1538 	if (hnae3_dev_fd_supported(hdev)) {
1539 		hdev->fd_en = true;
1540 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1541 	}
1542 
1543 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1544 	if (ret) {
1545 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1546 			cfg.default_speed, ret);
1547 		return ret;
1548 	}
1549 
1550 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1551 
1552 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1553 
1554 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1555 	    (hdev->tc_max < 1)) {
1556 		dev_warn(&hdev->pdev->dev, "invalid TC num = %u, set to 1.\n",
1557 			 hdev->tc_max);
1558 		hdev->tc_max = 1;
1559 	}
1560 
1561 	/* Dev does not support DCB */
1562 	if (!hnae3_dev_dcb_supported(hdev)) {
1563 		hdev->tc_max = 1;
1564 		hdev->pfc_max = 0;
1565 	} else {
1566 		hdev->pfc_max = hdev->tc_max;
1567 	}
1568 
1569 	hdev->tm_info.num_tc = 1;
1570 
1571 	/* Non-contiguous tc configuration is currently not supported */
1572 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1573 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1574 
1575 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1576 
1577 	hclge_init_kdump_kernel_config(hdev);
1578 
1579 	/* Set the init affinity based on pci func number */
1580 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1581 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1582 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1583 			&hdev->affinity_mask);
1584 
1585 	return ret;
1586 }
1587 
1588 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1589 			    u16 tso_mss_max)
1590 {
1591 	struct hclge_cfg_tso_status_cmd *req;
1592 	struct hclge_desc desc;
1593 
1594 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1595 
1596 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1597 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1598 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1599 
1600 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1601 }
1602 
1603 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1604 {
1605 	struct hclge_cfg_gro_status_cmd *req;
1606 	struct hclge_desc desc;
1607 	int ret;
1608 
1609 	if (!hnae3_dev_gro_supported(hdev))
1610 		return 0;
1611 
1612 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1613 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1614 
1615 	req->gro_en = en ? 1 : 0;
1616 
1617 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1618 	if (ret)
1619 		dev_err(&hdev->pdev->dev,
1620 			"GRO hardware config cmd failed, ret = %d\n", ret);
1621 
1622 	return ret;
1623 }
1624 
1625 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1626 {
1627 	struct hclge_tqp *tqp;
1628 	int i;
1629 
1630 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1631 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1632 	if (!hdev->htqp)
1633 		return -ENOMEM;
1634 
1635 	tqp = hdev->htqp;
1636 
1637 	for (i = 0; i < hdev->num_tqps; i++) {
1638 		tqp->dev = &hdev->pdev->dev;
1639 		tqp->index = i;
1640 
1641 		tqp->q.ae_algo = &ae_algo;
1642 		tqp->q.buf_size = hdev->rx_buf_len;
1643 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1644 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1645 
1646 		/* need an extended offset to configure queues >=
1647 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1648 		 */
1649 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1650 			tqp->q.io_base = hdev->hw.io_base +
1651 					 HCLGE_TQP_REG_OFFSET +
1652 					 i * HCLGE_TQP_REG_SIZE;
1653 		else
1654 			tqp->q.io_base = hdev->hw.io_base +
1655 					 HCLGE_TQP_REG_OFFSET +
1656 					 HCLGE_TQP_EXT_REG_OFFSET +
1657 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1658 					 HCLGE_TQP_REG_SIZE;
1659 
1660 		tqp++;
1661 	}
1662 
1663 	return 0;
1664 }
1665 
1666 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1667 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1668 {
1669 	struct hclge_tqp_map_cmd *req;
1670 	struct hclge_desc desc;
1671 	int ret;
1672 
1673 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1674 
1675 	req = (struct hclge_tqp_map_cmd *)desc.data;
1676 	req->tqp_id = cpu_to_le16(tqp_pid);
1677 	req->tqp_vf = func_id;
1678 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1679 	if (!is_pf)
1680 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1681 	req->tqp_vid = cpu_to_le16(tqp_vid);
1682 
1683 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1684 	if (ret)
1685 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1686 
1687 	return ret;
1688 }
1689 
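/* Assign up to @num_tqps unallocated hardware TQPs to the vport and derive
 * its rss_size, bounded by the PF rss limit and by the number of NIC MSI
 * vectors so that each queue can keep its own interrupt vector.
 */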
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1691 {
1692 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1693 	struct hclge_dev *hdev = vport->back;
1694 	int i, alloced;
1695 
1696 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1697 	     alloced < num_tqps; i++) {
1698 		if (!hdev->htqp[i].alloced) {
1699 			hdev->htqp[i].q.handle = &vport->nic;
1700 			hdev->htqp[i].q.tqp_index = alloced;
1701 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1702 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1703 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1704 			hdev->htqp[i].alloced = true;
1705 			alloced++;
1706 		}
1707 	}
1708 	vport->alloc_tqps = alloced;
1709 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1710 				vport->alloc_tqps / hdev->tm_info.num_tc);
1711 
	/* ensure one-to-one mapping between irq and queue by default */
1713 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1714 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1715 
1716 	return 0;
1717 }
1718 
1719 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
1723 	struct hnae3_handle *nic = &vport->nic;
1724 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1725 	struct hclge_dev *hdev = vport->back;
1726 	int ret;
1727 
1728 	kinfo->num_tx_desc = num_tx_desc;
1729 	kinfo->num_rx_desc = num_rx_desc;
1730 
1731 	kinfo->rx_buf_len = hdev->rx_buf_len;
1732 
1733 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1734 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1735 	if (!kinfo->tqp)
1736 		return -ENOMEM;
1737 
1738 	ret = hclge_assign_tqp(vport, num_tqps);
1739 	if (ret)
		dev_err(&hdev->pdev->dev, "failed to assign TQPs, ret = %d.\n", ret);
1741 
1742 	return ret;
1743 }
1744 
1745 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1746 				  struct hclge_vport *vport)
1747 {
1748 	struct hnae3_handle *nic = &vport->nic;
1749 	struct hnae3_knic_private_info *kinfo;
1750 	u16 i;
1751 
1752 	kinfo = &nic->kinfo;
1753 	for (i = 0; i < vport->alloc_tqps; i++) {
1754 		struct hclge_tqp *q =
1755 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1756 		bool is_pf;
1757 		int ret;
1758 
1759 		is_pf = !(vport->vport_id);
1760 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1761 					     i, is_pf);
1762 		if (ret)
1763 			return ret;
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 static int hclge_map_tqp(struct hclge_dev *hdev)
1770 {
1771 	struct hclge_vport *vport = hdev->vport;
1772 	u16 i, num_vport;
1773 
1774 	num_vport = hdev->num_req_vfs + 1;
1775 	for (i = 0; i < num_vport; i++)	{
1776 		int ret;
1777 
1778 		ret = hclge_map_tqp_to_vport(hdev, vport);
1779 		if (ret)
1780 			return ret;
1781 
1782 		vport++;
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1789 {
1790 	struct hnae3_handle *nic = &vport->nic;
1791 	struct hclge_dev *hdev = vport->back;
1792 	int ret;
1793 
1794 	nic->pdev = hdev->pdev;
1795 	nic->ae_algo = &ae_algo;
1796 	nic->numa_node_mask = hdev->numa_node_mask;
1797 
1798 	ret = hclge_knic_setup(vport, num_tqps,
1799 			       hdev->num_tx_desc, hdev->num_rx_desc);
1800 	if (ret)
1801 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1802 
1803 	return ret;
1804 }
1805 
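/* Allocate one vport for the PF itself plus one per requested VF, distribute
 * the TQPs evenly among them (the main vport gets the remainder) and set up
 * the per-vport defaults and mac/vlan list heads.
 */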
1806 static int hclge_alloc_vport(struct hclge_dev *hdev)
1807 {
1808 	struct pci_dev *pdev = hdev->pdev;
1809 	struct hclge_vport *vport;
1810 	u32 tqp_main_vport;
1811 	u32 tqp_per_vport;
1812 	int num_vport, i;
1813 	int ret;
1814 
	/* We need to alloc a vport for the main NIC of the PF */
1816 	num_vport = hdev->num_req_vfs + 1;
1817 
1818 	if (hdev->num_tqps < num_vport) {
1819 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1820 			hdev->num_tqps, num_vport);
1821 		return -EINVAL;
1822 	}
1823 
1824 	/* Alloc the same number of TQPs for every vport */
1825 	tqp_per_vport = hdev->num_tqps / num_vport;
1826 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1827 
1828 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1829 			     GFP_KERNEL);
1830 	if (!vport)
1831 		return -ENOMEM;
1832 
1833 	hdev->vport = vport;
1834 	hdev->num_alloc_vport = num_vport;
1835 
1836 	if (IS_ENABLED(CONFIG_PCI_IOV))
1837 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1838 
1839 	for (i = 0; i < num_vport; i++) {
1840 		vport->back = hdev;
1841 		vport->vport_id = i;
1842 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1843 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1844 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1845 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1846 		INIT_LIST_HEAD(&vport->vlan_list);
1847 		INIT_LIST_HEAD(&vport->uc_mac_list);
1848 		INIT_LIST_HEAD(&vport->mc_mac_list);
1849 		spin_lock_init(&vport->mac_list_lock);
1850 
1851 		if (i == 0)
1852 			ret = hclge_vport_setup(vport, tqp_main_vport);
1853 		else
1854 			ret = hclge_vport_setup(vport, tqp_per_vport);
1855 		if (ret) {
1856 			dev_err(&pdev->dev,
1857 				"vport setup failed for vport %d, %d\n",
1858 				i, ret);
1859 			return ret;
1860 		}
1861 
1862 		vport++;
1863 	}
1864 
1865 	return 0;
1866 }
1867 
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1869 				    struct hclge_pkt_buf_alloc *buf_alloc)
1870 {
/* TX buffer size is in units of 128 bytes */
1872 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1873 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1874 	struct hclge_tx_buff_alloc_cmd *req;
1875 	struct hclge_desc desc;
1876 	int ret;
1877 	u8 i;
1878 
1879 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1880 
1881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1882 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1884 
1885 		req->tx_pkt_buff[i] =
1886 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1887 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1888 	}
1889 
1890 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1891 	if (ret)
1892 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1893 			ret);
1894 
1895 	return ret;
1896 }
1897 
1898 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1899 				 struct hclge_pkt_buf_alloc *buf_alloc)
1900 {
1901 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1902 
1903 	if (ret)
1904 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1905 
1906 	return ret;
1907 }
1908 
1909 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1910 {
1911 	unsigned int i;
1912 	u32 cnt = 0;
1913 
1914 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1915 		if (hdev->hw_tc_map & BIT(i))
1916 			cnt++;
1917 	return cnt;
1918 }
1919 
/* Get the number of PFC-enabled TCs that have a private buffer */
1921 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1922 				  struct hclge_pkt_buf_alloc *buf_alloc)
1923 {
1924 	struct hclge_priv_buf *priv;
1925 	unsigned int i;
1926 	int cnt = 0;
1927 
1928 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1929 		priv = &buf_alloc->priv_buf[i];
1930 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1931 		    priv->enable)
1932 			cnt++;
1933 	}
1934 
1935 	return cnt;
1936 }
1937 
/* Get the number of PFC-disabled TCs that have a private buffer */
1939 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1940 				     struct hclge_pkt_buf_alloc *buf_alloc)
1941 {
1942 	struct hclge_priv_buf *priv;
1943 	unsigned int i;
1944 	int cnt = 0;
1945 
1946 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1947 		priv = &buf_alloc->priv_buf[i];
1948 		if (hdev->hw_tc_map & BIT(i) &&
1949 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1950 		    priv->enable)
1951 			cnt++;
1952 	}
1953 
1954 	return cnt;
1955 }
1956 
1957 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1958 {
1959 	struct hclge_priv_buf *priv;
1960 	u32 rx_priv = 0;
1961 	int i;
1962 
1963 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1964 		priv = &buf_alloc->priv_buf[i];
1965 		if (priv->enable)
1966 			rx_priv += priv->buf_size;
1967 	}
1968 	return rx_priv;
1969 }
1970 
1971 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1972 {
1973 	u32 i, total_tx_size = 0;
1974 
1975 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1976 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1977 
1978 	return total_tx_size;
1979 }
1980 
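/* Check whether the rx private buffers plus the required shared buffer fit
 * into @rx_all. If they do, fill in the shared buffer size, its waterlines
 * and the per-TC thresholds, and return true.
 */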
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
1984 {
1985 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1986 	u32 tc_num = hclge_get_tc_num(hdev);
1987 	u32 shared_buf, aligned_mps;
1988 	u32 rx_priv;
1989 	int i;
1990 
1991 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1992 
1993 	if (hnae3_dev_dcb_supported(hdev))
1994 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1995 					hdev->dv_buf_size;
1996 	else
1997 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1998 					+ hdev->dv_buf_size;
1999 
2000 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2001 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2002 			     HCLGE_BUF_SIZE_UNIT);
2003 
2004 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2005 	if (rx_all < rx_priv + shared_std)
2006 		return false;
2007 
2008 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2009 	buf_alloc->s_buf.buf_size = shared_buf;
2010 	if (hnae3_dev_dcb_supported(hdev)) {
2011 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2012 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2013 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2014 				  HCLGE_BUF_SIZE_UNIT);
2015 	} else {
2016 		buf_alloc->s_buf.self.high = aligned_mps +
2017 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2018 		buf_alloc->s_buf.self.low = aligned_mps;
2019 	}
2020 
2021 	if (hnae3_dev_dcb_supported(hdev)) {
2022 		hi_thrd = shared_buf - hdev->dv_buf_size;
2023 
2024 		if (tc_num <= NEED_RESERVE_TC_NUM)
2025 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2026 					/ BUF_MAX_PERCENT;
2027 
2028 		if (tc_num)
2029 			hi_thrd = hi_thrd / tc_num;
2030 
2031 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2032 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2033 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2034 	} else {
2035 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2036 		lo_thrd = aligned_mps;
2037 	}
2038 
2039 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2041 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2042 	}
2043 
2044 	return true;
2045 }
2046 
2047 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2048 				struct hclge_pkt_buf_alloc *buf_alloc)
2049 {
2050 	u32 i, total_size;
2051 
2052 	total_size = hdev->pkt_buf_size;
2053 
	/* alloc tx buffer for all enabled TCs */
2055 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2056 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2057 
2058 		if (hdev->hw_tc_map & BIT(i)) {
2059 			if (total_size < hdev->tx_buf_size)
2060 				return -ENOMEM;
2061 
2062 			priv->tx_buf_size = hdev->tx_buf_size;
2063 		} else {
2064 			priv->tx_buf_size = 0;
2065 		}
2066 
2067 		total_size -= priv->tx_buf_size;
2068 	}
2069 
2070 	return 0;
2071 }
2072 
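/* Assign a private rx buffer and waterlines to every enabled TC, using the
 * larger watermarks when @max is true, then check whether the result still
 * fits into the rx packet buffer.
 */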
2073 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2074 				  struct hclge_pkt_buf_alloc *buf_alloc)
2075 {
2076 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2077 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2078 	unsigned int i;
2079 
2080 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2081 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2082 
2083 		priv->enable = 0;
2084 		priv->wl.low = 0;
2085 		priv->wl.high = 0;
2086 		priv->buf_size = 0;
2087 
2088 		if (!(hdev->hw_tc_map & BIT(i)))
2089 			continue;
2090 
2091 		priv->enable = 1;
2092 
2093 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2094 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2095 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2096 						HCLGE_BUF_SIZE_UNIT);
2097 		} else {
2098 			priv->wl.low = 0;
2099 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2100 					aligned_mps;
2101 		}
2102 
2103 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2104 	}
2105 
2106 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2107 }
2108 
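/* Release the private buffer of non-PFC TCs, starting from the last TC,
 * until the remaining buffers fit into the rx packet buffer.
 */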
2109 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2110 					  struct hclge_pkt_buf_alloc *buf_alloc)
2111 {
2112 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2113 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2114 	int i;
2115 
	/* clear the TCs starting from the last one */
2117 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2118 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2119 		unsigned int mask = BIT((unsigned int)i);
2120 
2121 		if (hdev->hw_tc_map & mask &&
2122 		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of the non-PFC TC */
2124 			priv->wl.low = 0;
2125 			priv->wl.high = 0;
2126 			priv->buf_size = 0;
2127 			priv->enable = 0;
2128 			no_pfc_priv_num--;
2129 		}
2130 
2131 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2132 		    no_pfc_priv_num == 0)
2133 			break;
2134 	}
2135 
2136 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2137 }
2138 
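/* Release the private buffer of PFC-enabled TCs, starting from the last TC,
 * until the remaining buffers fit into the rx packet buffer.
 */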
2139 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2140 					struct hclge_pkt_buf_alloc *buf_alloc)
2141 {
2142 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2143 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2144 	int i;
2145 
	/* clear the TCs starting from the last one */
2147 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2148 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2149 		unsigned int mask = BIT((unsigned int)i);
2150 
2151 		if (hdev->hw_tc_map & mask &&
2152 		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with a private buffer */
2154 			priv->wl.low = 0;
2155 			priv->enable = 0;
2156 			priv->wl.high = 0;
2157 			priv->buf_size = 0;
2158 			pfc_priv_num--;
2159 		}
2160 
2161 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2162 		    pfc_priv_num == 0)
2163 			break;
2164 	}
2165 
2166 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2167 }
2168 
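/* Try to divide the whole rx packet buffer evenly among the enabled TCs as
 * private buffer, leaving no shared buffer. Return false if the per-TC share
 * would drop below the minimum required size.
 */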
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2171 {
2172 #define COMPENSATE_BUFFER	0x3C00
2173 #define COMPENSATE_HALF_MPS_NUM	5
2174 #define PRIV_WL_GAP		0x1800
2175 
2176 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2177 	u32 tc_num = hclge_get_tc_num(hdev);
2178 	u32 half_mps = hdev->mps >> 1;
2179 	u32 min_rx_priv;
2180 	unsigned int i;
2181 
2182 	if (tc_num)
2183 		rx_priv = rx_priv / tc_num;
2184 
2185 	if (tc_num <= NEED_RESERVE_TC_NUM)
2186 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2187 
2188 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2189 			COMPENSATE_HALF_MPS_NUM * half_mps;
2190 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2191 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2192 	if (rx_priv < min_rx_priv)
2193 		return false;
2194 
2195 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2196 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2197 
2198 		priv->enable = 0;
2199 		priv->wl.low = 0;
2200 		priv->wl.high = 0;
2201 		priv->buf_size = 0;
2202 
2203 		if (!(hdev->hw_tc_map & BIT(i)))
2204 			continue;
2205 
2206 		priv->enable = 1;
2207 		priv->buf_size = rx_priv;
2208 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2209 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2210 	}
2211 
2212 	buf_alloc->s_buf.buf_size = 0;
2213 
2214 	return true;
2215 }
2216 
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0 if the calculation succeeds, negative errno otherwise
 */
2222 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2223 				struct hclge_pkt_buf_alloc *buf_alloc)
2224 {
2225 	/* When DCB is not supported, rx private buffer is not allocated. */
2226 	if (!hnae3_dev_dcb_supported(hdev)) {
2227 		u32 rx_all = hdev->pkt_buf_size;
2228 
2229 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2230 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2231 			return -ENOMEM;
2232 
2233 		return 0;
2234 	}
2235 
2236 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2237 		return 0;
2238 
2239 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2240 		return 0;
2241 
2242 	/* try to decrease the buffer size */
2243 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2244 		return 0;
2245 
2246 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2247 		return 0;
2248 
2249 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2250 		return 0;
2251 
2252 	return -ENOMEM;
2253 }
2254 
2255 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2256 				   struct hclge_pkt_buf_alloc *buf_alloc)
2257 {
2258 	struct hclge_rx_priv_buff_cmd *req;
2259 	struct hclge_desc desc;
2260 	int ret;
2261 	int i;
2262 
2263 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2264 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2265 
	/* Alloc the private buffer for each TC */
2267 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2268 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2269 
2270 		req->buf_num[i] =
2271 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2272 		req->buf_num[i] |=
2273 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2274 	}
2275 
2276 	req->shared_buf =
2277 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2278 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2279 
2280 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2281 	if (ret)
2282 		dev_err(&hdev->pdev->dev,
2283 			"rx private buffer alloc cmd failed %d\n", ret);
2284 
2285 	return ret;
2286 }
2287 
2288 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2289 				   struct hclge_pkt_buf_alloc *buf_alloc)
2290 {
2291 	struct hclge_rx_priv_wl_buf *req;
2292 	struct hclge_priv_buf *priv;
2293 	struct hclge_desc desc[2];
2294 	int i, j;
2295 	int ret;
2296 
2297 	for (i = 0; i < 2; i++) {
2298 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2299 					   false);
2300 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2301 
		/* The first descriptor sets the NEXT bit to 1 */
2303 		if (i == 0)
2304 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2305 		else
2306 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2307 
2308 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2309 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2310 
2311 			priv = &buf_alloc->priv_buf[idx];
2312 			req->tc_wl[j].high =
2313 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2314 			req->tc_wl[j].high |=
2315 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2316 			req->tc_wl[j].low =
2317 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2318 			req->tc_wl[j].low |=
2319 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2320 		}
2321 	}
2322 
	/* Send 2 descriptors at one time */
2324 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2325 	if (ret)
2326 		dev_err(&hdev->pdev->dev,
2327 			"rx private waterline config cmd failed %d\n",
2328 			ret);
2329 	return ret;
2330 }
2331 
2332 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2333 				    struct hclge_pkt_buf_alloc *buf_alloc)
2334 {
2335 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2336 	struct hclge_rx_com_thrd *req;
2337 	struct hclge_desc desc[2];
2338 	struct hclge_tc_thrd *tc;
2339 	int i, j;
2340 	int ret;
2341 
2342 	for (i = 0; i < 2; i++) {
2343 		hclge_cmd_setup_basic_desc(&desc[i],
2344 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2345 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2346 
		/* The first descriptor sets the NEXT bit to 1 */
2348 		if (i == 0)
2349 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2350 		else
2351 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2352 
2353 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2354 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2355 
2356 			req->com_thrd[j].high =
2357 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2358 			req->com_thrd[j].high |=
2359 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2360 			req->com_thrd[j].low =
2361 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2362 			req->com_thrd[j].low |=
2363 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2364 		}
2365 	}
2366 
2367 	/* Send 2 descriptors at one time */
2368 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2369 	if (ret)
2370 		dev_err(&hdev->pdev->dev,
2371 			"common threshold config cmd failed %d\n", ret);
2372 	return ret;
2373 }
2374 
2375 static int hclge_common_wl_config(struct hclge_dev *hdev,
2376 				  struct hclge_pkt_buf_alloc *buf_alloc)
2377 {
2378 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2379 	struct hclge_rx_com_wl *req;
2380 	struct hclge_desc desc;
2381 	int ret;
2382 
2383 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2384 
2385 	req = (struct hclge_rx_com_wl *)desc.data;
2386 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2387 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388 
2389 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2390 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2391 
2392 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2393 	if (ret)
2394 		dev_err(&hdev->pdev->dev,
2395 			"common waterline config cmd failed %d\n", ret);
2396 
2397 	return ret;
2398 }
2399 
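/* hclge_buffer_alloc - calculate the tx/rx packet buffer layout and program
 * the buffer sizes, waterlines and thresholds into the hardware.
 */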
2400 int hclge_buffer_alloc(struct hclge_dev *hdev)
2401 {
2402 	struct hclge_pkt_buf_alloc *pkt_buf;
2403 	int ret;
2404 
2405 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2406 	if (!pkt_buf)
2407 		return -ENOMEM;
2408 
2409 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2410 	if (ret) {
2411 		dev_err(&hdev->pdev->dev,
2412 			"could not calc tx buffer size for all TCs %d\n", ret);
2413 		goto out;
2414 	}
2415 
2416 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2417 	if (ret) {
2418 		dev_err(&hdev->pdev->dev,
2419 			"could not alloc tx buffers %d\n", ret);
2420 		goto out;
2421 	}
2422 
2423 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2424 	if (ret) {
2425 		dev_err(&hdev->pdev->dev,
2426 			"could not calc rx priv buffer size for all TCs %d\n",
2427 			ret);
2428 		goto out;
2429 	}
2430 
2431 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2432 	if (ret) {
2433 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2434 			ret);
2435 		goto out;
2436 	}
2437 
2438 	if (hnae3_dev_dcb_supported(hdev)) {
2439 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2440 		if (ret) {
2441 			dev_err(&hdev->pdev->dev,
2442 				"could not configure rx private waterline %d\n",
2443 				ret);
2444 			goto out;
2445 		}
2446 
2447 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2448 		if (ret) {
2449 			dev_err(&hdev->pdev->dev,
2450 				"could not configure common threshold %d\n",
2451 				ret);
2452 			goto out;
2453 		}
2454 	}
2455 
2456 	ret = hclge_common_wl_config(hdev, pkt_buf);
2457 	if (ret)
2458 		dev_err(&hdev->pdev->dev,
2459 			"could not configure common waterline %d\n", ret);
2460 
2461 out:
2462 	kfree(pkt_buf);
2463 	return ret;
2464 }
2465 
2466 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2467 {
2468 	struct hnae3_handle *roce = &vport->roce;
2469 	struct hnae3_handle *nic = &vport->nic;
2470 	struct hclge_dev *hdev = vport->back;
2471 
2472 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2473 
2474 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2475 		return -EINVAL;
2476 
2477 	roce->rinfo.base_vector = hdev->roce_base_vector;
2478 
2479 	roce->rinfo.netdev = nic->kinfo.netdev;
2480 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2481 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2482 
2483 	roce->pdev = nic->pdev;
2484 	roce->ae_algo = nic->ae_algo;
2485 	roce->numa_node_mask = nic->numa_node_mask;
2486 
2487 	return 0;
2488 }
2489 
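/* Allocate the MSI/MSI-X vectors (at least HNAE3_MIN_VECTOR_NUM) and set up
 * the bookkeeping arrays used to track vector ownership and irq numbers.
 */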
2490 static int hclge_init_msi(struct hclge_dev *hdev)
2491 {
2492 	struct pci_dev *pdev = hdev->pdev;
2493 	int vectors;
2494 	int i;
2495 
2496 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2497 					hdev->num_msi,
2498 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2499 	if (vectors < 0) {
2500 		dev_err(&pdev->dev,
2501 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2502 			vectors);
2503 		return vectors;
2504 	}
2505 	if (vectors < hdev->num_msi)
2506 		dev_warn(&hdev->pdev->dev,
2507 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2508 			 hdev->num_msi, vectors);
2509 
2510 	hdev->num_msi = vectors;
2511 	hdev->num_msi_left = vectors;
2512 
2513 	hdev->base_msi_vector = pdev->irq;
2514 	hdev->roce_base_vector = hdev->base_msi_vector +
2515 				hdev->num_nic_msi;
2516 
2517 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2518 					   sizeof(u16), GFP_KERNEL);
2519 	if (!hdev->vector_status) {
2520 		pci_free_irq_vectors(pdev);
2521 		return -ENOMEM;
2522 	}
2523 
2524 	for (i = 0; i < hdev->num_msi; i++)
2525 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2526 
2527 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2528 					sizeof(int), GFP_KERNEL);
2529 	if (!hdev->vector_irq) {
2530 		pci_free_irq_vectors(pdev);
2531 		return -ENOMEM;
2532 	}
2533 
2534 	return 0;
2535 }
2536 
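/* only 10M and 100M support half duplex; force full duplex otherwise */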
2537 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2538 {
2539 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2540 		duplex = HCLGE_MAC_FULL;
2541 
2542 	return duplex;
2543 }
2544 
2545 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2546 				      u8 duplex)
2547 {
2548 	struct hclge_config_mac_speed_dup_cmd *req;
2549 	struct hclge_desc desc;
2550 	int ret;
2551 
2552 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2553 
2554 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2555 
2556 	if (duplex)
2557 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2558 
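	/* map the speed to the speed selection value used by the command */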
2559 	switch (speed) {
2560 	case HCLGE_MAC_SPEED_10M:
2561 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2562 				HCLGE_CFG_SPEED_S, 6);
2563 		break;
2564 	case HCLGE_MAC_SPEED_100M:
2565 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2566 				HCLGE_CFG_SPEED_S, 7);
2567 		break;
2568 	case HCLGE_MAC_SPEED_1G:
2569 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2570 				HCLGE_CFG_SPEED_S, 0);
2571 		break;
2572 	case HCLGE_MAC_SPEED_10G:
2573 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2574 				HCLGE_CFG_SPEED_S, 1);
2575 		break;
2576 	case HCLGE_MAC_SPEED_25G:
2577 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2578 				HCLGE_CFG_SPEED_S, 2);
2579 		break;
2580 	case HCLGE_MAC_SPEED_40G:
2581 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 				HCLGE_CFG_SPEED_S, 3);
2583 		break;
2584 	case HCLGE_MAC_SPEED_50G:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, 4);
2587 		break;
2588 	case HCLGE_MAC_SPEED_100G:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, 5);
2591 		break;
2592 	case HCLGE_MAC_SPEED_200G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, 8);
2595 		break;
2596 	default:
2597 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2598 		return -EINVAL;
2599 	}
2600 
2601 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2602 		      1);
2603 
2604 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2605 	if (ret) {
2606 		dev_err(&hdev->pdev->dev,
2607 			"mac speed/duplex config cmd failed %d.\n", ret);
2608 		return ret;
2609 	}
2610 
2611 	return 0;
2612 }
2613 
2614 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2615 {
2616 	struct hclge_mac *mac = &hdev->hw.mac;
2617 	int ret;
2618 
2619 	duplex = hclge_check_speed_dup(duplex, speed);
2620 	if (!mac->support_autoneg && mac->speed == speed &&
2621 	    mac->duplex == duplex)
2622 		return 0;
2623 
2624 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2625 	if (ret)
2626 		return ret;
2627 
2628 	hdev->hw.mac.speed = speed;
2629 	hdev->hw.mac.duplex = duplex;
2630 
2631 	return 0;
2632 }
2633 
2634 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2635 				     u8 duplex)
2636 {
2637 	struct hclge_vport *vport = hclge_get_vport(handle);
2638 	struct hclge_dev *hdev = vport->back;
2639 
2640 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2641 }
2642 
2643 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2644 {
2645 	struct hclge_config_auto_neg_cmd *req;
2646 	struct hclge_desc desc;
2647 	u32 flag = 0;
2648 	int ret;
2649 
2650 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2651 
2652 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2653 	if (enable)
2654 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2655 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2656 
2657 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2658 	if (ret)
2659 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2660 			ret);
2661 
2662 	return ret;
2663 }
2664 
2665 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2666 {
2667 	struct hclge_vport *vport = hclge_get_vport(handle);
2668 	struct hclge_dev *hdev = vport->back;
2669 
2670 	if (!hdev->hw.mac.support_autoneg) {
2671 		if (enable) {
2672 			dev_err(&hdev->pdev->dev,
2673 				"autoneg is not supported by current port\n");
2674 			return -EOPNOTSUPP;
2675 		} else {
2676 			return 0;
2677 		}
2678 	}
2679 
2680 	return hclge_set_autoneg_en(hdev, enable);
2681 }
2682 
2683 static int hclge_get_autoneg(struct hnae3_handle *handle)
2684 {
2685 	struct hclge_vport *vport = hclge_get_vport(handle);
2686 	struct hclge_dev *hdev = vport->back;
2687 	struct phy_device *phydev = hdev->hw.mac.phydev;
2688 
2689 	if (phydev)
2690 		return phydev->autoneg;
2691 
2692 	return hdev->hw.mac.autoneg;
2693 }
2694 
2695 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2696 {
2697 	struct hclge_vport *vport = hclge_get_vport(handle);
2698 	struct hclge_dev *hdev = vport->back;
2699 	int ret;
2700 
2701 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2702 
2703 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2704 	if (ret)
2705 		return ret;
2706 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2707 }
2708 
2709 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2710 {
2711 	struct hclge_vport *vport = hclge_get_vport(handle);
2712 	struct hclge_dev *hdev = vport->back;
2713 
2714 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2715 		return hclge_set_autoneg_en(hdev, !halt);
2716 
2717 	return 0;
2718 }
2719 
2720 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2721 {
2722 	struct hclge_config_fec_cmd *req;
2723 	struct hclge_desc desc;
2724 	int ret;
2725 
2726 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2727 
2728 	req = (struct hclge_config_fec_cmd *)desc.data;
2729 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2730 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2731 	if (fec_mode & BIT(HNAE3_FEC_RS))
2732 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2733 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2734 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2735 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2736 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2737 
2738 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2739 	if (ret)
2740 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2741 
2742 	return ret;
2743 }
2744 
2745 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2746 {
2747 	struct hclge_vport *vport = hclge_get_vport(handle);
2748 	struct hclge_dev *hdev = vport->back;
2749 	struct hclge_mac *mac = &hdev->hw.mac;
2750 	int ret;
2751 
2752 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2753 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2754 		return -EINVAL;
2755 	}
2756 
2757 	ret = hclge_set_fec_hw(hdev, fec_mode);
2758 	if (ret)
2759 		return ret;
2760 
2761 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2762 	return 0;
2763 }
2764 
2765 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2766 			  u8 *fec_mode)
2767 {
2768 	struct hclge_vport *vport = hclge_get_vport(handle);
2769 	struct hclge_dev *hdev = vport->back;
2770 	struct hclge_mac *mac = &hdev->hw.mac;
2771 
2772 	if (fec_ability)
2773 		*fec_ability = mac->fec_ability;
2774 	if (fec_mode)
2775 		*fec_mode = mac->fec_mode;
2776 }
2777 
2778 static int hclge_mac_init(struct hclge_dev *hdev)
2779 {
2780 	struct hclge_mac *mac = &hdev->hw.mac;
2781 	int ret;
2782 
2783 	hdev->support_sfp_query = true;
2784 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2785 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2786 					 hdev->hw.mac.duplex);
2787 	if (ret)
2788 		return ret;
2789 
2790 	if (hdev->hw.mac.support_autoneg) {
2791 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2792 		if (ret)
2793 			return ret;
2794 	}
2795 
2796 	mac->link = 0;
2797 
2798 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2799 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2800 		if (ret)
2801 			return ret;
2802 	}
2803 
2804 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2805 	if (ret) {
2806 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2807 		return ret;
2808 	}
2809 
2810 	ret = hclge_set_default_loopback(hdev);
2811 	if (ret)
2812 		return ret;
2813 
2814 	ret = hclge_buffer_alloc(hdev);
2815 	if (ret)
2816 		dev_err(&hdev->pdev->dev,
2817 			"allocate buffer fail, ret=%d\n", ret);
2818 
2819 	return ret;
2820 }
2821 
2822 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2823 {
2824 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2825 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2826 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2827 				    hclge_wq, &hdev->service_task, 0);
2828 }
2829 
2830 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2831 {
2832 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2833 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2834 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2835 				    hclge_wq, &hdev->service_task, 0);
2836 }
2837 
2838 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2839 {
2840 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2841 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2842 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2843 				    hclge_wq, &hdev->service_task,
2844 				    delay_time);
2845 }
2846 
2847 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2848 {
2849 	struct hclge_link_status_cmd *req;
2850 	struct hclge_desc desc;
2851 	int ret;
2852 
2853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2854 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2855 	if (ret) {
2856 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2857 			ret);
2858 		return ret;
2859 	}
2860 
2861 	req = (struct hclge_link_status_cmd *)desc.data;
2862 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2863 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2864 
2865 	return 0;
2866 }
2867 
2868 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2869 {
2870 	struct phy_device *phydev = hdev->hw.mac.phydev;
2871 
2872 	*link_status = HCLGE_LINK_STATUS_DOWN;
2873 
2874 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2875 		return 0;
2876 
2877 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2878 		return 0;
2879 
2880 	return hclge_get_mac_link_status(hdev, link_status);
2881 }
2882 
2883 static void hclge_push_link_status(struct hclge_dev *hdev)
2884 {
2885 	struct hclge_vport *vport;
2886 	int ret;
2887 	u16 i;
2888 
2889 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2890 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2891 
2892 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2893 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2894 			continue;
2895 
2896 		ret = hclge_push_vf_link_status(vport);
2897 		if (ret) {
2898 			dev_err(&hdev->pdev->dev,
2899 				"failed to push link status to vf%u, ret = %d\n",
2900 				i, ret);
2901 		}
2902 	}
2903 }
2904 
2905 static void hclge_update_link_status(struct hclge_dev *hdev)
2906 {
2907 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2908 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2909 	struct hnae3_client *rclient = hdev->roce_client;
2910 	struct hnae3_client *client = hdev->nic_client;
2911 	int state;
2912 	int ret;
2913 
2914 	if (!client)
2915 		return;
2916 
2917 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2918 		return;
2919 
2920 	ret = hclge_get_mac_phy_link(hdev, &state);
2921 	if (ret) {
2922 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2923 		return;
2924 	}
2925 
2926 	if (state != hdev->hw.mac.link) {
2927 		client->ops->link_status_change(handle, state);
2928 		hclge_config_mac_tnl_int(hdev, state);
2929 		if (rclient && rclient->ops->link_status_change)
2930 			rclient->ops->link_status_change(rhandle, state);
2931 
2932 		hdev->hw.mac.link = state;
2933 		hclge_push_link_status(hdev);
2934 	}
2935 
2936 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2937 }
2938 
2939 static void hclge_update_port_capability(struct hclge_dev *hdev,
2940 					 struct hclge_mac *mac)
2941 {
2942 	if (hnae3_dev_fec_supported(hdev))
2943 		/* update fec ability by speed */
2944 		hclge_convert_setting_fec(mac);
2945 
	/* firmware cannot identify the backplane type, so the media type
	 * read from the configuration can help to deal with it
	 */
2949 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2950 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2951 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2952 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2953 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2954 
2955 	if (mac->support_autoneg) {
2956 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2957 		linkmode_copy(mac->advertising, mac->supported);
2958 	} else {
2959 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2960 				   mac->supported);
2961 		linkmode_zero(mac->advertising);
2962 	}
2963 }
2964 
2965 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2966 {
2967 	struct hclge_sfp_info_cmd *resp;
2968 	struct hclge_desc desc;
2969 	int ret;
2970 
2971 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2972 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2973 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2974 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
2977 		return ret;
2978 	} else if (ret) {
2979 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2980 		return ret;
2981 	}
2982 
2983 	*speed = le32_to_cpu(resp->speed);
2984 
2985 	return 0;
2986 }
2987 
2988 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2989 {
2990 	struct hclge_sfp_info_cmd *resp;
2991 	struct hclge_desc desc;
2992 	int ret;
2993 
2994 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2995 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2996 
2997 	resp->query_type = QUERY_ACTIVE_SPEED;
2998 
2999 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3000 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
3003 		return ret;
3004 	} else if (ret) {
3005 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3006 		return ret;
3007 	}
3008 
	/* In some cases, the MAC speed got from IMP may be 0; it should not
	 * be set to mac->speed.
	 */
3012 	if (!le32_to_cpu(resp->speed))
3013 		return 0;
3014 
3015 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means the firmware is an old
	 * version, so do not update these params
	 */
3019 	if (resp->speed_ability) {
3020 		mac->module_type = le32_to_cpu(resp->module_type);
3021 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3022 		mac->autoneg = resp->autoneg;
3023 		mac->support_autoneg = resp->autoneg_ability;
3024 		mac->speed_type = QUERY_ACTIVE_SPEED;
3025 		if (!resp->active_fec)
3026 			mac->fec_mode = 0;
3027 		else
3028 			mac->fec_mode = BIT(resp->active_fec);
3029 	} else {
3030 		mac->speed_type = QUERY_SFP_SPEED;
3031 	}
3032 
3033 	return 0;
3034 }
3035 
3036 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3037 					struct ethtool_link_ksettings *cmd)
3038 {
3039 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3040 	struct hclge_vport *vport = hclge_get_vport(handle);
3041 	struct hclge_phy_link_ksetting_0_cmd *req0;
3042 	struct hclge_phy_link_ksetting_1_cmd *req1;
3043 	u32 supported, advertising, lp_advertising;
3044 	struct hclge_dev *hdev = vport->back;
3045 	int ret;
3046 
3047 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3048 				   true);
3049 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3050 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3051 				   true);
3052 
3053 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3054 	if (ret) {
3055 		dev_err(&hdev->pdev->dev,
3056 			"failed to get phy link ksetting, ret = %d.\n", ret);
3057 		return ret;
3058 	}
3059 
3060 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3061 	cmd->base.autoneg = req0->autoneg;
3062 	cmd->base.speed = le32_to_cpu(req0->speed);
3063 	cmd->base.duplex = req0->duplex;
3064 	cmd->base.port = req0->port;
3065 	cmd->base.transceiver = req0->transceiver;
3066 	cmd->base.phy_address = req0->phy_address;
3067 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3068 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3069 	supported = le32_to_cpu(req0->supported);
3070 	advertising = le32_to_cpu(req0->advertising);
3071 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3072 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3073 						supported);
3074 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3075 						advertising);
3076 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3077 						lp_advertising);
3078 
3079 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3080 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3081 	cmd->base.master_slave_state = req1->master_slave_state;
3082 
3083 	return 0;
3084 }
3085 
3086 static int
3087 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3088 			     const struct ethtool_link_ksettings *cmd)
3089 {
3090 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3091 	struct hclge_vport *vport = hclge_get_vport(handle);
3092 	struct hclge_phy_link_ksetting_0_cmd *req0;
3093 	struct hclge_phy_link_ksetting_1_cmd *req1;
3094 	struct hclge_dev *hdev = vport->back;
3095 	u32 advertising;
3096 	int ret;
3097 
3098 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3099 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3100 	     (cmd->base.duplex != DUPLEX_HALF &&
3101 	      cmd->base.duplex != DUPLEX_FULL)))
3102 		return -EINVAL;
3103 
3104 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3105 				   false);
3106 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3107 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3108 				   false);
3109 
3110 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3111 	req0->autoneg = cmd->base.autoneg;
3112 	req0->speed = cpu_to_le32(cmd->base.speed);
3113 	req0->duplex = cmd->base.duplex;
3114 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3115 						cmd->link_modes.advertising);
3116 	req0->advertising = cpu_to_le32(advertising);
3117 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3118 
3119 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3120 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3121 
3122 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3123 	if (ret) {
3124 		dev_err(&hdev->pdev->dev,
3125 			"failed to set phy link ksettings, ret = %d.\n", ret);
3126 		return ret;
3127 	}
3128 
3129 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3130 	hdev->hw.mac.speed = cmd->base.speed;
3131 	hdev->hw.mac.duplex = cmd->base.duplex;
3132 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3133 
3134 	return 0;
3135 }
3136 
3137 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3138 {
3139 	struct ethtool_link_ksettings cmd;
3140 	int ret;
3141 
3142 	if (!hnae3_dev_phy_imp_supported(hdev))
3143 		return 0;
3144 
3145 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3146 	if (ret)
3147 		return ret;
3148 
3149 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3150 	hdev->hw.mac.speed = cmd.base.speed;
3151 	hdev->hw.mac.duplex = cmd.base.duplex;
3152 
3153 	return 0;
3154 }
3155 
3156 static int hclge_tp_port_init(struct hclge_dev *hdev)
3157 {
3158 	struct ethtool_link_ksettings cmd;
3159 
3160 	if (!hnae3_dev_phy_imp_supported(hdev))
3161 		return 0;
3162 
3163 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3164 	cmd.base.speed = hdev->hw.mac.speed;
3165 	cmd.base.duplex = hdev->hw.mac.duplex;
3166 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3167 
3168 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3169 }
3170 
3171 static int hclge_update_port_info(struct hclge_dev *hdev)
3172 {
3173 	struct hclge_mac *mac = &hdev->hw.mac;
3174 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3175 	int ret;
3176 
3177 	/* get the port info from SFP cmd if not copper port */
3178 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3179 		return hclge_update_tp_port_info(hdev);
3180 
	/* if IMP does not support getting SFP/qSFP info, return directly */
3182 	if (!hdev->support_sfp_query)
3183 		return 0;
3184 
3185 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3186 		ret = hclge_get_sfp_info(hdev, mac);
3187 	else
3188 		ret = hclge_get_sfp_speed(hdev, &speed);
3189 
3190 	if (ret == -EOPNOTSUPP) {
3191 		hdev->support_sfp_query = false;
3192 		return ret;
3193 	} else if (ret) {
3194 		return ret;
3195 	}
3196 
3197 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3198 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3199 			hclge_update_port_capability(hdev, mac);
3200 			return 0;
3201 		}
3202 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3203 					       HCLGE_MAC_FULL);
3204 	} else {
3205 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3206 			return 0; /* do nothing if no SFP */
3207 
3208 		/* must config full duplex for SFP */
3209 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3210 	}
3211 }
3212 
3213 static int hclge_get_status(struct hnae3_handle *handle)
3214 {
3215 	struct hclge_vport *vport = hclge_get_vport(handle);
3216 	struct hclge_dev *hdev = vport->back;
3217 
3218 	hclge_update_link_status(hdev);
3219 
3220 	return hdev->hw.mac.link;
3221 }
3222 
3223 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3224 {
3225 	if (!pci_num_vf(hdev->pdev)) {
3226 		dev_err(&hdev->pdev->dev,
3227 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3228 		return NULL;
3229 	}
3230 
3231 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3232 		dev_err(&hdev->pdev->dev,
3233 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3234 			vf, pci_num_vf(hdev->pdev));
3235 		return NULL;
3236 	}
3237 
	/* VFs start from 1 in vport */
3239 	vf += HCLGE_VF_VPORT_START_NUM;
3240 	return &hdev->vport[vf];
3241 }
3242 
3243 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3244 			       struct ifla_vf_info *ivf)
3245 {
3246 	struct hclge_vport *vport = hclge_get_vport(handle);
3247 	struct hclge_dev *hdev = vport->back;
3248 
3249 	vport = hclge_get_vf_vport(hdev, vf);
3250 	if (!vport)
3251 		return -EINVAL;
3252 
3253 	ivf->vf = vf;
3254 	ivf->linkstate = vport->vf_info.link_state;
3255 	ivf->spoofchk = vport->vf_info.spoofchk;
3256 	ivf->trusted = vport->vf_info.trusted;
3257 	ivf->min_tx_rate = 0;
3258 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3259 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3260 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3261 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3262 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3263 
3264 	return 0;
3265 }
3266 
3267 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3268 				   int link_state)
3269 {
3270 	struct hclge_vport *vport = hclge_get_vport(handle);
3271 	struct hclge_dev *hdev = vport->back;
3272 	int link_state_old;
3273 	int ret;
3274 
3275 	vport = hclge_get_vf_vport(hdev, vf);
3276 	if (!vport)
3277 		return -EINVAL;
3278 
3279 	link_state_old = vport->vf_info.link_state;
3280 	vport->vf_info.link_state = link_state;
3281 
3282 	ret = hclge_push_vf_link_status(vport);
3283 	if (ret) {
3284 		vport->vf_info.link_state = link_state_old;
3285 		dev_err(&hdev->pdev->dev,
3286 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3287 	}
3288 
3289 	return ret;
3290 }
3291 
3292 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3293 {
3294 	u32 cmdq_src_reg, msix_src_reg;
3295 
3296 	/* fetch the events from their corresponding regs */
3297 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3298 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3299 
	/* Assumption: if by any chance reset and mailbox events are reported
	 * together, then we will only process the reset event in this go and
	 * will defer the processing of the mailbox events. Since we would not
	 * have cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
3308 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3309 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3310 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3311 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3312 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3313 		hdev->rst_stats.imp_rst_cnt++;
3314 		return HCLGE_VECTOR0_EVENT_RST;
3315 	}
3316 
3317 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3318 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3319 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3320 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3321 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3322 		hdev->rst_stats.global_rst_cnt++;
3323 		return HCLGE_VECTOR0_EVENT_RST;
3324 	}
3325 
3326 	/* check for vector0 msix event source */
3327 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3328 		*clearval = msix_src_reg;
3329 		return HCLGE_VECTOR0_EVENT_ERR;
3330 	}
3331 
3332 	/* check for vector0 mailbox(=CMDQ RX) event source */
3333 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3334 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3335 		*clearval = cmdq_src_reg;
3336 		return HCLGE_VECTOR0_EVENT_MBX;
3337 	}
3338 
3339 	/* print other vector0 event source */
3340 	dev_info(&hdev->pdev->dev,
3341 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3342 		 cmdq_src_reg, msix_src_reg);
3343 	*clearval = msix_src_reg;
3344 
3345 	return HCLGE_VECTOR0_EVENT_OTHER;
3346 }
3347 
3348 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3349 				    u32 regclr)
3350 {
3351 	switch (event_type) {
3352 	case HCLGE_VECTOR0_EVENT_RST:
3353 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3354 		break;
3355 	case HCLGE_VECTOR0_EVENT_MBX:
3356 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3357 		break;
3358 	default:
3359 		break;
3360 	}
3361 }
3362 
3363 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3364 {
3365 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3366 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3367 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3368 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3369 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3370 }
3371 
3372 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3373 {
3374 	writel(enable ? 1 : 0, vector->addr);
3375 }
3376 
3377 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3378 {
3379 	struct hclge_dev *hdev = data;
3380 	u32 clearval = 0;
3381 	u32 event_cause;
3382 
3383 	hclge_enable_vector(&hdev->misc_vector, false);
3384 	event_cause = hclge_check_event_cause(hdev, &clearval);
3385 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3387 	switch (event_cause) {
3388 	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do the following for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred the type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type,
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
3399 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3400 		fallthrough;
3401 	case HCLGE_VECTOR0_EVENT_RST:
3402 		hclge_reset_task_schedule(hdev);
3403 		break;
3404 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 *                        OR
		 * 2. We could be handling an mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
3414 		hclge_mbx_task_schedule(hdev);
3415 		break;
3416 	default:
3417 		dev_warn(&hdev->pdev->dev,
3418 			 "received unknown or unhandled event of vector0\n");
3419 		break;
3420 	}
3421 
3422 	hclge_clear_event_cause(hdev, event_cause, clearval);
3423 
	/* Enable the interrupt if it is not caused by reset. And when
	 * clearval equals 0, it means the interrupt status may have been
	 * cleared by hardware before the driver reads the status register.
	 * In this case, the vector0 interrupt should also be enabled.
	 */
	if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX)
		hclge_enable_vector(&hdev->misc_vector, true);
3433 
3434 	return IRQ_HANDLED;
3435 }
3436 
3437 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3438 {
3439 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3440 		dev_warn(&hdev->pdev->dev,
3441 			 "vector(vector_id %d) has been freed.\n", vector_id);
3442 		return;
3443 	}
3444 
3445 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3446 	hdev->num_msi_left += 1;
3447 	hdev->num_msi_used -= 1;
3448 }
3449 
3450 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3451 {
3452 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3453 
3454 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3455 
3456 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3457 	hdev->vector_status[0] = 0;
3458 
3459 	hdev->num_msi_left -= 1;
3460 	hdev->num_msi_used += 1;
3461 }
3462 
3463 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3464 				      const cpumask_t *mask)
3465 {
3466 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3467 					      affinity_notify);
3468 
3469 	cpumask_copy(&hdev->affinity_mask, mask);
3470 }
3471 
3472 static void hclge_irq_affinity_release(struct kref *ref)
3473 {
3474 }
3475 
3476 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3477 {
3478 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3479 			      &hdev->affinity_mask);
3480 
3481 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3482 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3483 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3484 				  &hdev->affinity_notify);
3485 }
3486 
3487 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3488 {
3489 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3490 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3491 }
3492 
3493 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3494 {
3495 	int ret;
3496 
3497 	hclge_get_misc_vector(hdev);
3498 
	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3500 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3501 		 HCLGE_NAME, pci_name(hdev->pdev));
3502 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3503 			  0, hdev->misc_vector.name, hdev);
3504 	if (ret) {
3505 		hclge_free_vector(hdev, 0);
3506 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3507 			hdev->misc_vector.vector_irq);
3508 	}
3509 
3510 	return ret;
3511 }
3512 
3513 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3514 {
3515 	free_irq(hdev->misc_vector.vector_irq, hdev);
3516 	hclge_free_vector(hdev, 0);
3517 }
3518 
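/* Notify the NIC client about the given stage of a reset. Returns 0 when no
 * NIC client is registered, -EOPNOTSUPP when the client has no reset_notify
 * handler, or the handler's return value otherwise.
 */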
3519 int hclge_notify_client(struct hclge_dev *hdev,
3520 			enum hnae3_reset_notify_type type)
3521 {
3522 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3523 	struct hnae3_client *client = hdev->nic_client;
3524 	int ret;
3525 
3526 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3527 		return 0;
3528 
3529 	if (!client->ops->reset_notify)
3530 		return -EOPNOTSUPP;
3531 
3532 	ret = client->ops->reset_notify(handle, type);
3533 	if (ret)
3534 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3535 			type, ret);
3536 
3537 	return ret;
3538 }
3539 
3540 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3541 				    enum hnae3_reset_notify_type type)
3542 {
3543 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3544 	struct hnae3_client *client = hdev->roce_client;
3545 	int ret;
3546 
3547 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3548 		return 0;
3549 
3550 	if (!client->ops->reset_notify)
3551 		return -EOPNOTSUPP;
3552 
3553 	ret = client->ops->reset_notify(handle, type);
		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)\n",
			type, ret);
3556 			type, ret);
3557 
3558 	return ret;
3559 }
3560 
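/* Poll the reset status register for the current reset type until the
 * hardware clears the corresponding bit, or until HCLGE_RESET_WAIT_CNT
 * polls have elapsed. Returns -EBUSY on timeout and -EINVAL for reset
 * types that have no status register to wait on.
 */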
3561 static int hclge_reset_wait(struct hclge_dev *hdev)
3562 {
3563 #define HCLGE_RESET_WATI_MS	100
3564 #define HCLGE_RESET_WAIT_CNT	350
3565 
3566 	u32 val, reg, reg_bit;
3567 	u32 cnt = 0;
3568 
3569 	switch (hdev->reset_type) {
3570 	case HNAE3_IMP_RESET:
3571 		reg = HCLGE_GLOBAL_RESET_REG;
3572 		reg_bit = HCLGE_IMP_RESET_BIT;
3573 		break;
3574 	case HNAE3_GLOBAL_RESET:
3575 		reg = HCLGE_GLOBAL_RESET_REG;
3576 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3577 		break;
3578 	case HNAE3_FUNC_RESET:
3579 		reg = HCLGE_FUN_RST_ING;
3580 		reg_bit = HCLGE_FUN_RST_ING_B;
3581 		break;
3582 	default:
3583 		dev_err(&hdev->pdev->dev,
3584 			"Wait for unsupported reset type: %d\n",
3585 			hdev->reset_type);
3586 		return -EINVAL;
3587 	}
3588 
3589 	val = hclge_read_dev(&hdev->hw, reg);
3590 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3591 		msleep(HCLGE_RESET_WATI_MS);
3592 		val = hclge_read_dev(&hdev->hw, reg);
3593 		cnt++;
3594 	}
3595 
3596 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3597 		dev_warn(&hdev->pdev->dev,
3598 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3599 		return -EBUSY;
3600 	}
3601 
3602 	return 0;
3603 }
3604 
3605 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3606 {
3607 	struct hclge_vf_rst_cmd *req;
3608 	struct hclge_desc desc;
3609 
3610 	req = (struct hclge_vf_rst_cmd *)desc.data;
3611 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3612 	req->dest_vfid = func_id;
3613 
3614 	if (reset)
3615 		req->vf_rst = 0x1;
3616 
3617 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3618 }
3619 
3620 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3621 {
3622 	int i;
3623 
3624 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3625 		struct hclge_vport *vport = &hdev->vport[i];
3626 		int ret;
3627 
3628 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3629 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3630 		if (ret) {
3631 			dev_err(&hdev->pdev->dev,
3632 				"set vf(%u) rst failed %d!\n",
3633 				vport->vport_id, ret);
3634 			return ret;
3635 		}
3636 
3637 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3638 			continue;
3639 
3640 		/* Inform VF to process the reset.
3641 		 * hclge_inform_reset_assert_to_vf may fail if VF
3642 		 * driver is not loaded.
3643 		 */
3644 		ret = hclge_inform_reset_assert_to_vf(vport);
3645 		if (ret)
3646 			dev_warn(&hdev->pdev->dev,
3647 				 "inform reset to vf(%u) failed %d!\n",
3648 				 vport->vport_id, ret);
3649 	}
3650 
3651 	return 0;
3652 }
3653 
3654 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3655 {
3656 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3657 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3658 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3659 		return;
3660 
3661 	hclge_mbx_handler(hdev);
3662 
3663 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3664 }
3665 
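/* Wait until all VFs report that they are ready for a function reset. The
 * mailbox task is serviced between polls so that VFs can finish bringing
 * down their netdevs, and the wait gives up after HCLGE_PF_RESET_SYNC_CNT
 * attempts. Old firmware that does not support the query simply gets a
 * fixed HCLGE_RESET_SYNC_TIME delay instead.
 */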
3666 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3667 {
3668 	struct hclge_pf_rst_sync_cmd *req;
3669 	struct hclge_desc desc;
3670 	int cnt = 0;
3671 	int ret;
3672 
3673 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3675 
3676 	do {
		/* the VF needs to bring down its netdev via mbx during a PF
		 * or FLR reset
		 */
3678 		hclge_mailbox_service_task(hdev);
3679 
3680 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* to be compatible with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
3684 		if (ret == -EOPNOTSUPP) {
3685 			msleep(HCLGE_RESET_SYNC_TIME);
3686 			return;
3687 		} else if (ret) {
3688 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3689 				 ret);
3690 			return;
3691 		} else if (req->all_vf_ready) {
3692 			return;
3693 		}
3694 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3695 		hclge_cmd_reuse_desc(&desc, true);
3696 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3697 
3698 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3699 }
3700 
3701 void hclge_report_hw_error(struct hclge_dev *hdev,
3702 			   enum hnae3_hw_error_type type)
3703 {
3704 	struct hnae3_client *client = hdev->nic_client;
3705 
3706 	if (!client || !client->ops->process_hw_error ||
3707 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3708 		return;
3709 
3710 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3711 }
3712 
3713 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3714 {
3715 	u32 reg_val;
3716 
3717 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3718 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3719 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3720 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3721 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3722 	}
3723 
3724 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3725 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3726 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3727 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3728 	}
3729 }
3730 
3731 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3732 {
3733 	struct hclge_desc desc;
3734 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3735 	int ret;
3736 
3737 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3738 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3739 	req->fun_reset_vfid = func_id;
3740 
3741 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3742 	if (ret)
3743 		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);
3745 
3746 	return ret;
3747 }
3748 
3749 static void hclge_do_reset(struct hclge_dev *hdev)
3750 {
3751 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3752 	struct pci_dev *pdev = hdev->pdev;
3753 	u32 val;
3754 
3755 	if (hclge_get_hw_reset_stat(handle)) {
3756 		dev_info(&pdev->dev, "hardware reset not finish\n");
3757 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3758 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3759 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3760 		return;
3761 	}
3762 
3763 	switch (hdev->reset_type) {
3764 	case HNAE3_GLOBAL_RESET:
3765 		dev_info(&pdev->dev, "global reset requested\n");
3766 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3767 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3768 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3769 		break;
3770 	case HNAE3_FUNC_RESET:
3771 		dev_info(&pdev->dev, "PF reset requested\n");
3772 		/* schedule again to check later */
3773 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3774 		hclge_reset_task_schedule(hdev);
3775 		break;
3776 	default:
3777 		dev_warn(&pdev->dev,
3778 			 "unsupported reset type: %d\n", hdev->reset_type);
3779 		break;
3780 	}
3781 }
3782 
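/* Convert the pending reset bits in @addr into a single reset level to be
 * handled. HNAE3_UNKNOWN_RESET is first resolved by decoding the hardware
 * MSI-X errors, then the highest priority level (IMP > global > func > FLR)
 * is returned and any lower priority bits it supersedes are cleared. If a
 * higher level reset is already in progress, HNAE3_NONE_RESET is returned.
 */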
3783 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3784 						   unsigned long *addr)
3785 {
3786 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3787 	struct hclge_dev *hdev = ae_dev->priv;
3788 
3789 	/* first, resolve any unknown reset type to the known type(s) */
3790 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3791 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3792 					HCLGE_MISC_VECTOR_INT_STS);
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
3796 		if (hclge_handle_hw_msix_error(hdev, addr))
3797 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3798 				 msix_sts_reg);
3799 
3800 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred clearing the error event which caused the
		 * interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced
		 * the new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely re-enable
		 * the interrupt. This is an exception to the norm.
		 */
3808 		hclge_enable_vector(&hdev->misc_vector, true);
3809 	}
3810 
3811 	/* return the highest priority reset level amongst all */
3812 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3813 		rst_level = HNAE3_IMP_RESET;
3814 		clear_bit(HNAE3_IMP_RESET, addr);
3815 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3816 		clear_bit(HNAE3_FUNC_RESET, addr);
3817 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3818 		rst_level = HNAE3_GLOBAL_RESET;
3819 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3820 		clear_bit(HNAE3_FUNC_RESET, addr);
3821 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3822 		rst_level = HNAE3_FUNC_RESET;
3823 		clear_bit(HNAE3_FUNC_RESET, addr);
3824 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3825 		rst_level = HNAE3_FLR_RESET;
3826 		clear_bit(HNAE3_FLR_RESET, addr);
3827 	}
3828 
3829 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3830 	    rst_level < hdev->reset_type)
3831 		return HNAE3_NONE_RESET;
3832 
3833 	return rst_level;
3834 }
3835 
3836 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3837 {
3838 	u32 clearval = 0;
3839 
3840 	switch (hdev->reset_type) {
3841 	case HNAE3_IMP_RESET:
3842 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3843 		break;
3844 	case HNAE3_GLOBAL_RESET:
3845 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3846 		break;
3847 	default:
3848 		break;
3849 	}
3850 
3851 	if (!clearval)
3852 		return;
3853 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3857 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3858 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3859 				clearval);
3860 
3861 	hclge_enable_vector(&hdev->misc_vector, true);
3862 }
3863 
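/* Toggle the software reset-ready handshake bit in the command queue depth
 * register: set it to tell the hardware that the driver has finished its
 * preparatory work, clear it again once re-initialization is complete.
 */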
3864 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3865 {
3866 	u32 reg_val;
3867 
3868 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3869 	if (enable)
3870 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3871 	else
3872 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3873 
3874 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3875 }
3876 
3877 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3878 {
3879 	int ret;
3880 
3881 	ret = hclge_set_all_vf_rst(hdev, true);
3882 	if (ret)
3883 		return ret;
3884 
3885 	hclge_func_reset_sync_vf(hdev);
3886 
3887 	return 0;
3888 }
3889 
3890 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3891 {
3892 	u32 reg_val;
3893 	int ret = 0;
3894 
3895 	switch (hdev->reset_type) {
3896 	case HNAE3_FUNC_RESET:
3897 		ret = hclge_func_reset_notify_vf(hdev);
3898 		if (ret)
3899 			return ret;
3900 
3901 		ret = hclge_func_reset_cmd(hdev, 0);
3902 		if (ret) {
3903 			dev_err(&hdev->pdev->dev,
3904 				"asserting function reset fail %d!\n", ret);
3905 			return ret;
3906 		}
3907 
		/* After performing PF reset, it is not necessary to do any
		 * mailbox handling or send any command to firmware, because
		 * mailbox handling and commands to firmware are only valid
		 * after hclge_cmd_init is called.
		 */
3913 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3914 		hdev->rst_stats.pf_rst_cnt++;
3915 		break;
3916 	case HNAE3_FLR_RESET:
3917 		ret = hclge_func_reset_notify_vf(hdev);
3918 		if (ret)
3919 			return ret;
3920 		break;
3921 	case HNAE3_IMP_RESET:
3922 		hclge_handle_imp_error(hdev);
3923 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3924 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3925 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3926 		break;
3927 	default:
3928 		break;
3929 	}
3930 
3931 	/* inform hardware that preparatory work is done */
3932 	msleep(HCLGE_RESET_SYNC_TIME);
3933 	hclge_reset_handshake(hdev, true);
3934 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3935 
3936 	return ret;
3937 }
3938 
3939 static void hclge_show_rst_info(struct hclge_dev *hdev)
3940 {
3941 	char *buf;
3942 
3943 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3944 	if (!buf)
3945 		return;
3946 
3947 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3948 
3949 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3950 
3951 	kfree(buf);
3952 }
3953 
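/* Handle a failed reset attempt. Returns true when the reset task should be
 * rescheduled (a reset is still pending or the failure count has not yet
 * reached MAX_RESET_FAIL_CNT), and false when a new reset interrupt will
 * take over or the failure is treated as final.
 */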
3954 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3955 {
3956 #define MAX_RESET_FAIL_CNT 5
3957 
3958 	if (hdev->reset_pending) {
3959 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3960 			 hdev->reset_pending);
3961 		return true;
3962 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3963 		   HCLGE_RESET_INT_M) {
3964 		dev_info(&hdev->pdev->dev,
3965 			 "reset failed because new reset interrupt\n");
3966 		hclge_clear_reset_cause(hdev);
3967 		return false;
3968 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3969 		hdev->rst_stats.reset_fail_cnt++;
3970 		set_bit(hdev->reset_type, &hdev->reset_pending);
3971 		dev_info(&hdev->pdev->dev,
3972 			 "re-schedule reset task(%u)\n",
3973 			 hdev->rst_stats.reset_fail_cnt);
3974 		return true;
3975 	}
3976 
3977 	hclge_clear_reset_cause(hdev);
3978 
	/* recover the handshake status when reset fails */
3980 	hclge_reset_handshake(hdev, true);
3981 
3982 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3983 
3984 	hclge_show_rst_info(hdev);
3985 
3986 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3987 
3988 	return false;
3989 }
3990 
3991 static void hclge_update_reset_level(struct hclge_dev *hdev)
3992 {
3993 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3994 	enum hnae3_reset_type reset_level;
3995 
	/* The reset request will not be set during reset, so clear the
	 * pending reset request here to avoid an unnecessary reset
	 * caused by the same reason.
	 */
4000 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4001 
	/* If default_reset_request holds a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * can only be fixed by this kind of reset.
	 */
4006 	reset_level = hclge_get_reset_level(ae_dev,
4007 					    &hdev->default_reset_request);
4008 	if (reset_level != HNAE3_NONE_RESET)
4009 		set_bit(reset_level, &hdev->reset_request);
4010 }
4011 
4012 static int hclge_set_rst_done(struct hclge_dev *hdev)
4013 {
4014 	struct hclge_pf_rst_done_cmd *req;
4015 	struct hclge_desc desc;
4016 	int ret;
4017 
4018 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4019 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4020 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4021 
4022 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4023 	/* To be compatible with the old firmware, which does not support
4024 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4025 	 * return success
4026 	 */
4027 	if (ret == -EOPNOTSUPP) {
4028 		dev_warn(&hdev->pdev->dev,
4029 			 "current firmware does not support command(0x%x)!\n",
4030 			 HCLGE_OPC_PF_RST_DONE);
4031 		return 0;
4032 	} else if (ret) {
4033 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4034 			ret);
4035 	}
4036 
4037 	return ret;
4038 }
4039 
4040 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4041 {
4042 	int ret = 0;
4043 
4044 	switch (hdev->reset_type) {
4045 	case HNAE3_FUNC_RESET:
4046 	case HNAE3_FLR_RESET:
4047 		ret = hclge_set_all_vf_rst(hdev, false);
4048 		break;
4049 	case HNAE3_GLOBAL_RESET:
4050 	case HNAE3_IMP_RESET:
4051 		ret = hclge_set_rst_done(hdev);
4052 		break;
4053 	default:
4054 		break;
4055 	}
4056 
	/* clear the handshake status after re-initialization is done */
4058 	hclge_reset_handshake(hdev, false);
4059 
4060 	return ret;
4061 }
4062 
4063 static int hclge_reset_stack(struct hclge_dev *hdev)
4064 {
4065 	int ret;
4066 
4067 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4068 	if (ret)
4069 		return ret;
4070 
4071 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4072 	if (ret)
4073 		return ret;
4074 
4075 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4076 }
4077 
4078 static int hclge_reset_prepare(struct hclge_dev *hdev)
4079 {
4080 	int ret;
4081 
4082 	hdev->rst_stats.reset_cnt++;
4083 	/* perform reset of the stack & ae device for a client */
4084 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4085 	if (ret)
4086 		return ret;
4087 
4088 	rtnl_lock();
4089 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4090 	rtnl_unlock();
4091 	if (ret)
4092 		return ret;
4093 
4094 	return hclge_reset_prepare_wait(hdev);
4095 }
4096 
4097 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4098 {
4099 	int ret;
4100 
4101 	hdev->rst_stats.hw_reset_done_cnt++;
4102 
4103 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4104 	if (ret)
4105 		return ret;
4106 
4107 	rtnl_lock();
4108 	ret = hclge_reset_stack(hdev);
4109 	rtnl_unlock();
4110 	if (ret)
4111 		return ret;
4112 
4113 	hclge_clear_reset_cause(hdev);
4114 
4115 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE notify error if the reset has already failed
	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
	 */
4119 	if (ret &&
4120 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4121 		return ret;
4122 
4123 	ret = hclge_reset_prepare_up(hdev);
4124 	if (ret)
4125 		return ret;
4126 
4127 	rtnl_lock();
4128 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4129 	rtnl_unlock();
4130 	if (ret)
4131 		return ret;
4132 
4133 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4134 	if (ret)
4135 		return ret;
4136 
4137 	hdev->last_reset_time = jiffies;
4138 	hdev->rst_stats.reset_fail_cnt = 0;
4139 	hdev->rst_stats.reset_done_cnt++;
4140 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4141 
4142 	hclge_update_reset_level(hdev);
4143 
4144 	return 0;
4145 }
4146 
4147 static void hclge_reset(struct hclge_dev *hdev)
4148 {
4149 	if (hclge_reset_prepare(hdev))
4150 		goto err_reset;
4151 
4152 	if (hclge_reset_wait(hdev))
4153 		goto err_reset;
4154 
4155 	if (hclge_reset_rebuild(hdev))
4156 		goto err_reset;
4157 
4158 	return;
4159 
4160 err_reset:
4161 	if (hclge_reset_err_handle(hdev))
4162 		hclge_reset_task_schedule(hdev);
4163 }
4164 
4165 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4166 {
4167 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4168 	struct hclge_dev *hdev = ae_dev->priv;
4169 
	/* We might end up getting called broadly because of the two cases
	 * below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to a timeout.
	 *
	 * Check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, so check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. If it is a repeat of the most
	 * recent reset request, we want to throttle it; therefore, we will
	 * not allow it again before HCLGE_RESET_INTERVAL has passed.
	 */
4184 
4185 	if (time_before(jiffies, (hdev->last_reset_time +
4186 				  HCLGE_RESET_INTERVAL))) {
4187 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4188 		return;
4189 	}
4190 
4191 	if (hdev->default_reset_request) {
4192 		hdev->reset_level =
4193 			hclge_get_reset_level(ae_dev,
4194 					      &hdev->default_reset_request);
4195 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4196 		hdev->reset_level = HNAE3_FUNC_RESET;
4197 	}
4198 
4199 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4200 		 hdev->reset_level);
4201 
4202 	/* request reset & schedule reset task */
4203 	set_bit(hdev->reset_level, &hdev->reset_request);
4204 	hclge_reset_task_schedule(hdev);
4205 
4206 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4207 		hdev->reset_level++;
4208 }
4209 
4210 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4211 					enum hnae3_reset_type rst_type)
4212 {
4213 	struct hclge_dev *hdev = ae_dev->priv;
4214 
4215 	set_bit(rst_type, &hdev->default_reset_request);
4216 }
4217 
4218 static void hclge_reset_timer(struct timer_list *t)
4219 {
4220 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4221 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4225 	if (!hdev->default_reset_request)
4226 		return;
4227 
4228 	dev_info(&hdev->pdev->dev,
4229 		 "triggering reset in reset timer\n");
4230 	hclge_reset_event(hdev->pdev, NULL);
4231 }
4232 
4233 static void hclge_reset_subtask(struct hclge_dev *hdev)
4234 {
4235 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4236 
	/* Check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for the hardware to complete the reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. Otherwise, we can come back later to check this status, so
	 *       re-schedule now.
	 */
4246 	hdev->last_reset_time = jiffies;
4247 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4248 	if (hdev->reset_type != HNAE3_NONE_RESET)
4249 		hclge_reset(hdev);
4250 
4251 	/* check if we got any *new* reset requests to be honored */
4252 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4253 	if (hdev->reset_type != HNAE3_NONE_RESET)
4254 		hclge_do_reset(hdev);
4255 
4256 	hdev->reset_type = HNAE3_NONE_RESET;
4257 }
4258 
4259 static void hclge_reset_service_task(struct hclge_dev *hdev)
4260 {
4261 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4262 		return;
4263 
4264 	down(&hdev->reset_sem);
4265 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4266 
4267 	hclge_reset_subtask(hdev);
4268 
4269 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4270 	up(&hdev->reset_sem);
4271 }
4272 
4273 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4274 {
4275 	int i;
4276 
	/* start from vport 1, since the PF (vport 0) is always alive */
4278 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4279 		struct hclge_vport *vport = &hdev->vport[i];
4280 
4281 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4282 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4283 
		/* If the VF is not alive, reset its mps to the default value */
4285 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4286 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4287 	}
4288 }
4289 
4290 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4291 {
4292 	unsigned long delta = round_jiffies_relative(HZ);
4293 
4294 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4295 		return;
4296 
4297 	/* Always handle the link updating to make sure link state is
4298 	 * updated when it is triggered by mbx.
4299 	 */
4300 	hclge_update_link_status(hdev);
4301 	hclge_sync_mac_table(hdev);
4302 	hclge_sync_promisc_mode(hdev);
4303 	hclge_sync_fd_table(hdev);
4304 
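	/* Rate-limit the heavier periodic work below to roughly once per
	 * second: if the last full pass ran less than HZ jiffies ago, just
	 * reschedule for the remaining time.
	 */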
4305 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4306 		delta = jiffies - hdev->last_serv_processed;
4307 
4308 		if (delta < round_jiffies_relative(HZ)) {
4309 			delta = round_jiffies_relative(HZ) - delta;
4310 			goto out;
4311 		}
4312 	}
4313 
4314 	hdev->serv_processed_cnt++;
4315 	hclge_update_vport_alive(hdev);
4316 
4317 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4318 		hdev->last_serv_processed = jiffies;
4319 		goto out;
4320 	}
4321 
4322 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4323 		hclge_update_stats_for_all(hdev);
4324 
4325 	hclge_update_port_info(hdev);
4326 	hclge_sync_vlan_filter(hdev);
4327 
4328 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4329 		hclge_rfs_filter_expire(hdev);
4330 
4331 	hdev->last_serv_processed = jiffies;
4332 
4333 out:
4334 	hclge_task_schedule(hdev, delta);
4335 }
4336 
4337 static void hclge_service_task(struct work_struct *work)
4338 {
4339 	struct hclge_dev *hdev =
4340 		container_of(work, struct hclge_dev, service_task.work);
4341 
4342 	hclge_reset_service_task(hdev);
4343 	hclge_mailbox_service_task(hdev);
4344 	hclge_periodic_service_task(hdev);
4345 
	/* Handle reset and mbx again in case the periodic task delays the
	 * handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
4350 	hclge_reset_service_task(hdev);
4351 	hclge_mailbox_service_task(hdev);
4352 }
4353 
4354 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4355 {
4356 	/* VF handle has no client */
4357 	if (!handle->client)
4358 		return container_of(handle, struct hclge_vport, nic);
4359 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4360 		return container_of(handle, struct hclge_vport, roce);
4361 	else
4362 		return container_of(handle, struct hclge_vport, nic);
4363 }
4364 
4365 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4366 				  struct hnae3_vector_info *vector_info)
4367 {
4368 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4369 
4370 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4371 
	/* an extended offset is needed to configure vectors >= 64 */
4373 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4374 		vector_info->io_addr = hdev->hw.io_base +
4375 				HCLGE_VECTOR_REG_BASE +
4376 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4377 	else
4378 		vector_info->io_addr = hdev->hw.io_base +
4379 				HCLGE_VECTOR_EXT_REG_BASE +
4380 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4381 				HCLGE_VECTOR_REG_OFFSET_H +
4382 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4383 				HCLGE_VECTOR_REG_OFFSET;
4384 
4385 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4386 	hdev->vector_irq[idx] = vector_info->vector;
4387 }
4388 
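/* Allocate up to @vector_num vectors for the NIC client, limited by the
 * vectors left and the NIC vector budget. Returns the number of vectors
 * actually filled in @vector_info, which may be fewer than requested.
 */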
4389 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4390 			    struct hnae3_vector_info *vector_info)
4391 {
4392 	struct hclge_vport *vport = hclge_get_vport(handle);
4393 	struct hnae3_vector_info *vector = vector_info;
4394 	struct hclge_dev *hdev = vport->back;
4395 	int alloc = 0;
4396 	u16 i = 0;
4397 	u16 j;
4398 
4399 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4400 	vector_num = min(hdev->num_msi_left, vector_num);
4401 
4402 	for (j = 0; j < vector_num; j++) {
4403 		while (++i < hdev->num_nic_msi) {
4404 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4405 				hclge_get_vector_info(hdev, i, vector);
4406 				vector++;
4407 				alloc++;
4408 
4409 				break;
4410 			}
4411 		}
4412 	}
4413 	hdev->num_msi_left -= alloc;
4414 	hdev->num_msi_used += alloc;
4415 
4416 	return alloc;
4417 }
4418 
4419 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4420 {
4421 	int i;
4422 
4423 	for (i = 0; i < hdev->num_msi; i++)
4424 		if (vector == hdev->vector_irq[i])
4425 			return i;
4426 
4427 	return -EINVAL;
4428 }
4429 
4430 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4431 {
4432 	struct hclge_vport *vport = hclge_get_vport(handle);
4433 	struct hclge_dev *hdev = vport->back;
4434 	int vector_id;
4435 
4436 	vector_id = hclge_get_vector_index(hdev, vector);
4437 	if (vector_id < 0) {
4438 		dev_err(&hdev->pdev->dev,
4439 			"Get vector index fail. vector = %d\n", vector);
4440 		return vector_id;
4441 	}
4442 
4443 	hclge_free_vector(hdev, vector_id);
4444 
4445 	return 0;
4446 }
4447 
4448 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4449 {
4450 	return HCLGE_RSS_KEY_SIZE;
4451 }
4452 
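/* Program the RSS hash algorithm and hash key. The key is longer than a
 * single descriptor can carry, so it is written in chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes with the chunk offset encoded in the
 * hash_config field.
 */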
4453 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4454 				  const u8 hfunc, const u8 *key)
4455 {
4456 	struct hclge_rss_config_cmd *req;
4457 	unsigned int key_offset = 0;
4458 	struct hclge_desc desc;
4459 	int key_counts;
4460 	int key_size;
4461 	int ret;
4462 
4463 	key_counts = HCLGE_RSS_KEY_SIZE;
4464 	req = (struct hclge_rss_config_cmd *)desc.data;
4465 
4466 	while (key_counts) {
4467 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4468 					   false);
4469 
4470 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4471 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4472 
4473 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4474 		memcpy(req->hash_key,
4475 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4476 
4477 		key_counts -= key_size;
4478 		key_offset++;
4479 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4480 		if (ret) {
4481 			dev_err(&hdev->pdev->dev,
4482 				"Configure RSS config fail, status = %d\n",
4483 				ret);
4484 			return ret;
4485 		}
4486 	}
4487 	return 0;
4488 }
4489 
4490 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4491 {
4492 	struct hclge_rss_indirection_table_cmd *req;
4493 	struct hclge_desc desc;
4494 	int rss_cfg_tbl_num;
4495 	u8 rss_msb_oft;
4496 	u8 rss_msb_val;
4497 	int ret;
4498 	u16 qid;
4499 	int i;
4500 	u32 j;
4501 
4502 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4503 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4504 			  HCLGE_RSS_CFG_TBL_SIZE;
4505 
4506 	for (i = 0; i < rss_cfg_tbl_num; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
					   false);
4509 
4510 		req->start_table_index =
4511 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4512 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
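		/* Each queue id is split across the descriptor: the low byte
		 * goes into rss_qid_l[], while the next higher bit is packed
		 * into the rss_qid_h[] bitmap at a position derived from the
		 * entry index.
		 */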
4513 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4514 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4515 			req->rss_qid_l[j] = qid & 0xff;
4516 			rss_msb_oft =
4517 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4518 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4519 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4520 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4521 		}
4522 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4523 		if (ret) {
4524 			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
4526 				ret);
4527 			return ret;
4528 		}
4529 	}
4530 	return 0;
4531 }
4532 
4533 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4534 				 u16 *tc_size, u16 *tc_offset)
4535 {
4536 	struct hclge_rss_tc_mode_cmd *req;
4537 	struct hclge_desc desc;
4538 	int ret;
4539 	int i;
4540 
4541 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4542 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4543 
4544 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4545 		u16 mode = 0;
4546 
4547 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4548 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4549 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4550 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4551 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4552 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4553 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4554 
4555 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4556 	}
4557 
4558 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4559 	if (ret)
4560 		dev_err(&hdev->pdev->dev,
4561 			"Configure rss tc mode fail, status = %d\n", ret);
4562 
4563 	return ret;
4564 }
4565 
4566 static void hclge_get_rss_type(struct hclge_vport *vport)
4567 {
4568 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4569 	    vport->rss_tuple_sets.ipv4_udp_en ||
4570 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4571 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4572 	    vport->rss_tuple_sets.ipv6_udp_en ||
4573 	    vport->rss_tuple_sets.ipv6_sctp_en)
4574 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4575 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4576 		 vport->rss_tuple_sets.ipv6_fragment_en)
4577 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4578 	else
4579 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4580 }
4581 
4582 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4583 {
4584 	struct hclge_rss_input_tuple_cmd *req;
4585 	struct hclge_desc desc;
4586 	int ret;
4587 
4588 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4589 
4590 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4591 
4592 	/* Get the tuple cfg from pf */
4593 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4594 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4595 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4596 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4597 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4598 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4599 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4600 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4601 	hclge_get_rss_type(&hdev->vport[0]);
4602 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4603 	if (ret)
4604 		dev_err(&hdev->pdev->dev,
4605 			"Configure rss input fail, status = %d\n", ret);
4606 	return ret;
4607 }
4608 
4609 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4610 			 u8 *key, u8 *hfunc)
4611 {
4612 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4613 	struct hclge_vport *vport = hclge_get_vport(handle);
4614 	int i;
4615 
4616 	/* Get hash algorithm */
4617 	if (hfunc) {
4618 		switch (vport->rss_algo) {
4619 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4620 			*hfunc = ETH_RSS_HASH_TOP;
4621 			break;
4622 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4623 			*hfunc = ETH_RSS_HASH_XOR;
4624 			break;
4625 		default:
4626 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4627 			break;
4628 		}
4629 	}
4630 
4631 	/* Get the RSS Key required by the user */
4632 	if (key)
4633 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4634 
4635 	/* Get indirect table */
4636 	if (indir)
4637 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
			indir[i] = vport->rss_indirection_tbl[i];
4639 
4640 	return 0;
4641 }
4642 
4643 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4644 			 const  u8 *key, const  u8 hfunc)
4645 {
4646 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4647 	struct hclge_vport *vport = hclge_get_vport(handle);
4648 	struct hclge_dev *hdev = vport->back;
4649 	u8 hash_algo;
4650 	int ret, i;
4651 
	/* Set the RSS Hash Key if specified by the user */
4653 	if (key) {
4654 		switch (hfunc) {
4655 		case ETH_RSS_HASH_TOP:
4656 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4657 			break;
4658 		case ETH_RSS_HASH_XOR:
4659 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4660 			break;
4661 		case ETH_RSS_HASH_NO_CHANGE:
4662 			hash_algo = vport->rss_algo;
4663 			break;
4664 		default:
4665 			return -EINVAL;
4666 		}
4667 
4668 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4669 		if (ret)
4670 			return ret;
4671 
		/* Update the shadow RSS key with the user specified key */
4673 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4674 		vport->rss_algo = hash_algo;
4675 	}
4676 
4677 	/* Update the shadow RSS table with user specified qids */
4678 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4679 		vport->rss_indirection_tbl[i] = indir[i];
4680 
4681 	/* Update the hardware */
4682 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4683 }
4684 
4685 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4686 {
4687 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4688 
4689 	if (nfc->data & RXH_L4_B_2_3)
4690 		hash_sets |= HCLGE_D_PORT_BIT;
4691 	else
4692 		hash_sets &= ~HCLGE_D_PORT_BIT;
4693 
4694 	if (nfc->data & RXH_IP_SRC)
4695 		hash_sets |= HCLGE_S_IP_BIT;
4696 	else
4697 		hash_sets &= ~HCLGE_S_IP_BIT;
4698 
4699 	if (nfc->data & RXH_IP_DST)
4700 		hash_sets |= HCLGE_D_IP_BIT;
4701 	else
4702 		hash_sets &= ~HCLGE_D_IP_BIT;
4703 
4704 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4705 		hash_sets |= HCLGE_V_TAG_BIT;
4706 
4707 	return hash_sets;
4708 }
4709 
4710 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4711 				    struct ethtool_rxnfc *nfc,
4712 				    struct hclge_rss_input_tuple_cmd *req)
4713 {
4714 	struct hclge_dev *hdev = vport->back;
4715 	u8 tuple_sets;
4716 
4717 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4718 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4719 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4720 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4721 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4722 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4723 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4724 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4725 
4726 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4727 	switch (nfc->flow_type) {
4728 	case TCP_V4_FLOW:
4729 		req->ipv4_tcp_en = tuple_sets;
4730 		break;
4731 	case TCP_V6_FLOW:
4732 		req->ipv6_tcp_en = tuple_sets;
4733 		break;
4734 	case UDP_V4_FLOW:
4735 		req->ipv4_udp_en = tuple_sets;
4736 		break;
4737 	case UDP_V6_FLOW:
4738 		req->ipv6_udp_en = tuple_sets;
4739 		break;
4740 	case SCTP_V4_FLOW:
4741 		req->ipv4_sctp_en = tuple_sets;
4742 		break;
4743 	case SCTP_V6_FLOW:
4744 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4745 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4746 			return -EINVAL;
4747 
4748 		req->ipv6_sctp_en = tuple_sets;
4749 		break;
4750 	case IPV4_FLOW:
4751 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4752 		break;
4753 	case IPV6_FLOW:
4754 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4755 		break;
4756 	default:
4757 		return -EINVAL;
4758 	}
4759 
4760 	return 0;
4761 }
4762 
4763 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4764 			       struct ethtool_rxnfc *nfc)
4765 {
4766 	struct hclge_vport *vport = hclge_get_vport(handle);
4767 	struct hclge_dev *hdev = vport->back;
4768 	struct hclge_rss_input_tuple_cmd *req;
4769 	struct hclge_desc desc;
4770 	int ret;
4771 
4772 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4773 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4774 		return -EINVAL;
4775 
4776 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4777 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4778 
4779 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4780 	if (ret) {
4781 		dev_err(&hdev->pdev->dev,
4782 			"failed to init rss tuple cmd, ret = %d\n", ret);
4783 		return ret;
4784 	}
4785 
4786 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4787 	if (ret) {
4788 		dev_err(&hdev->pdev->dev,
4789 			"Set rss tuple fail, status = %d\n", ret);
4790 		return ret;
4791 	}
4792 
4793 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4794 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4795 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4796 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4797 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4798 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4799 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4800 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4801 	hclge_get_rss_type(vport);
4802 	return 0;
4803 }
4804 
4805 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4806 				     u8 *tuple_sets)
4807 {
4808 	switch (flow_type) {
4809 	case TCP_V4_FLOW:
4810 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4811 		break;
4812 	case UDP_V4_FLOW:
4813 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4814 		break;
4815 	case TCP_V6_FLOW:
4816 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4817 		break;
4818 	case UDP_V6_FLOW:
4819 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4820 		break;
4821 	case SCTP_V4_FLOW:
4822 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4823 		break;
4824 	case SCTP_V6_FLOW:
4825 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4826 		break;
4827 	case IPV4_FLOW:
4828 	case IPV6_FLOW:
4829 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4830 		break;
4831 	default:
4832 		return -EINVAL;
4833 	}
4834 
4835 	return 0;
4836 }
4837 
4838 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4839 {
4840 	u64 tuple_data = 0;
4841 
4842 	if (tuple_sets & HCLGE_D_PORT_BIT)
4843 		tuple_data |= RXH_L4_B_2_3;
4844 	if (tuple_sets & HCLGE_S_PORT_BIT)
4845 		tuple_data |= RXH_L4_B_0_1;
4846 	if (tuple_sets & HCLGE_D_IP_BIT)
4847 		tuple_data |= RXH_IP_DST;
4848 	if (tuple_sets & HCLGE_S_IP_BIT)
4849 		tuple_data |= RXH_IP_SRC;
4850 
4851 	return tuple_data;
4852 }
4853 
4854 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4855 			       struct ethtool_rxnfc *nfc)
4856 {
4857 	struct hclge_vport *vport = hclge_get_vport(handle);
4858 	u8 tuple_sets;
4859 	int ret;
4860 
4861 	nfc->data = 0;
4862 
4863 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4864 	if (ret || !tuple_sets)
4865 		return ret;
4866 
4867 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4868 
4869 	return 0;
4870 }
4871 
4872 static int hclge_get_tc_size(struct hnae3_handle *handle)
4873 {
4874 	struct hclge_vport *vport = hclge_get_vport(handle);
4875 	struct hclge_dev *hdev = vport->back;
4876 
4877 	return hdev->pf_rss_size_max;
4878 }
4879 
4880 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4881 {
4882 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4883 	struct hclge_vport *vport = hdev->vport;
4884 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4885 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4886 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4887 	struct hnae3_tc_info *tc_info;
4888 	u16 roundup_size;
4889 	u16 rss_size;
4890 	int i;
4891 
4892 	tc_info = &vport->nic.kinfo.tc_info;
4893 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4894 		rss_size = tc_info->tqp_count[i];
4895 		tc_valid[i] = 0;
4896 
4897 		if (!(hdev->hw_tc_map & BIT(i)))
4898 			continue;
4899 
		/* The tc_size set to hardware is the log2 of the roundup
		 * power of two of rss_size; the actual queue size is limited
		 * by the indirection table.
		 */
4904 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4905 		    rss_size == 0) {
4906 			dev_err(&hdev->pdev->dev,
4907 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4908 				rss_size);
4909 			return -EINVAL;
4910 		}
4911 
4912 		roundup_size = roundup_pow_of_two(rss_size);
4913 		roundup_size = ilog2(roundup_size);
4914 
4915 		tc_valid[i] = 1;
4916 		tc_size[i] = roundup_size;
4917 		tc_offset[i] = tc_info->tqp_offset[i];
4918 	}
4919 
4920 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4921 }
4922 
4923 int hclge_rss_init_hw(struct hclge_dev *hdev)
4924 {
4925 	struct hclge_vport *vport = hdev->vport;
4926 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4927 	u8 *key = vport[0].rss_hash_key;
4928 	u8 hfunc = vport[0].rss_algo;
4929 	int ret;
4930 
4931 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4932 	if (ret)
4933 		return ret;
4934 
4935 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4936 	if (ret)
4937 		return ret;
4938 
4939 	ret = hclge_set_rss_input_tuple(hdev);
4940 	if (ret)
4941 		return ret;
4942 
4943 	return hclge_init_rss_tc_mode(hdev);
4944 }
4945 
4946 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4947 {
4948 	struct hclge_vport *vport = &hdev->vport[0];
4949 	int i;
4950 
4951 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4952 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
4953 }
4954 
4955 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4956 {
4957 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4958 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4959 	struct hclge_vport *vport = &hdev->vport[0];
4960 	u16 *rss_ind_tbl;
4961 
4962 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4963 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4964 
4965 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4966 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4967 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
4968 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4969 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4970 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4971 	vport->rss_tuple_sets.ipv6_sctp_en =
4972 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4973 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4974 		HCLGE_RSS_INPUT_TUPLE_SCTP;
4975 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4976 
4977 	vport->rss_algo = rss_algo;
4978 
4979 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4980 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
4981 	if (!rss_ind_tbl)
4982 		return -ENOMEM;
4983 
4984 	vport->rss_indirection_tbl = rss_ind_tbl;
4985 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
4986 
4987 	hclge_rss_indir_init_cfg(hdev);
4988 
4989 	return 0;
4990 }
4991 
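/* Map (or unmap, when @en is false) the rings in @ring_chain to the given
 * vector. The chain is programmed HCLGE_VECTOR_ELEMENTS_PER_CMD entries at
 * a time: a command is sent whenever a descriptor fills up, and once more
 * for any remaining entries.
 */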
4992 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4993 				int vector_id, bool en,
4994 				struct hnae3_ring_chain_node *ring_chain)
4995 {
4996 	struct hclge_dev *hdev = vport->back;
4997 	struct hnae3_ring_chain_node *node;
4998 	struct hclge_desc desc;
4999 	struct hclge_ctrl_vector_chain_cmd *req =
5000 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5001 	enum hclge_cmd_status status;
5002 	enum hclge_opcode_type op;
5003 	u16 tqp_type_and_id;
5004 	int i;
5005 
5006 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5007 	hclge_cmd_setup_basic_desc(&desc, op, false);
5008 	req->int_vector_id_l = hnae3_get_field(vector_id,
5009 					       HCLGE_VECTOR_ID_L_M,
5010 					       HCLGE_VECTOR_ID_L_S);
5011 	req->int_vector_id_h = hnae3_get_field(vector_id,
5012 					       HCLGE_VECTOR_ID_H_M,
5013 					       HCLGE_VECTOR_ID_H_S);
5014 
5015 	i = 0;
5016 	for (node = ring_chain; node; node = node->next) {
5017 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5019 				HCLGE_INT_TYPE_S,
5020 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5021 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5022 				HCLGE_TQP_ID_S, node->tqp_index);
5023 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5024 				HCLGE_INT_GL_IDX_S,
5025 				hnae3_get_field(node->int_gl_idx,
5026 						HNAE3_RING_GL_IDX_M,
5027 						HNAE3_RING_GL_IDX_S));
5028 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5029 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5030 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5031 			req->vfid = vport->vport_id;
5032 
5033 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5034 			if (status) {
5035 				dev_err(&hdev->pdev->dev,
5036 					"Map TQP fail, status is %d.\n",
5037 					status);
5038 				return -EIO;
5039 			}
5040 			i = 0;
5041 
			hclge_cmd_setup_basic_desc(&desc, op, false);
5045 			req->int_vector_id_l =
5046 				hnae3_get_field(vector_id,
5047 						HCLGE_VECTOR_ID_L_M,
5048 						HCLGE_VECTOR_ID_L_S);
5049 			req->int_vector_id_h =
5050 				hnae3_get_field(vector_id,
5051 						HCLGE_VECTOR_ID_H_M,
5052 						HCLGE_VECTOR_ID_H_S);
5053 		}
5054 	}
5055 
5056 	if (i > 0) {
5057 		req->int_cause_num = i;
5058 		req->vfid = vport->vport_id;
5059 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5060 		if (status) {
5061 			dev_err(&hdev->pdev->dev,
5062 				"Map TQP fail, status is %d.\n", status);
5063 			return -EIO;
5064 		}
5065 	}
5066 
5067 	return 0;
5068 }
5069 
5070 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5071 				    struct hnae3_ring_chain_node *ring_chain)
5072 {
5073 	struct hclge_vport *vport = hclge_get_vport(handle);
5074 	struct hclge_dev *hdev = vport->back;
5075 	int vector_id;
5076 
5077 	vector_id = hclge_get_vector_index(hdev, vector);
5078 	if (vector_id < 0) {
5079 		dev_err(&hdev->pdev->dev,
5080 			"failed to get vector index. vector=%d\n", vector);
5081 		return vector_id;
5082 	}
5083 
5084 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5085 }
5086 
5087 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5088 				       struct hnae3_ring_chain_node *ring_chain)
5089 {
5090 	struct hclge_vport *vport = hclge_get_vport(handle);
5091 	struct hclge_dev *hdev = vport->back;
5092 	int vector_id, ret;
5093 
5094 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5095 		return 0;
5096 
5097 	vector_id = hclge_get_vector_index(hdev, vector);
5098 	if (vector_id < 0) {
5099 		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
5101 		return vector_id;
5102 	}
5103 
5104 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5105 	if (ret)
5106 		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret = %d\n",
5108 			vector_id, ret);
5109 
5110 	return ret;
5111 }
5112 
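/* Configure promiscuous mode for the given function. Both the extended
 * per-direction enables and the legacy promisc field are filled in so the
 * command works across device versions; unicast TX promiscuity is dropped
 * when the limit-promisc private flag is set.
 */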
5113 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5114 				      bool en_uc, bool en_mc, bool en_bc)
5115 {
5116 	struct hclge_vport *vport = &hdev->vport[vf_id];
5117 	struct hnae3_handle *handle = &vport->nic;
5118 	struct hclge_promisc_cfg_cmd *req;
5119 	struct hclge_desc desc;
5120 	bool uc_tx_en = en_uc;
5121 	u8 promisc_cfg = 0;
5122 	int ret;
5123 
5124 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5125 
5126 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5127 	req->vf_id = vf_id;
5128 
5129 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5130 		uc_tx_en = false;
5131 
5132 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5133 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5134 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5135 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5136 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5137 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5138 	req->extend_promisc = promisc_cfg;
5139 
5140 	/* to be compatible with DEVICE_VERSION_V1/2 */
5141 	promisc_cfg = 0;
5142 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5143 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5144 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5145 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5146 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5147 	req->promisc = promisc_cfg;
5148 
5149 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5150 	if (ret)
5151 		dev_err(&hdev->pdev->dev,
5152 			"failed to set vport %u promisc mode, ret = %d.\n",
5153 			vf_id, ret);
5154 
5155 	return ret;
5156 }
5157 
5158 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5159 				 bool en_mc_pmc, bool en_bc_pmc)
5160 {
5161 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5162 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5163 }
5164 
5165 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5166 				  bool en_mc_pmc)
5167 {
5168 	struct hclge_vport *vport = hclge_get_vport(handle);
5169 	struct hclge_dev *hdev = vport->back;
5170 	bool en_bc_pmc = true;
5171 
	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should be disabled until the user enables promisc mode
	 */
5176 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5177 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5178 
5179 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5180 					    en_bc_pmc);
5181 }
5182 
5183 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5184 {
5185 	struct hclge_vport *vport = hclge_get_vport(handle);
5186 
5187 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5188 }
5189 
5190 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5191 {
5192 	if (hlist_empty(&hdev->fd_rule_list))
5193 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5194 }
5195 
5196 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5197 {
5198 	if (!test_bit(location, hdev->fd_bmap)) {
5199 		set_bit(location, hdev->fd_bmap);
5200 		hdev->hclge_fd_rule_num++;
5201 	}
5202 }
5203 
5204 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5205 {
5206 	if (test_bit(location, hdev->fd_bmap)) {
5207 		clear_bit(location, hdev->fd_bmap);
5208 		hdev->hclge_fd_rule_num--;
5209 	}
5210 }
5211 
5212 static void hclge_fd_free_node(struct hclge_dev *hdev,
5213 			       struct hclge_fd_rule *rule)
5214 {
5215 	hlist_del(&rule->rule_node);
5216 	kfree(rule);
5217 	hclge_sync_fd_state(hdev);
5218 }
5219 
5220 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5221 				      struct hclge_fd_rule *old_rule,
5222 				      struct hclge_fd_rule *new_rule,
5223 				      enum HCLGE_FD_NODE_STATE state)
5224 {
5225 	switch (state) {
5226 	case HCLGE_FD_TO_ADD:
5227 	case HCLGE_FD_ACTIVE:
5228 		/* 1) if the new state is TO_ADD, just replace the old rule
5229 		 * with the same location, no matter its state, because the
5230 		 * new rule will be configured to the hardware.
5231 		 * 2) if the new state is ACTIVE, it means the new rule
5232 		 * has been configured to the hardware, so just replace
5233 		 * the old rule node with the same location.
		 * 3) since neither case adds a new node to the list, there is
		 * no need to update the rule number or fd_bmap.
5236 		 */
5237 		new_rule->rule_node.next = old_rule->rule_node.next;
5238 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5239 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5240 		kfree(new_rule);
5241 		break;
5242 	case HCLGE_FD_DELETED:
5243 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5244 		hclge_fd_free_node(hdev, old_rule);
5245 		break;
5246 	case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL and the old rule exists:
		 * 1) if the old rule's state is TO_DEL, do nothing, because
		 * the rule is deleted by location and the rest of the rule
		 * content is irrelevant.
		 * 2) if the old rule's state is ACTIVE, change it to TO_DEL,
		 * so the rule will be deleted when the periodic task is
		 * scheduled.
		 * 3) if the old rule's state is TO_ADD, the rule hasn't been
		 * added to the hardware yet, so just delete the rule node
		 * from fd_rule_list directly.
		 */
5258 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5259 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5260 			hclge_fd_free_node(hdev, old_rule);
5261 			return;
5262 		}
5263 		old_rule->state = HCLGE_FD_TO_DEL;
5264 		break;
5265 	}
5266 }
5267 
5268 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5269 						u16 location,
5270 						struct hclge_fd_rule **parent)
5271 {
5272 	struct hclge_fd_rule *rule;
5273 	struct hlist_node *node;
5274 
5275 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5276 		if (rule->location == location)
5277 			return rule;
5278 		else if (rule->location > location)
5279 			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
5283 		*parent = rule;
5284 	}
5285 
5286 	return NULL;
5287 }
5288 
/* insert fd rule node in ascending order according to rule->location */
5290 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5291 				      struct hclge_fd_rule *rule,
5292 				      struct hclge_fd_rule *parent)
5293 {
5294 	INIT_HLIST_NODE(&rule->rule_node);
5295 
5296 	if (parent)
5297 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5298 	else
5299 		hlist_add_head(&rule->rule_node, hlist);
5300 }
5301 
5302 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5303 				     struct hclge_fd_user_def_cfg *cfg)
5304 {
5305 	struct hclge_fd_user_def_cfg_cmd *req;
5306 	struct hclge_desc desc;
5307 	u16 data = 0;
5308 	int ret;
5309 
5310 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5311 
5312 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5313 
5314 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5315 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5316 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5317 	req->ol2_cfg = cpu_to_le16(data);
5318 
5319 	data = 0;
5320 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5321 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5322 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5323 	req->ol3_cfg = cpu_to_le16(data);
5324 
5325 	data = 0;
5326 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5327 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5328 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5329 	req->ol4_cfg = cpu_to_le16(data);
5330 
5331 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5332 	if (ret)
5333 		dev_err(&hdev->pdev->dev,
5334 			"failed to set fd user def data, ret= %d\n", ret);
5335 	return ret;
5336 }
5337 
5338 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5339 {
5340 	int ret;
5341 
5342 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5343 		return;
5344 
5345 	if (!locked)
5346 		spin_lock_bh(&hdev->fd_rule_lock);
5347 
5348 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5349 	if (ret)
5350 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5351 
5352 	if (!locked)
5353 		spin_unlock_bh(&hdev->fd_rule_lock);
5354 }
5355 
5356 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5357 					  struct hclge_fd_rule *rule)
5358 {
5359 	struct hlist_head *hlist = &hdev->fd_rule_list;
5360 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5361 	struct hclge_fd_user_def_info *info, *old_info;
5362 	struct hclge_fd_user_def_cfg *cfg;
5363 
5364 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5365 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5366 		return 0;
5367 
	/* valid layer values start from 1, so subtract 1 to index the cfg */
5369 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5370 	info = &rule->ep.user_def;
5371 
5372 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5373 		return 0;
5374 
5375 	if (cfg->ref_cnt > 1)
5376 		goto error;
5377 
5378 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5379 	if (fd_rule) {
5380 		old_info = &fd_rule->ep.user_def;
5381 		if (info->layer == old_info->layer)
5382 			return 0;
5383 	}
5384 
5385 error:
5386 	dev_err(&hdev->pdev->dev,
5387 		"No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5388 		info->layer + 1);
5389 	return -ENOSPC;
5390 }
5391 
5392 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5393 					 struct hclge_fd_rule *rule)
5394 {
5395 	struct hclge_fd_user_def_cfg *cfg;
5396 
5397 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5398 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5399 		return;
5400 
5401 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5402 	if (!cfg->ref_cnt) {
5403 		cfg->offset = rule->ep.user_def.offset;
5404 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5405 	}
5406 	cfg->ref_cnt++;
5407 }
5408 
5409 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5410 					 struct hclge_fd_rule *rule)
5411 {
5412 	struct hclge_fd_user_def_cfg *cfg;
5413 
5414 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5415 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5416 		return;
5417 
5418 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5419 	if (!cfg->ref_cnt)
5420 		return;
5421 
5422 	cfg->ref_cnt--;
5423 	if (!cfg->ref_cnt) {
5424 		cfg->offset = 0;
5425 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5426 	}
5427 }
5428 
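/* Update the software fd rule list for the rule at @location: replace or
 * remove the existing node, or insert a new one, keeping the user-def
 * reference counts and the hardware user-def config in sync. Called with
 * hdev->fd_rule_lock held.
 */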
5429 static void hclge_update_fd_list(struct hclge_dev *hdev,
5430 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5431 				 struct hclge_fd_rule *new_rule)
5432 {
5433 	struct hlist_head *hlist = &hdev->fd_rule_list;
5434 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5435 
5436 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5437 	if (fd_rule) {
5438 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5439 		if (state == HCLGE_FD_ACTIVE)
5440 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5441 		hclge_sync_fd_user_def_cfg(hdev, true);
5442 
5443 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5444 		return;
5445 	}
5446 
	/* it's unlikely to fail here, because we have already checked that
	 * the rule exists.
	 */
5450 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5451 		dev_warn(&hdev->pdev->dev,
5452 			 "failed to delete fd rule %u, it's inexistent\n",
5453 			 location);
5454 		return;
5455 	}
5456 
5457 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5458 	hclge_sync_fd_user_def_cfg(hdev, true);
5459 
5460 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5461 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5462 
5463 	if (state == HCLGE_FD_TO_ADD) {
5464 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5465 		hclge_task_schedule(hdev, 0);
5466 	}
5467 }
5468 
5469 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5470 {
5471 	struct hclge_get_fd_mode_cmd *req;
5472 	struct hclge_desc desc;
5473 	int ret;
5474 
5475 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5476 
5477 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5478 
5479 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5480 	if (ret) {
5481 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5482 		return ret;
5483 	}
5484 
5485 	*fd_mode = req->mode;
5486 
5487 	return ret;
5488 }
5489 
5490 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5491 				   u32 *stage1_entry_num,
5492 				   u32 *stage2_entry_num,
5493 				   u16 *stage1_counter_num,
5494 				   u16 *stage2_counter_num)
5495 {
5496 	struct hclge_get_fd_allocation_cmd *req;
5497 	struct hclge_desc desc;
5498 	int ret;
5499 
5500 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5501 
5502 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5503 
5504 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5505 	if (ret) {
5506 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5507 			ret);
5508 		return ret;
5509 	}
5510 
5511 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5512 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5513 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5514 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5515 
5516 	return ret;
5517 }
5518 
5519 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5520 				   enum HCLGE_FD_STAGE stage_num)
5521 {
5522 	struct hclge_set_fd_key_config_cmd *req;
5523 	struct hclge_fd_key_cfg *stage;
5524 	struct hclge_desc desc;
5525 	int ret;
5526 
5527 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5528 
5529 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5530 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5531 	req->stage = stage_num;
5532 	req->key_select = stage->key_sel;
5533 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5534 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5535 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5536 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5537 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5538 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5539 
5540 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5541 	if (ret)
5542 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5543 
5544 	return ret;
5545 }
5546 
5547 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5548 {
5549 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5550 
5551 	spin_lock_bh(&hdev->fd_rule_lock);
5552 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5553 	spin_unlock_bh(&hdev->fd_rule_lock);
5554 
5555 	hclge_fd_set_user_def_cmd(hdev, cfg);
5556 }
5557 
5558 static int hclge_init_fd_config(struct hclge_dev *hdev)
5559 {
5560 #define LOW_2_WORDS		0x03
5561 	struct hclge_fd_key_cfg *key_cfg;
5562 	int ret;
5563 
5564 	if (!hnae3_dev_fd_supported(hdev))
5565 		return 0;
5566 
5567 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5568 	if (ret)
5569 		return ret;
5570 
5571 	switch (hdev->fd_cfg.fd_mode) {
5572 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5573 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5574 		break;
5575 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5576 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5577 		break;
5578 	default:
5579 		dev_err(&hdev->pdev->dev,
5580 			"Unsupported flow director mode %u\n",
5581 			hdev->fd_cfg.fd_mode);
5582 		return -EOPNOTSUPP;
5583 	}
5584 
5585 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5586 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5587 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5588 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5589 	key_cfg->outer_sipv6_word_en = 0;
5590 	key_cfg->outer_dipv6_word_en = 0;
5591 
5592 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5593 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5594 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5595 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5596 
	/* If the max 400-bit key is used, the ether header tuples (MAC
	 * addresses) can also be supported.
	 */
5598 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5599 		key_cfg->tuple_active |=
5600 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5601 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5602 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5603 	}
5604 
5605 	/* roce_type is used to filter roce frames
5606 	 * dst_vport is used to specify the rule
5607 	 */
5608 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5609 
5610 	ret = hclge_get_fd_allocation(hdev,
5611 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5612 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5613 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5614 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5615 	if (ret)
5616 		return ret;
5617 
5618 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5619 }
5620 
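/* The TCAM key is wider than a single command descriptor can carry, so it is
 * written with three chained HCLGE_OPC_FD_TCAM_OP descriptors; req1/req2/req3
 * below each take a consecutive slice of the key bytes.
 */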
5621 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5622 				int loc, u8 *key, bool is_add)
5623 {
5624 	struct hclge_fd_tcam_config_1_cmd *req1;
5625 	struct hclge_fd_tcam_config_2_cmd *req2;
5626 	struct hclge_fd_tcam_config_3_cmd *req3;
5627 	struct hclge_desc desc[3];
5628 	int ret;
5629 
5630 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5631 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5632 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5633 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5634 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5635 
5636 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5637 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5638 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5639 
5640 	req1->stage = stage;
5641 	req1->xy_sel = sel_x ? 1 : 0;
5642 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5643 	req1->index = cpu_to_le32(loc);
5644 	req1->entry_vld = sel_x ? is_add : 0;
5645 
5646 	if (key) {
5647 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5648 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5649 		       sizeof(req2->tcam_data));
5650 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5651 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5652 	}
5653 
5654 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5655 	if (ret)
5656 		dev_err(&hdev->pdev->dev,
5657 			"config tcam key fail, ret=%d\n",
5658 			ret);
5659 
5660 	return ret;
5661 }
5662 
5663 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5664 			      struct hclge_fd_ad_data *action)
5665 {
5666 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5667 	struct hclge_fd_ad_config_cmd *req;
5668 	struct hclge_desc desc;
5669 	u64 ad_data = 0;
5670 	int ret;
5671 
5672 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5673 
5674 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5675 	req->index = cpu_to_le32(loc);
5676 	req->stage = stage;
5677 
5678 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5679 		      action->write_rule_id_to_bd);
5680 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5681 			action->rule_id);
5682 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5683 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5684 			      action->override_tc);
5685 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5686 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5687 	}
5688 	ad_data <<= 32;
5689 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5690 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5691 		      action->forward_to_direct_queue);
5692 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5693 			action->queue_id);
5694 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5695 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5696 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5697 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5698 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5699 			action->counter_id);
5700 
5701 	req->ad_data = cpu_to_le64(ad_data);
5702 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5703 	if (ret)
5704 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5705 
5706 	return ret;
5707 }
5708 
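/* Convert one tuple of @rule into the TCAM x/y key pair. key_x/key_y are
 * derived from the tuple value and its mask via the calc_x()/calc_y() helpers
 * defined elsewhere in this driver; a tuple marked in rule->unused_tuple is
 * skipped and its key bytes stay zero-filled.
 */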
5709 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5710 				   struct hclge_fd_rule *rule)
5711 {
5712 	int offset, moffset, ip_offset;
5713 	enum HCLGE_FD_KEY_OPT key_opt;
5714 	u16 tmp_x_s, tmp_y_s;
5715 	u32 tmp_x_l, tmp_y_l;
5716 	u8 *p = (u8 *)rule;
5717 	int i;
5718 
5719 	if (rule->unused_tuple & BIT(tuple_bit))
5720 		return true;
5721 
5722 	key_opt = tuple_key_info[tuple_bit].key_opt;
5723 	offset = tuple_key_info[tuple_bit].offset;
5724 	moffset = tuple_key_info[tuple_bit].moffset;
5725 
5726 	switch (key_opt) {
5727 	case KEY_OPT_U8:
5728 		calc_x(*key_x, p[offset], p[moffset]);
5729 		calc_y(*key_y, p[offset], p[moffset]);
5730 
5731 		return true;
5732 	case KEY_OPT_LE16:
5733 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5734 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5735 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5736 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5737 
5738 		return true;
5739 	case KEY_OPT_LE32:
5740 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5741 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5742 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5743 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5744 
5745 		return true;
5746 	case KEY_OPT_MAC:
5747 		for (i = 0; i < ETH_ALEN; i++) {
5748 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5749 			       p[moffset + i]);
5750 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5751 			       p[moffset + i]);
5752 		}
5753 
5754 		return true;
5755 	case KEY_OPT_IP:
5756 		ip_offset = IPV4_INDEX * sizeof(u32);
5757 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5758 		       *(u32 *)(&p[moffset + ip_offset]));
5759 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5760 		       *(u32 *)(&p[moffset + ip_offset]));
5761 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5762 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5763 
5764 		return true;
5765 	default:
5766 		return false;
5767 	}
5768 }
5769 
5770 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5771 				 u8 vf_id, u8 network_port_id)
5772 {
5773 	u32 port_number = 0;
5774 
5775 	if (port_type == HOST_PORT) {
5776 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5777 				pf_id);
5778 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5779 				vf_id);
5780 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5781 	} else {
5782 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5783 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5784 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5785 	}
5786 
5787 	return port_number;
5788 }
5789 
5790 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5791 				       __le32 *key_x, __le32 *key_y,
5792 				       struct hclge_fd_rule *rule)
5793 {
5794 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5795 	u8 cur_pos = 0, tuple_size, shift_bits;
5796 	unsigned int i;
5797 
5798 	for (i = 0; i < MAX_META_DATA; i++) {
5799 		tuple_size = meta_data_key_info[i].key_length;
5800 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5801 
5802 		switch (tuple_bit) {
5803 		case BIT(ROCE_TYPE):
5804 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5805 			cur_pos += tuple_size;
5806 			break;
5807 		case BIT(DST_VPORT):
5808 			port_number = hclge_get_port_number(HOST_PORT, 0,
5809 							    rule->vf_id, 0);
5810 			hnae3_set_field(meta_data,
5811 					GENMASK(cur_pos + tuple_size, cur_pos),
5812 					cur_pos, port_number);
5813 			cur_pos += tuple_size;
5814 			break;
5815 		default:
5816 			break;
5817 		}
5818 	}
5819 
5820 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5821 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
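	/* the meta data fields are assembled from bit 0 upwards, so shift the
	 * calculated key words left to align them to the MSB side of the
	 * 32-bit meta data region.
	 */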
5822 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5823 
5824 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5825 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5826 }
5827 
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region, the tuple key in the
 * LSB region, and unused bits are filled with 0.
 */
5832 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5833 			    struct hclge_fd_rule *rule)
5834 {
5835 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5836 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5837 	u8 *cur_key_x, *cur_key_y;
5838 	u8 meta_data_region;
5839 	u8 tuple_size;
5840 	int ret;
5841 	u32 i;
5842 
5843 	memset(key_x, 0, sizeof(key_x));
5844 	memset(key_y, 0, sizeof(key_y));
5845 	cur_key_x = key_x;
5846 	cur_key_y = key_y;
5847 
	for (i = 0; i < MAX_TUPLE; i++) {
5849 		bool tuple_valid;
5850 
5851 		tuple_size = tuple_key_info[i].key_length / 8;
5852 		if (!(key_cfg->tuple_active & BIT(i)))
5853 			continue;
5854 
5855 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5856 						     cur_key_y, rule);
5857 		if (tuple_valid) {
5858 			cur_key_x += tuple_size;
5859 			cur_key_y += tuple_size;
5860 		}
5861 	}
5862 
5863 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5864 			MAX_META_DATA_LENGTH / 8;
5865 
5866 	hclge_fd_convert_meta_data(key_cfg,
5867 				   (__le32 *)(key_x + meta_data_region),
5868 				   (__le32 *)(key_y + meta_data_region),
5869 				   rule);
5870 
5871 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5872 				   true);
5873 	if (ret) {
5874 		dev_err(&hdev->pdev->dev,
5875 			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5877 		return ret;
5878 	}
5879 
5880 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5881 				   true);
5882 	if (ret)
5883 		dev_err(&hdev->pdev->dev,
5884 			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5886 	return ret;
5887 }
5888 
5889 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5890 			       struct hclge_fd_rule *rule)
5891 {
5892 	struct hclge_vport *vport = hdev->vport;
5893 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5894 	struct hclge_fd_ad_data ad_data;
5895 
5896 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5897 	ad_data.ad_id = rule->location;
5898 
5899 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5900 		ad_data.drop_packet = true;
5901 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5902 		ad_data.override_tc = true;
5903 		ad_data.queue_id =
5904 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5905 		ad_data.tc_size =
5906 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5907 	} else {
5908 		ad_data.forward_to_direct_queue = true;
5909 		ad_data.queue_id = rule->queue_id;
5910 	}
5911 
5912 	ad_data.use_counter = false;
5913 	ad_data.counter_id = 0;
5914 
5915 	ad_data.use_next_stage = false;
5916 	ad_data.next_input_key = 0;
5917 
5918 	ad_data.write_rule_id_to_bd = true;
5919 	ad_data.rule_id = rule->location;
5920 
5921 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5922 }
5923 
5924 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5925 				       u32 *unused_tuple)
5926 {
5927 	if (!spec || !unused_tuple)
5928 		return -EINVAL;
5929 
5930 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5931 
5932 	if (!spec->ip4src)
5933 		*unused_tuple |= BIT(INNER_SRC_IP);
5934 
5935 	if (!spec->ip4dst)
5936 		*unused_tuple |= BIT(INNER_DST_IP);
5937 
5938 	if (!spec->psrc)
5939 		*unused_tuple |= BIT(INNER_SRC_PORT);
5940 
5941 	if (!spec->pdst)
5942 		*unused_tuple |= BIT(INNER_DST_PORT);
5943 
5944 	if (!spec->tos)
5945 		*unused_tuple |= BIT(INNER_IP_TOS);
5946 
5947 	return 0;
5948 }
5949 
5950 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5951 				    u32 *unused_tuple)
5952 {
5953 	if (!spec || !unused_tuple)
5954 		return -EINVAL;
5955 
5956 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5957 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5958 
5959 	if (!spec->ip4src)
5960 		*unused_tuple |= BIT(INNER_SRC_IP);
5961 
5962 	if (!spec->ip4dst)
5963 		*unused_tuple |= BIT(INNER_DST_IP);
5964 
5965 	if (!spec->tos)
5966 		*unused_tuple |= BIT(INNER_IP_TOS);
5967 
5968 	if (!spec->proto)
5969 		*unused_tuple |= BIT(INNER_IP_PROTO);
5970 
5971 	if (spec->l4_4_bytes)
5972 		return -EOPNOTSUPP;
5973 
5974 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5975 		return -EOPNOTSUPP;
5976 
5977 	return 0;
5978 }
5979 
5980 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5981 				       u32 *unused_tuple)
5982 {
5983 	if (!spec || !unused_tuple)
5984 		return -EINVAL;
5985 
5986 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5987 
	/* check whether the src/dst ip address is used */
5989 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5990 		*unused_tuple |= BIT(INNER_SRC_IP);
5991 
5992 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5993 		*unused_tuple |= BIT(INNER_DST_IP);
5994 
5995 	if (!spec->psrc)
5996 		*unused_tuple |= BIT(INNER_SRC_PORT);
5997 
5998 	if (!spec->pdst)
5999 		*unused_tuple |= BIT(INNER_DST_PORT);
6000 
6001 	if (!spec->tclass)
6002 		*unused_tuple |= BIT(INNER_IP_TOS);
6003 
6004 	return 0;
6005 }
6006 
6007 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6008 				    u32 *unused_tuple)
6009 {
6010 	if (!spec || !unused_tuple)
6011 		return -EINVAL;
6012 
6013 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6014 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6015 
	/* check whether the src/dst ip address is used */
6017 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6018 		*unused_tuple |= BIT(INNER_SRC_IP);
6019 
6020 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6021 		*unused_tuple |= BIT(INNER_DST_IP);
6022 
6023 	if (!spec->l4_proto)
6024 		*unused_tuple |= BIT(INNER_IP_PROTO);
6025 
6026 	if (!spec->tclass)
6027 		*unused_tuple |= BIT(INNER_IP_TOS);
6028 
6029 	if (spec->l4_4_bytes)
6030 		return -EOPNOTSUPP;
6031 
6032 	return 0;
6033 }
6034 
6035 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6036 {
6037 	if (!spec || !unused_tuple)
6038 		return -EINVAL;
6039 
6040 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6041 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6042 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6043 
6044 	if (is_zero_ether_addr(spec->h_source))
6045 		*unused_tuple |= BIT(INNER_SRC_MAC);
6046 
6047 	if (is_zero_ether_addr(spec->h_dest))
6048 		*unused_tuple |= BIT(INNER_DST_MAC);
6049 
6050 	if (!spec->h_proto)
6051 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6052 
6053 	return 0;
6054 }
6055 
6056 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6057 				    struct ethtool_rx_flow_spec *fs,
6058 				    u32 *unused_tuple)
6059 {
6060 	if (fs->flow_type & FLOW_EXT) {
6061 		if (fs->h_ext.vlan_etype) {
6062 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6063 			return -EOPNOTSUPP;
6064 		}
6065 
6066 		if (!fs->h_ext.vlan_tci)
6067 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6068 
6069 		if (fs->m_ext.vlan_tci &&
6070 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6071 			dev_err(&hdev->pdev->dev,
6072 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6073 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6074 			return -EINVAL;
6075 		}
6076 	} else {
6077 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6078 	}
6079 
6080 	if (fs->flow_type & FLOW_MAC_EXT) {
6081 		if (hdev->fd_cfg.fd_mode !=
6082 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6083 			dev_err(&hdev->pdev->dev,
6084 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6085 			return -EOPNOTSUPP;
6086 		}
6087 
6088 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6089 			*unused_tuple |= BIT(INNER_DST_MAC);
6090 		else
6091 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6092 	}
6093 
6094 	return 0;
6095 }
6096 
6097 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6098 				       struct hclge_fd_user_def_info *info)
6099 {
6100 	switch (flow_type) {
6101 	case ETHER_FLOW:
6102 		info->layer = HCLGE_FD_USER_DEF_L2;
6103 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6104 		break;
6105 	case IP_USER_FLOW:
6106 	case IPV6_USER_FLOW:
6107 		info->layer = HCLGE_FD_USER_DEF_L3;
6108 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6109 		break;
6110 	case TCP_V4_FLOW:
6111 	case UDP_V4_FLOW:
6112 	case TCP_V6_FLOW:
6113 	case UDP_V6_FLOW:
6114 		info->layer = HCLGE_FD_USER_DEF_L4;
6115 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6116 		break;
6117 	default:
6118 		return -EOPNOTSUPP;
6119 	}
6120 
6121 	return 0;
6122 }
6123 
6124 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6125 {
6126 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6127 }
6128 
6129 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6130 					 struct ethtool_rx_flow_spec *fs,
6131 					 u32 *unused_tuple,
6132 					 struct hclge_fd_user_def_info *info)
6133 {
6134 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6135 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6136 	u16 data, offset, data_mask, offset_mask;
6137 	int ret;
6138 
6139 	info->layer = HCLGE_FD_USER_DEF_NONE;
6140 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6141 
6142 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6143 		return 0;
6144 
	/* The user-def data from ethtool is a 64-bit value; bits 0~15 are
	 * used for the match data, and bits 32~47 for the offset.
	 */
6148 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6149 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6150 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6151 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6152 
6153 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6154 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6155 		return -EOPNOTSUPP;
6156 	}
6157 
6158 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6159 		dev_err(&hdev->pdev->dev,
6160 			"user-def offset[%u] should be no more than %u\n",
6161 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6162 		return -EINVAL;
6163 	}
6164 
6165 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6166 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6167 		return -EINVAL;
6168 	}
6169 
6170 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6171 	if (ret) {
6172 		dev_err(&hdev->pdev->dev,
6173 			"unsupported flow type for user-def bytes, ret = %d\n",
6174 			ret);
6175 		return ret;
6176 	}
6177 
6178 	info->data = data;
6179 	info->data_mask = data_mask;
6180 	info->offset = offset;
6181 
6182 	return 0;
6183 }
6184 
6185 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6186 			       struct ethtool_rx_flow_spec *fs,
6187 			       u32 *unused_tuple,
6188 			       struct hclge_fd_user_def_info *info)
6189 {
6190 	u32 flow_type;
6191 	int ret;
6192 
6193 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6194 		dev_err(&hdev->pdev->dev,
6195 			"failed to config fd rules, invalid rule location: %u, max is %u\n.",
6196 			fs->location,
6197 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6198 		return -EINVAL;
6199 	}
6200 
6201 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6202 	if (ret)
6203 		return ret;
6204 
6205 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6206 	switch (flow_type) {
6207 	case SCTP_V4_FLOW:
6208 	case TCP_V4_FLOW:
6209 	case UDP_V4_FLOW:
6210 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6211 						  unused_tuple);
6212 		break;
6213 	case IP_USER_FLOW:
6214 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6215 					       unused_tuple);
6216 		break;
6217 	case SCTP_V6_FLOW:
6218 	case TCP_V6_FLOW:
6219 	case UDP_V6_FLOW:
6220 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6221 						  unused_tuple);
6222 		break;
6223 	case IPV6_USER_FLOW:
6224 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6225 					       unused_tuple);
6226 		break;
6227 	case ETHER_FLOW:
6228 		if (hdev->fd_cfg.fd_mode !=
6229 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6230 			dev_err(&hdev->pdev->dev,
6231 				"ETHER_FLOW is not supported in current fd mode!\n");
6232 			return -EOPNOTSUPP;
6233 		}
6234 
6235 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6236 						 unused_tuple);
6237 		break;
6238 	default:
6239 		dev_err(&hdev->pdev->dev,
6240 			"unsupported protocol type, protocol type = %#x\n",
6241 			flow_type);
6242 		return -EOPNOTSUPP;
6243 	}
6244 
6245 	if (ret) {
6246 		dev_err(&hdev->pdev->dev,
6247 			"failed to check flow union tuple, ret = %d\n",
6248 			ret);
6249 		return ret;
6250 	}
6251 
6252 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6253 }
6254 
6255 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6256 				      struct ethtool_rx_flow_spec *fs,
6257 				      struct hclge_fd_rule *rule, u8 ip_proto)
6258 {
6259 	rule->tuples.src_ip[IPV4_INDEX] =
6260 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6261 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6262 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6263 
6264 	rule->tuples.dst_ip[IPV4_INDEX] =
6265 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6266 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6267 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6268 
6269 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6270 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6271 
6272 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6273 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6274 
6275 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6276 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6277 
6278 	rule->tuples.ether_proto = ETH_P_IP;
6279 	rule->tuples_mask.ether_proto = 0xFFFF;
6280 
6281 	rule->tuples.ip_proto = ip_proto;
6282 	rule->tuples_mask.ip_proto = 0xFF;
6283 }
6284 
6285 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6286 				   struct ethtool_rx_flow_spec *fs,
6287 				   struct hclge_fd_rule *rule)
6288 {
6289 	rule->tuples.src_ip[IPV4_INDEX] =
6290 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6291 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6292 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6293 
6294 	rule->tuples.dst_ip[IPV4_INDEX] =
6295 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6296 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6297 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6298 
6299 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6300 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6301 
6302 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6303 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6304 
6305 	rule->tuples.ether_proto = ETH_P_IP;
6306 	rule->tuples_mask.ether_proto = 0xFFFF;
6307 }
6308 
6309 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6310 				      struct ethtool_rx_flow_spec *fs,
6311 				      struct hclge_fd_rule *rule, u8 ip_proto)
6312 {
6313 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6314 			  IPV6_SIZE);
6315 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6316 			  IPV6_SIZE);
6317 
6318 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6319 			  IPV6_SIZE);
6320 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6321 			  IPV6_SIZE);
6322 
6323 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6324 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6325 
6326 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6327 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6328 
6329 	rule->tuples.ether_proto = ETH_P_IPV6;
6330 	rule->tuples_mask.ether_proto = 0xFFFF;
6331 
6332 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6333 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6334 
6335 	rule->tuples.ip_proto = ip_proto;
6336 	rule->tuples_mask.ip_proto = 0xFF;
6337 }
6338 
6339 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6340 				   struct ethtool_rx_flow_spec *fs,
6341 				   struct hclge_fd_rule *rule)
6342 {
6343 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6344 			  IPV6_SIZE);
6345 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6346 			  IPV6_SIZE);
6347 
6348 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6349 			  IPV6_SIZE);
6350 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6351 			  IPV6_SIZE);
6352 
6353 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6354 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6355 
	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6358 
6359 	rule->tuples.ether_proto = ETH_P_IPV6;
6360 	rule->tuples_mask.ether_proto = 0xFFFF;
6361 }
6362 
6363 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6364 				     struct ethtool_rx_flow_spec *fs,
6365 				     struct hclge_fd_rule *rule)
6366 {
6367 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6368 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6369 
6370 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6371 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6372 
6373 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6374 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6375 }
6376 
6377 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6378 					struct hclge_fd_rule *rule)
6379 {
6380 	switch (info->layer) {
6381 	case HCLGE_FD_USER_DEF_L2:
6382 		rule->tuples.l2_user_def = info->data;
6383 		rule->tuples_mask.l2_user_def = info->data_mask;
6384 		break;
6385 	case HCLGE_FD_USER_DEF_L3:
6386 		rule->tuples.l3_user_def = info->data;
6387 		rule->tuples_mask.l3_user_def = info->data_mask;
6388 		break;
6389 	case HCLGE_FD_USER_DEF_L4:
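		/* the 16-bit user-def data is carried in the upper half of
		 * the 32-bit l4_user_def tuple, so it is shifted left by 16.
		 */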
6390 		rule->tuples.l4_user_def = (u32)info->data << 16;
6391 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6392 		break;
6393 	default:
6394 		break;
6395 	}
6396 
6397 	rule->ep.user_def = *info;
6398 }
6399 
6400 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6401 			      struct ethtool_rx_flow_spec *fs,
6402 			      struct hclge_fd_rule *rule,
6403 			      struct hclge_fd_user_def_info *info)
6404 {
6405 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6406 
6407 	switch (flow_type) {
6408 	case SCTP_V4_FLOW:
6409 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6410 		break;
6411 	case TCP_V4_FLOW:
6412 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6413 		break;
6414 	case UDP_V4_FLOW:
6415 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6416 		break;
6417 	case IP_USER_FLOW:
6418 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6419 		break;
6420 	case SCTP_V6_FLOW:
6421 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6422 		break;
6423 	case TCP_V6_FLOW:
6424 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6425 		break;
6426 	case UDP_V6_FLOW:
6427 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6428 		break;
6429 	case IPV6_USER_FLOW:
6430 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6431 		break;
6432 	case ETHER_FLOW:
6433 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6434 		break;
6435 	default:
6436 		return -EOPNOTSUPP;
6437 	}
6438 
6439 	if (fs->flow_type & FLOW_EXT) {
6440 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6441 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6442 		hclge_fd_get_user_def_tuple(info, rule);
6443 	}
6444 
6445 	if (fs->flow_type & FLOW_MAC_EXT) {
6446 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6447 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6448 	}
6449 
6450 	return 0;
6451 }
6452 
6453 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6454 				struct hclge_fd_rule *rule)
6455 {
6456 	int ret;
6457 
6458 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6459 	if (ret)
6460 		return ret;
6461 
6462 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6463 }
6464 
6465 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6466 				     struct hclge_fd_rule *rule)
6467 {
6468 	int ret;
6469 
6470 	spin_lock_bh(&hdev->fd_rule_lock);
6471 
6472 	if (hdev->fd_active_type != rule->rule_type &&
6473 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6474 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6475 		dev_err(&hdev->pdev->dev,
6476 			"mode conflict(new type %d, active type %d), please delete existent rules first\n",
6477 			rule->rule_type, hdev->fd_active_type);
6478 		spin_unlock_bh(&hdev->fd_rule_lock);
6479 		return -EINVAL;
6480 	}
6481 
6482 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6483 	if (ret)
6484 		goto out;
6485 
6486 	ret = hclge_clear_arfs_rules(hdev);
6487 	if (ret)
6488 		goto out;
6489 
6490 	ret = hclge_fd_config_rule(hdev, rule);
6491 	if (ret)
6492 		goto out;
6493 
6494 	rule->state = HCLGE_FD_ACTIVE;
6495 	hdev->fd_active_type = rule->rule_type;
6496 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6497 
6498 out:
6499 	spin_unlock_bh(&hdev->fd_rule_lock);
6500 	return ret;
6501 }
6502 
6503 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6504 {
6505 	struct hclge_vport *vport = hclge_get_vport(handle);
6506 	struct hclge_dev *hdev = vport->back;
6507 
6508 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6509 }
6510 
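/* The ethtool ring_cookie either requests drop (RX_CLS_FLOW_DISC) or encodes
 * the target queue in its low 32 bits and an optional VF id in the byte just
 * above them; see ethtool_get_flow_spec_ring() and
 * ethtool_get_flow_spec_ring_vf(). A VF id of 0 refers to the PF itself.
 */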
6511 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6512 				      u16 *vport_id, u8 *action, u16 *queue_id)
6513 {
6514 	struct hclge_vport *vport = hdev->vport;
6515 
6516 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6517 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6518 	} else {
6519 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6520 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6521 		u16 tqps;
6522 
6523 		if (vf > hdev->num_req_vfs) {
6524 			dev_err(&hdev->pdev->dev,
6525 				"Error: vf id (%u) > max vf num (%u)\n",
6526 				vf, hdev->num_req_vfs);
6527 			return -EINVAL;
6528 		}
6529 
6530 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6531 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6532 
6533 		if (ring >= tqps) {
6534 			dev_err(&hdev->pdev->dev,
6535 				"Error: queue id (%u) > max tqp num (%u)\n",
6536 				ring, tqps - 1);
6537 			return -EINVAL;
6538 		}
6539 
6540 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6541 		*queue_id = ring;
6542 	}
6543 
6544 	return 0;
6545 }
6546 
6547 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6548 			      struct ethtool_rxnfc *cmd)
6549 {
6550 	struct hclge_vport *vport = hclge_get_vport(handle);
6551 	struct hclge_dev *hdev = vport->back;
6552 	struct hclge_fd_user_def_info info;
6553 	u16 dst_vport_id = 0, q_index = 0;
6554 	struct ethtool_rx_flow_spec *fs;
6555 	struct hclge_fd_rule *rule;
6556 	u32 unused = 0;
6557 	u8 action;
6558 	int ret;
6559 
6560 	if (!hnae3_dev_fd_supported(hdev)) {
6561 		dev_err(&hdev->pdev->dev,
6562 			"flow table director is not supported\n");
6563 		return -EOPNOTSUPP;
6564 	}
6565 
6566 	if (!hdev->fd_en) {
6567 		dev_err(&hdev->pdev->dev,
6568 			"please enable flow director first\n");
6569 		return -EOPNOTSUPP;
6570 	}
6571 
6572 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6573 
6574 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6575 	if (ret)
6576 		return ret;
6577 
6578 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6579 					 &action, &q_index);
6580 	if (ret)
6581 		return ret;
6582 
6583 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6584 	if (!rule)
6585 		return -ENOMEM;
6586 
6587 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6588 	if (ret) {
6589 		kfree(rule);
6590 		return ret;
6591 	}
6592 
6593 	rule->flow_type = fs->flow_type;
6594 	rule->location = fs->location;
6595 	rule->unused_tuple = unused;
6596 	rule->vf_id = dst_vport_id;
6597 	rule->queue_id = q_index;
6598 	rule->action = action;
6599 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6600 
6601 	ret = hclge_add_fd_entry_common(hdev, rule);
6602 	if (ret)
6603 		kfree(rule);
6604 
6605 	return ret;
6606 }
6607 
6608 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6609 			      struct ethtool_rxnfc *cmd)
6610 {
6611 	struct hclge_vport *vport = hclge_get_vport(handle);
6612 	struct hclge_dev *hdev = vport->back;
6613 	struct ethtool_rx_flow_spec *fs;
6614 	int ret;
6615 
6616 	if (!hnae3_dev_fd_supported(hdev))
6617 		return -EOPNOTSUPP;
6618 
6619 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6620 
6621 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6622 		return -EINVAL;
6623 
6624 	spin_lock_bh(&hdev->fd_rule_lock);
6625 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6626 	    !test_bit(fs->location, hdev->fd_bmap)) {
6627 		dev_err(&hdev->pdev->dev,
6628 			"Delete fail, rule %u is inexistent\n", fs->location);
6629 		spin_unlock_bh(&hdev->fd_rule_lock);
6630 		return -ENOENT;
6631 	}
6632 
6633 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6634 				   NULL, false);
6635 	if (ret)
6636 		goto out;
6637 
6638 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6639 
6640 out:
6641 	spin_unlock_bh(&hdev->fd_rule_lock);
6642 	return ret;
6643 }
6644 
6645 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6646 					 bool clear_list)
6647 {
6648 	struct hclge_fd_rule *rule;
6649 	struct hlist_node *node;
6650 	u16 location;
6651 
6652 	if (!hnae3_dev_fd_supported(hdev))
6653 		return;
6654 
6655 	spin_lock_bh(&hdev->fd_rule_lock);
6656 
6657 	for_each_set_bit(location, hdev->fd_bmap,
6658 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6659 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6660 				     NULL, false);
6661 
6662 	if (clear_list) {
6663 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6664 					  rule_node) {
6665 			hlist_del(&rule->rule_node);
6666 			kfree(rule);
6667 		}
6668 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6669 		hdev->hclge_fd_rule_num = 0;
6670 		bitmap_zero(hdev->fd_bmap,
6671 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6672 	}
6673 
6674 	spin_unlock_bh(&hdev->fd_rule_lock);
6675 }
6676 
6677 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6678 {
6679 	hclge_clear_fd_rules_in_list(hdev, true);
6680 	hclge_fd_disable_user_def(hdev);
6681 }
6682 
6683 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6684 {
6685 	struct hclge_vport *vport = hclge_get_vport(handle);
6686 	struct hclge_dev *hdev = vport->back;
6687 	struct hclge_fd_rule *rule;
6688 	struct hlist_node *node;
6689 
6690 	/* Return ok here, because reset error handling will check this
6691 	 * return value. If error is returned here, the reset process will
6692 	 * fail.
6693 	 */
6694 	if (!hnae3_dev_fd_supported(hdev))
6695 		return 0;
6696 
6697 	/* if fd is disabled, should not restore it when reset */
6698 	if (!hdev->fd_en)
6699 		return 0;
6700 
6701 	spin_lock_bh(&hdev->fd_rule_lock);
6702 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6703 		if (rule->state == HCLGE_FD_ACTIVE)
6704 			rule->state = HCLGE_FD_TO_ADD;
6705 	}
6706 	spin_unlock_bh(&hdev->fd_rule_lock);
6707 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6708 
6709 	return 0;
6710 }
6711 
6712 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6713 				 struct ethtool_rxnfc *cmd)
6714 {
6715 	struct hclge_vport *vport = hclge_get_vport(handle);
6716 	struct hclge_dev *hdev = vport->back;
6717 
6718 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6719 		return -EOPNOTSUPP;
6720 
6721 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6722 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6723 
6724 	return 0;
6725 }
6726 
6727 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6728 				     struct ethtool_tcpip4_spec *spec,
6729 				     struct ethtool_tcpip4_spec *spec_mask)
6730 {
6731 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6732 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6733 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6734 
6735 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6736 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6737 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6738 
6739 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6740 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6741 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6742 
6743 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6744 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6745 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6746 
6747 	spec->tos = rule->tuples.ip_tos;
6748 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6749 			0 : rule->tuples_mask.ip_tos;
6750 }
6751 
6752 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6753 				  struct ethtool_usrip4_spec *spec,
6754 				  struct ethtool_usrip4_spec *spec_mask)
6755 {
6756 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6757 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6758 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6759 
6760 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6761 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6762 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6763 
6764 	spec->tos = rule->tuples.ip_tos;
6765 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6766 			0 : rule->tuples_mask.ip_tos;
6767 
6768 	spec->proto = rule->tuples.ip_proto;
6769 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6770 			0 : rule->tuples_mask.ip_proto;
6771 
6772 	spec->ip_ver = ETH_RX_NFC_IP4;
6773 }
6774 
6775 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6776 				     struct ethtool_tcpip6_spec *spec,
6777 				     struct ethtool_tcpip6_spec *spec_mask)
6778 {
6779 	cpu_to_be32_array(spec->ip6src,
6780 			  rule->tuples.src_ip, IPV6_SIZE);
6781 	cpu_to_be32_array(spec->ip6dst,
6782 			  rule->tuples.dst_ip, IPV6_SIZE);
6783 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6784 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6785 	else
6786 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6787 				  IPV6_SIZE);
6788 
6789 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6790 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6791 	else
6792 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6793 				  IPV6_SIZE);
6794 
6795 	spec->tclass = rule->tuples.ip_tos;
6796 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6797 			0 : rule->tuples_mask.ip_tos;
6798 
6799 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6800 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6801 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6802 
6803 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6804 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6805 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6806 }
6807 
6808 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6809 				  struct ethtool_usrip6_spec *spec,
6810 				  struct ethtool_usrip6_spec *spec_mask)
6811 {
6812 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6813 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6814 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6815 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6816 	else
6817 		cpu_to_be32_array(spec_mask->ip6src,
6818 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6819 
6820 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6821 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6822 	else
6823 		cpu_to_be32_array(spec_mask->ip6dst,
6824 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6825 
6826 	spec->tclass = rule->tuples.ip_tos;
6827 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6828 			0 : rule->tuples_mask.ip_tos;
6829 
6830 	spec->l4_proto = rule->tuples.ip_proto;
6831 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6832 			0 : rule->tuples_mask.ip_proto;
6833 }
6834 
6835 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6836 				    struct ethhdr *spec,
6837 				    struct ethhdr *spec_mask)
6838 {
6839 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6840 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6841 
6842 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6843 		eth_zero_addr(spec_mask->h_source);
6844 	else
6845 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6846 
6847 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6848 		eth_zero_addr(spec_mask->h_dest);
6849 	else
6850 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6851 
6852 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6853 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6854 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6855 }
6856 
6857 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6858 				       struct hclge_fd_rule *rule)
6859 {
6860 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6861 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6862 		fs->h_ext.data[0] = 0;
6863 		fs->h_ext.data[1] = 0;
6864 		fs->m_ext.data[0] = 0;
6865 		fs->m_ext.data[1] = 0;
6866 	} else {
6867 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6868 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6869 		fs->m_ext.data[0] =
6870 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6871 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6872 	}
6873 }
6874 
6875 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6876 				  struct hclge_fd_rule *rule)
6877 {
6878 	if (fs->flow_type & FLOW_EXT) {
6879 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6880 		fs->m_ext.vlan_tci =
6881 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6882 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6883 
6884 		hclge_fd_get_user_def_info(fs, rule);
6885 	}
6886 
6887 	if (fs->flow_type & FLOW_MAC_EXT) {
6888 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6889 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6890 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6891 		else
6892 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6893 					rule->tuples_mask.dst_mac);
6894 	}
6895 }
6896 
6897 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6898 				  struct ethtool_rxnfc *cmd)
6899 {
6900 	struct hclge_vport *vport = hclge_get_vport(handle);
6901 	struct hclge_fd_rule *rule = NULL;
6902 	struct hclge_dev *hdev = vport->back;
6903 	struct ethtool_rx_flow_spec *fs;
6904 	struct hlist_node *node2;
6905 
6906 	if (!hnae3_dev_fd_supported(hdev))
6907 		return -EOPNOTSUPP;
6908 
6909 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6910 
6911 	spin_lock_bh(&hdev->fd_rule_lock);
6912 
6913 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6914 		if (rule->location >= fs->location)
6915 			break;
6916 	}
6917 
6918 	if (!rule || fs->location != rule->location) {
6919 		spin_unlock_bh(&hdev->fd_rule_lock);
6920 
6921 		return -ENOENT;
6922 	}
6923 
6924 	fs->flow_type = rule->flow_type;
6925 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6926 	case SCTP_V4_FLOW:
6927 	case TCP_V4_FLOW:
6928 	case UDP_V4_FLOW:
6929 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6930 					 &fs->m_u.tcp_ip4_spec);
6931 		break;
6932 	case IP_USER_FLOW:
6933 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6934 				      &fs->m_u.usr_ip4_spec);
6935 		break;
6936 	case SCTP_V6_FLOW:
6937 	case TCP_V6_FLOW:
6938 	case UDP_V6_FLOW:
6939 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6940 					 &fs->m_u.tcp_ip6_spec);
6941 		break;
6942 	case IPV6_USER_FLOW:
6943 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6944 				      &fs->m_u.usr_ip6_spec);
6945 		break;
	/* The flow type of the fd rule has been checked before adding it to
	 * the rule list. As all other flow types have been handled above, it
	 * must be ETHER_FLOW for the default case.
	 */
6950 	default:
6951 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6952 					&fs->m_u.ether_spec);
6953 		break;
6954 	}
6955 
6956 	hclge_fd_get_ext_info(fs, rule);
6957 
6958 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6959 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6960 	} else {
6961 		u64 vf_id;
6962 
6963 		fs->ring_cookie = rule->queue_id;
6964 		vf_id = rule->vf_id;
6965 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6966 		fs->ring_cookie |= vf_id;
6967 	}
6968 
6969 	spin_unlock_bh(&hdev->fd_rule_lock);
6970 
6971 	return 0;
6972 }
6973 
6974 static int hclge_get_all_rules(struct hnae3_handle *handle,
6975 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6976 {
6977 	struct hclge_vport *vport = hclge_get_vport(handle);
6978 	struct hclge_dev *hdev = vport->back;
6979 	struct hclge_fd_rule *rule;
6980 	struct hlist_node *node2;
6981 	int cnt = 0;
6982 
6983 	if (!hnae3_dev_fd_supported(hdev))
6984 		return -EOPNOTSUPP;
6985 
6986 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6987 
6988 	spin_lock_bh(&hdev->fd_rule_lock);
6989 	hlist_for_each_entry_safe(rule, node2,
6990 				  &hdev->fd_rule_list, rule_node) {
6991 		if (cnt == cmd->rule_cnt) {
6992 			spin_unlock_bh(&hdev->fd_rule_lock);
6993 			return -EMSGSIZE;
6994 		}
6995 
6996 		if (rule->state == HCLGE_FD_TO_DEL)
6997 			continue;
6998 
6999 		rule_locs[cnt] = rule->location;
7000 		cnt++;
7001 	}
7002 
7003 	spin_unlock_bh(&hdev->fd_rule_lock);
7004 
7005 	cmd->rule_cnt = cnt;
7006 
7007 	return 0;
7008 }
7009 
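/* Convert the flow keys extracted by the flow dissector into the driver's
 * rule tuple format, converting multi-byte fields to host byte order. An
 * IPv4 address is stored in the last word of the IPv6-sized address array.
 */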
7010 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7011 				     struct hclge_fd_rule_tuples *tuples)
7012 {
7013 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7014 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7015 
7016 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7017 	tuples->ip_proto = fkeys->basic.ip_proto;
7018 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7019 
7020 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7021 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7022 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7023 	} else {
7024 		int i;
7025 
7026 		for (i = 0; i < IPV6_SIZE; i++) {
7027 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7028 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7029 		}
7030 	}
7031 }
7032 
/* traverse all rules, check whether an existing rule has the same tuples */
7034 static struct hclge_fd_rule *
7035 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7036 			  const struct hclge_fd_rule_tuples *tuples)
7037 {
7038 	struct hclge_fd_rule *rule = NULL;
7039 	struct hlist_node *node;
7040 
7041 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7042 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7043 			return rule;
7044 	}
7045 
7046 	return NULL;
7047 }
7048 
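/* Build an aRFS rule from the extracted flow tuples: the MAC, VLAN, TOS and
 * source port tuples are marked unused, and the tuples mask is set to all
 * ones so the remaining fields must match exactly.
 */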
7049 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7050 				     struct hclge_fd_rule *rule)
7051 {
7052 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7053 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7054 			     BIT(INNER_SRC_PORT);
7055 	rule->action = 0;
7056 	rule->vf_id = 0;
7057 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7058 	rule->state = HCLGE_FD_TO_ADD;
7059 	if (tuples->ether_proto == ETH_P_IP) {
7060 		if (tuples->ip_proto == IPPROTO_TCP)
7061 			rule->flow_type = TCP_V4_FLOW;
7062 		else
7063 			rule->flow_type = UDP_V4_FLOW;
7064 	} else {
7065 		if (tuples->ip_proto == IPPROTO_TCP)
7066 			rule->flow_type = TCP_V6_FLOW;
7067 		else
7068 			rule->flow_type = UDP_V6_FLOW;
7069 	}
7070 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7071 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7072 }
7073 
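/* aRFS entry: look up or allocate a flow director rule for the given flow
 * and steer it to @queue_id. Returns the rule location (used as the filter
 * id) on success, or a negative errno.
 */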
7074 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7075 				      u16 flow_id, struct flow_keys *fkeys)
7076 {
7077 	struct hclge_vport *vport = hclge_get_vport(handle);
7078 	struct hclge_fd_rule_tuples new_tuples = {};
7079 	struct hclge_dev *hdev = vport->back;
7080 	struct hclge_fd_rule *rule;
7081 	u16 bit_id;
7082 
7083 	if (!hnae3_dev_fd_supported(hdev))
7084 		return -EOPNOTSUPP;
7085 
	/* if there are flow director rules already added by the user,
	 * arfs should not work
	 */
7089 	spin_lock_bh(&hdev->fd_rule_lock);
7090 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7091 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7092 		spin_unlock_bh(&hdev->fd_rule_lock);
7093 		return -EOPNOTSUPP;
7094 	}
7095 
7096 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7097 
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
7103 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7104 	if (!rule) {
7105 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7106 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7107 			spin_unlock_bh(&hdev->fd_rule_lock);
7108 			return -ENOSPC;
7109 		}
7110 
7111 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7112 		if (!rule) {
7113 			spin_unlock_bh(&hdev->fd_rule_lock);
7114 			return -ENOMEM;
7115 		}
7116 
7117 		rule->location = bit_id;
7118 		rule->arfs.flow_id = flow_id;
7119 		rule->queue_id = queue_id;
7120 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7121 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7122 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7123 	} else if (rule->queue_id != queue_id) {
7124 		rule->queue_id = queue_id;
7125 		rule->state = HCLGE_FD_TO_ADD;
7126 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7127 		hclge_task_schedule(hdev, 0);
7128 	}
7129 	spin_unlock_bh(&hdev->fd_rule_lock);
7130 	return rule->location;
7131 }
7132 
7133 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7134 {
7135 #ifdef CONFIG_RFS_ACCEL
7136 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7137 	struct hclge_fd_rule *rule;
7138 	struct hlist_node *node;
7139 
7140 	spin_lock_bh(&hdev->fd_rule_lock);
7141 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7142 		spin_unlock_bh(&hdev->fd_rule_lock);
7143 		return;
7144 	}
7145 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7146 		if (rule->state != HCLGE_FD_ACTIVE)
7147 			continue;
7148 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7149 					rule->arfs.flow_id, rule->location)) {
7150 			rule->state = HCLGE_FD_TO_DEL;
7151 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7152 		}
7153 	}
7154 	spin_unlock_bh(&hdev->fd_rule_lock);
7155 #endif
7156 }
7157 
/* This function must be called with fd_rule_lock held */
7159 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7160 {
7161 #ifdef CONFIG_RFS_ACCEL
7162 	struct hclge_fd_rule *rule;
7163 	struct hlist_node *node;
7164 	int ret;
7165 
7166 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7167 		return 0;
7168 
7169 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7170 		switch (rule->state) {
7171 		case HCLGE_FD_TO_DEL:
7172 		case HCLGE_FD_ACTIVE:
7173 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7174 						   rule->location, NULL, false);
7175 			if (ret)
7176 				return ret;
7177 			fallthrough;
7178 		case HCLGE_FD_TO_ADD:
7179 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7180 			hlist_del(&rule->rule_node);
7181 			kfree(rule);
7182 			break;
7183 		default:
7184 			break;
7185 		}
7186 	}
7187 	hclge_sync_fd_state(hdev);
7188 
7189 #endif
7190 	return 0;
7191 }
7192 
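/* The hclge_get_cls_key_* helpers below translate tc flower matches into
 * flow director rule tuples; any key not present in the match is marked
 * as unused in the rule.
 */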
7193 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7194 				    struct hclge_fd_rule *rule)
7195 {
7196 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7197 		struct flow_match_basic match;
7198 		u16 ethtype_key, ethtype_mask;
7199 
7200 		flow_rule_match_basic(flow, &match);
7201 		ethtype_key = ntohs(match.key->n_proto);
7202 		ethtype_mask = ntohs(match.mask->n_proto);
7203 
7204 		if (ethtype_key == ETH_P_ALL) {
7205 			ethtype_key = 0;
7206 			ethtype_mask = 0;
7207 		}
7208 		rule->tuples.ether_proto = ethtype_key;
7209 		rule->tuples_mask.ether_proto = ethtype_mask;
7210 		rule->tuples.ip_proto = match.key->ip_proto;
7211 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7212 	} else {
7213 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7214 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7215 	}
7216 }
7217 
7218 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7219 				  struct hclge_fd_rule *rule)
7220 {
7221 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7222 		struct flow_match_eth_addrs match;
7223 
7224 		flow_rule_match_eth_addrs(flow, &match);
7225 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7226 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7227 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7228 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7229 	} else {
7230 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7231 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7232 	}
7233 }
7234 
7235 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7236 				   struct hclge_fd_rule *rule)
7237 {
7238 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7239 		struct flow_match_vlan match;
7240 
7241 		flow_rule_match_vlan(flow, &match);
7242 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7243 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7244 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7245 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7246 	} else {
7247 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7248 	}
7249 }
7250 
7251 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7252 				 struct hclge_fd_rule *rule)
7253 {
7254 	u16 addr_type = 0;
7255 
7256 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7257 		struct flow_match_control match;
7258 
7259 		flow_rule_match_control(flow, &match);
7260 		addr_type = match.key->addr_type;
7261 	}
7262 
7263 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7264 		struct flow_match_ipv4_addrs match;
7265 
7266 		flow_rule_match_ipv4_addrs(flow, &match);
7267 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7268 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7269 						be32_to_cpu(match.mask->src);
7270 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7271 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7272 						be32_to_cpu(match.mask->dst);
7273 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7274 		struct flow_match_ipv6_addrs match;
7275 
7276 		flow_rule_match_ipv6_addrs(flow, &match);
7277 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7278 				  IPV6_SIZE);
7279 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7280 				  match.mask->src.s6_addr32, IPV6_SIZE);
7281 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7282 				  IPV6_SIZE);
7283 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7284 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7285 	} else {
7286 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7287 		rule->unused_tuple |= BIT(INNER_DST_IP);
7288 	}
7289 }
7290 
7291 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7292 				   struct hclge_fd_rule *rule)
7293 {
7294 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7295 		struct flow_match_ports match;
7296 
7297 		flow_rule_match_ports(flow, &match);
7298 
7299 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7300 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7301 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7302 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7303 	} else {
7304 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7305 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7306 	}
7307 }
7308 
7309 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7310 				  struct flow_cls_offload *cls_flower,
7311 				  struct hclge_fd_rule *rule)
7312 {
7313 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7314 	struct flow_dissector *dissector = flow->match.dissector;
7315 
7316 	if (dissector->used_keys &
7317 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7318 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7319 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7320 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7321 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7322 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7323 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7324 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7325 			dissector->used_keys);
7326 		return -EOPNOTSUPP;
7327 	}
7328 
7329 	hclge_get_cls_key_basic(flow, rule);
7330 	hclge_get_cls_key_mac(flow, rule);
7331 	hclge_get_cls_key_vlan(flow, rule);
7332 	hclge_get_cls_key_ip(flow, rule);
7333 	hclge_get_cls_key_port(flow, rule);
7334 
7335 	return 0;
7336 }
7337 
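/* Validate the tc and priority of a flower rule. The flower priority is
 * used as the rule location (prio - 1), so it must not exceed the stage 1
 * rule number and must not collide with an existing rule.
 */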
7338 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7339 				  struct flow_cls_offload *cls_flower, int tc)
7340 {
7341 	u32 prio = cls_flower->common.prio;
7342 
7343 	if (tc < 0 || tc > hdev->tc_max) {
7344 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7345 		return -EINVAL;
7346 	}
7347 
7348 	if (prio == 0 ||
7349 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7350 		dev_err(&hdev->pdev->dev,
7351 			"prio %u should be in range[1, %u]\n",
7352 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7353 		return -EINVAL;
7354 	}
7355 
7356 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7357 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7358 		return -EINVAL;
7359 	}
7360 	return 0;
7361 }
7362 
7363 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7364 				struct flow_cls_offload *cls_flower,
7365 				int tc)
7366 {
7367 	struct hclge_vport *vport = hclge_get_vport(handle);
7368 	struct hclge_dev *hdev = vport->back;
7369 	struct hclge_fd_rule *rule;
7370 	int ret;
7371 
7372 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7373 	if (ret) {
7374 		dev_err(&hdev->pdev->dev,
7375 			"failed to check cls flower params, ret = %d\n", ret);
7376 		return ret;
7377 	}
7378 
7379 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7380 	if (!rule)
7381 		return -ENOMEM;
7382 
7383 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7384 	if (ret) {
7385 		kfree(rule);
7386 		return ret;
7387 	}
7388 
7389 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7390 	rule->cls_flower.tc = tc;
7391 	rule->location = cls_flower->common.prio - 1;
7392 	rule->vf_id = 0;
7393 	rule->cls_flower.cookie = cls_flower->cookie;
7394 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7395 
7396 	ret = hclge_add_fd_entry_common(hdev, rule);
7397 	if (ret)
7398 		kfree(rule);
7399 
7400 	return ret;
7401 }
7402 
7403 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7404 						   unsigned long cookie)
7405 {
7406 	struct hclge_fd_rule *rule;
7407 	struct hlist_node *node;
7408 
7409 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7410 		if (rule->cls_flower.cookie == cookie)
7411 			return rule;
7412 	}
7413 
7414 	return NULL;
7415 }
7416 
7417 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7418 				struct flow_cls_offload *cls_flower)
7419 {
7420 	struct hclge_vport *vport = hclge_get_vport(handle);
7421 	struct hclge_dev *hdev = vport->back;
7422 	struct hclge_fd_rule *rule;
7423 	int ret;
7424 
7425 	spin_lock_bh(&hdev->fd_rule_lock);
7426 
7427 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7428 	if (!rule) {
7429 		spin_unlock_bh(&hdev->fd_rule_lock);
7430 		return -EINVAL;
7431 	}
7432 
7433 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7434 				   NULL, false);
7435 	if (ret) {
7436 		spin_unlock_bh(&hdev->fd_rule_lock);
7437 		return ret;
7438 	}
7439 
7440 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7441 	spin_unlock_bh(&hdev->fd_rule_lock);
7442 
7443 	return 0;
7444 }
7445 
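/* Push pending rule state changes to hardware: program rules in TO_ADD
 * state and remove rules in TO_DEL state. If any hardware operation fails,
 * the FD_TBL_CHANGED flag is set again so the sync is retried later.
 */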
7446 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7447 {
7448 	struct hclge_fd_rule *rule;
7449 	struct hlist_node *node;
7450 	int ret = 0;
7451 
7452 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7453 		return;
7454 
7455 	spin_lock_bh(&hdev->fd_rule_lock);
7456 
7457 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7458 		switch (rule->state) {
7459 		case HCLGE_FD_TO_ADD:
7460 			ret = hclge_fd_config_rule(hdev, rule);
7461 			if (ret)
7462 				goto out;
7463 			rule->state = HCLGE_FD_ACTIVE;
7464 			break;
7465 		case HCLGE_FD_TO_DEL:
7466 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7467 						   rule->location, NULL, false);
7468 			if (ret)
7469 				goto out;
7470 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7471 			hclge_fd_free_node(hdev, rule);
7472 			break;
7473 		default:
7474 			break;
7475 		}
7476 	}
7477 
7478 out:
7479 	if (ret)
7480 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7481 
7482 	spin_unlock_bh(&hdev->fd_rule_lock);
7483 }
7484 
7485 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7486 {
7487 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7488 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7489 
7490 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7491 	}
7492 
7493 	hclge_sync_fd_user_def_cfg(hdev, false);
7494 
7495 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7496 }
7497 
7498 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7499 {
7500 	struct hclge_vport *vport = hclge_get_vport(handle);
7501 	struct hclge_dev *hdev = vport->back;
7502 
7503 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7504 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7505 }
7506 
7507 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7508 {
7509 	struct hclge_vport *vport = hclge_get_vport(handle);
7510 	struct hclge_dev *hdev = vport->back;
7511 
7512 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7513 }
7514 
7515 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7516 {
7517 	struct hclge_vport *vport = hclge_get_vport(handle);
7518 	struct hclge_dev *hdev = vport->back;
7519 
7520 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7521 }
7522 
7523 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7524 {
7525 	struct hclge_vport *vport = hclge_get_vport(handle);
7526 	struct hclge_dev *hdev = vport->back;
7527 
7528 	return hdev->rst_stats.hw_reset_done_cnt;
7529 }
7530 
7531 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7532 {
7533 	struct hclge_vport *vport = hclge_get_vport(handle);
7534 	struct hclge_dev *hdev = vport->back;
7535 
7536 	hdev->fd_en = enable;
7537 
7538 	if (!enable)
7539 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7540 	else
7541 		hclge_restore_fd_entries(handle);
7542 
7543 	hclge_task_schedule(hdev, 0);
7544 }
7545 
7546 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7547 {
7548 	struct hclge_desc desc;
7549 	struct hclge_config_mac_mode_cmd *req =
7550 		(struct hclge_config_mac_mode_cmd *)desc.data;
7551 	u32 loop_en = 0;
7552 	int ret;
7553 
7554 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7555 
7556 	if (enable) {
7557 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7558 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7559 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7560 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7561 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7562 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7563 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7564 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7565 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7566 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7567 	}
7568 
7569 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7570 
7571 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7572 	if (ret)
7573 		dev_err(&hdev->pdev->dev,
7574 			"mac enable fail, ret =%d.\n", ret);
7575 }
7576 
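/* Read the current MAC VLAN switch parameter of the given function, apply
 * @switch_param under @param_mask, and write the result back.
 */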
7577 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7578 				     u8 switch_param, u8 param_mask)
7579 {
7580 	struct hclge_mac_vlan_switch_cmd *req;
7581 	struct hclge_desc desc;
7582 	u32 func_id;
7583 	int ret;
7584 
7585 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7586 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7587 
7588 	/* read current config parameter */
7589 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7590 				   true);
7591 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7592 	req->func_id = cpu_to_le32(func_id);
7593 
7594 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7595 	if (ret) {
7596 		dev_err(&hdev->pdev->dev,
7597 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7598 		return ret;
7599 	}
7600 
7601 	/* modify and write new config parameter */
7602 	hclge_cmd_reuse_desc(&desc, false);
7603 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7604 	req->param_mask = param_mask;
7605 
7606 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7607 	if (ret)
7608 		dev_err(&hdev->pdev->dev,
7609 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7610 	return ret;
7611 }
7612 
7613 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7614 				       int link_ret)
7615 {
7616 #define HCLGE_PHY_LINK_STATUS_NUM  200
7617 
7618 	struct phy_device *phydev = hdev->hw.mac.phydev;
7619 	int i = 0;
7620 	int ret;
7621 
7622 	do {
7623 		ret = phy_read_status(phydev);
7624 		if (ret) {
7625 			dev_err(&hdev->pdev->dev,
7626 				"phy update link status fail, ret = %d\n", ret);
7627 			return;
7628 		}
7629 
7630 		if (phydev->link == link_ret)
7631 			break;
7632 
7633 		msleep(HCLGE_LINK_STATUS_MS);
7634 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7635 }
7636 
7637 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7638 {
7639 #define HCLGE_MAC_LINK_STATUS_NUM  100
7640 
7641 	int link_status;
7642 	int i = 0;
7643 	int ret;
7644 
7645 	do {
7646 		ret = hclge_get_mac_link_status(hdev, &link_status);
7647 		if (ret)
7648 			return ret;
7649 		if (link_status == link_ret)
7650 			return 0;
7651 
7652 		msleep(HCLGE_LINK_STATUS_MS);
7653 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7654 	return -EBUSY;
7655 }
7656 
7657 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7658 					  bool is_phy)
7659 {
7660 	int link_ret;
7661 
7662 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7663 
7664 	if (is_phy)
7665 		hclge_phy_link_status_wait(hdev, link_ret);
7666 
7667 	return hclge_mac_link_status_wait(hdev, link_ret);
7668 }
7669 
7670 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7671 {
7672 	struct hclge_config_mac_mode_cmd *req;
7673 	struct hclge_desc desc;
7674 	u32 loop_en;
7675 	int ret;
7676 
7677 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
7679 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7680 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7681 	if (ret) {
7682 		dev_err(&hdev->pdev->dev,
7683 			"mac loopback get fail, ret =%d.\n", ret);
7684 		return ret;
7685 	}
7686 
7687 	/* 2 Then setup the loopback flag */
7688 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7689 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7690 
7691 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7692 
	/* 3 Config mac work mode with loopback flag
	 * and its original configuration parameters
	 */
7696 	hclge_cmd_reuse_desc(&desc, false);
7697 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7698 	if (ret)
7699 		dev_err(&hdev->pdev->dev,
7700 			"mac loopback set fail, ret =%d.\n", ret);
7701 	return ret;
7702 }
7703 
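/* Configure common (serdes/PHY) loopback through the command queue and poll
 * the result field until the done bit is set or the retry limit is reached.
 * Returns -EBUSY on timeout and -EIO if the firmware reports failure.
 */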
7704 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7705 				     enum hnae3_loop loop_mode)
7706 {
7707 #define HCLGE_COMMON_LB_RETRY_MS	10
7708 #define HCLGE_COMMON_LB_RETRY_NUM	100
7709 
7710 	struct hclge_common_lb_cmd *req;
7711 	struct hclge_desc desc;
7712 	int ret, i = 0;
7713 	u8 loop_mode_b;
7714 
7715 	req = (struct hclge_common_lb_cmd *)desc.data;
7716 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7717 
7718 	switch (loop_mode) {
7719 	case HNAE3_LOOP_SERIAL_SERDES:
7720 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7721 		break;
7722 	case HNAE3_LOOP_PARALLEL_SERDES:
7723 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7724 		break;
7725 	case HNAE3_LOOP_PHY:
7726 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7727 		break;
7728 	default:
7729 		dev_err(&hdev->pdev->dev,
7730 			"unsupported common loopback mode %d\n", loop_mode);
7731 		return -ENOTSUPP;
7732 	}
7733 
7734 	if (en) {
7735 		req->enable = loop_mode_b;
7736 		req->mask = loop_mode_b;
7737 	} else {
7738 		req->mask = loop_mode_b;
7739 	}
7740 
7741 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7742 	if (ret) {
7743 		dev_err(&hdev->pdev->dev,
7744 			"common loopback set fail, ret = %d\n", ret);
7745 		return ret;
7746 	}
7747 
7748 	do {
7749 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7750 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7751 					   true);
7752 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7753 		if (ret) {
7754 			dev_err(&hdev->pdev->dev,
7755 				"common loopback get, ret = %d\n", ret);
7756 			return ret;
7757 		}
7758 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7759 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7760 
7761 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7762 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7763 		return -EBUSY;
7764 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7765 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7766 		return -EIO;
7767 	}
7768 	return ret;
7769 }
7770 
7771 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7772 				     enum hnae3_loop loop_mode)
7773 {
7774 	int ret;
7775 
7776 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7777 	if (ret)
7778 		return ret;
7779 
7780 	hclge_cfg_mac_mode(hdev, en);
7781 
7782 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7783 	if (ret)
7784 		dev_err(&hdev->pdev->dev,
7785 			"serdes loopback config mac mode timeout\n");
7786 
7787 	return ret;
7788 }
7789 
7790 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7791 				     struct phy_device *phydev)
7792 {
7793 	int ret;
7794 
7795 	if (!phydev->suspended) {
7796 		ret = phy_suspend(phydev);
7797 		if (ret)
7798 			return ret;
7799 	}
7800 
7801 	ret = phy_resume(phydev);
7802 	if (ret)
7803 		return ret;
7804 
7805 	return phy_loopback(phydev, true);
7806 }
7807 
7808 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7809 				      struct phy_device *phydev)
7810 {
7811 	int ret;
7812 
7813 	ret = phy_loopback(phydev, false);
7814 	if (ret)
7815 		return ret;
7816 
7817 	return phy_suspend(phydev);
7818 }
7819 
7820 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7821 {
7822 	struct phy_device *phydev = hdev->hw.mac.phydev;
7823 	int ret;
7824 
7825 	if (!phydev) {
7826 		if (hnae3_dev_phy_imp_supported(hdev))
7827 			return hclge_set_common_loopback(hdev, en,
7828 							 HNAE3_LOOP_PHY);
7829 		return -ENOTSUPP;
7830 	}
7831 
7832 	if (en)
7833 		ret = hclge_enable_phy_loopback(hdev, phydev);
7834 	else
7835 		ret = hclge_disable_phy_loopback(hdev, phydev);
7836 	if (ret) {
7837 		dev_err(&hdev->pdev->dev,
7838 			"set phy loopback fail, ret = %d\n", ret);
7839 		return ret;
7840 	}
7841 
7842 	hclge_cfg_mac_mode(hdev, en);
7843 
7844 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7845 	if (ret)
7846 		dev_err(&hdev->pdev->dev,
7847 			"phy loopback config mac mode timeout\n");
7848 
7849 	return ret;
7850 }
7851 
7852 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7853 				     u16 stream_id, bool enable)
7854 {
7855 	struct hclge_desc desc;
7856 	struct hclge_cfg_com_tqp_queue_cmd *req =
7857 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7858 
7859 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7860 	req->tqp_id = cpu_to_le16(tqp_id);
7861 	req->stream_id = cpu_to_le16(stream_id);
7862 	if (enable)
7863 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7864 
7865 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7866 }
7867 
7868 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7869 {
7870 	struct hclge_vport *vport = hclge_get_vport(handle);
7871 	struct hclge_dev *hdev = vport->back;
7872 	int ret;
7873 	u16 i;
7874 
7875 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7876 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7877 		if (ret)
7878 			return ret;
7879 	}
7880 	return 0;
7881 }
7882 
7883 static int hclge_set_loopback(struct hnae3_handle *handle,
7884 			      enum hnae3_loop loop_mode, bool en)
7885 {
7886 	struct hclge_vport *vport = hclge_get_vport(handle);
7887 	struct hclge_dev *hdev = vport->back;
7888 	int ret;
7889 
7890 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7891 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7892 	 * the same, the packets are looped back in the SSU. If SSU loopback
7893 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7894 	 */
7895 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7896 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7897 
7898 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7899 						HCLGE_SWITCH_ALW_LPBK_MASK);
7900 		if (ret)
7901 			return ret;
7902 	}
7903 
7904 	switch (loop_mode) {
7905 	case HNAE3_LOOP_APP:
7906 		ret = hclge_set_app_loopback(hdev, en);
7907 		break;
7908 	case HNAE3_LOOP_SERIAL_SERDES:
7909 	case HNAE3_LOOP_PARALLEL_SERDES:
7910 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
7911 		break;
7912 	case HNAE3_LOOP_PHY:
7913 		ret = hclge_set_phy_loopback(hdev, en);
7914 		break;
7915 	default:
7916 		ret = -ENOTSUPP;
7917 		dev_err(&hdev->pdev->dev,
7918 			"loop_mode %d is not supported\n", loop_mode);
7919 		break;
7920 	}
7921 
7922 	if (ret)
7923 		return ret;
7924 
7925 	ret = hclge_tqp_enable(handle, en);
7926 	if (ret)
7927 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7928 			en ? "enable" : "disable", ret);
7929 
7930 	return ret;
7931 }
7932 
7933 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7934 {
7935 	int ret;
7936 
7937 	ret = hclge_set_app_loopback(hdev, false);
7938 	if (ret)
7939 		return ret;
7940 
7941 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7942 	if (ret)
7943 		return ret;
7944 
7945 	return hclge_cfg_common_loopback(hdev, false,
7946 					 HNAE3_LOOP_PARALLEL_SERDES);
7947 }
7948 
7949 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7950 {
7951 	struct hclge_vport *vport = hclge_get_vport(handle);
7952 	struct hnae3_knic_private_info *kinfo;
7953 	struct hnae3_queue *queue;
7954 	struct hclge_tqp *tqp;
7955 	int i;
7956 
7957 	kinfo = &vport->nic.kinfo;
7958 	for (i = 0; i < kinfo->num_tqps; i++) {
7959 		queue = handle->kinfo.tqp[i];
7960 		tqp = container_of(queue, struct hclge_tqp, q);
7961 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7962 	}
7963 }
7964 
7965 static void hclge_flush_link_update(struct hclge_dev *hdev)
7966 {
7967 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7968 
7969 	unsigned long last = hdev->serv_processed_cnt;
7970 	int i = 0;
7971 
7972 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7973 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7974 	       last == hdev->serv_processed_cnt)
7975 		usleep_range(1, 1);
7976 }
7977 
7978 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7979 {
7980 	struct hclge_vport *vport = hclge_get_vport(handle);
7981 	struct hclge_dev *hdev = vport->back;
7982 
7983 	if (enable) {
7984 		hclge_task_schedule(hdev, 0);
7985 	} else {
7986 		/* Set the DOWN flag here to disable link updating */
7987 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7988 
7989 		/* flush memory to make sure DOWN is seen by service task */
7990 		smp_mb__before_atomic();
7991 		hclge_flush_link_update(hdev);
7992 	}
7993 }
7994 
7995 static int hclge_ae_start(struct hnae3_handle *handle)
7996 {
7997 	struct hclge_vport *vport = hclge_get_vport(handle);
7998 	struct hclge_dev *hdev = vport->back;
7999 
8000 	/* mac enable */
8001 	hclge_cfg_mac_mode(hdev, true);
8002 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8003 	hdev->hw.mac.link = 0;
8004 
8005 	/* reset tqp stats */
8006 	hclge_reset_tqp_stats(handle);
8007 
8008 	hclge_mac_start_phy(hdev);
8009 
8010 	return 0;
8011 }
8012 
8013 static void hclge_ae_stop(struct hnae3_handle *handle)
8014 {
8015 	struct hclge_vport *vport = hclge_get_vport(handle);
8016 	struct hclge_dev *hdev = vport->back;
8017 
8018 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8019 	spin_lock_bh(&hdev->fd_rule_lock);
8020 	hclge_clear_arfs_rules(hdev);
8021 	spin_unlock_bh(&hdev->fd_rule_lock);
8022 
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
8026 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8027 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8028 		hclge_mac_stop_phy(hdev);
8029 		hclge_update_link_status(hdev);
8030 		return;
8031 	}
8032 
8033 	hclge_reset_tqp(handle);
8034 
8035 	hclge_config_mac_tnl_int(hdev, false);
8036 
8037 	/* Mac disable */
8038 	hclge_cfg_mac_mode(hdev, false);
8039 
8040 	hclge_mac_stop_phy(hdev);
8041 
8042 	/* reset tqp stats */
8043 	hclge_reset_tqp_stats(handle);
8044 	hclge_update_link_status(hdev);
8045 }
8046 
8047 int hclge_vport_start(struct hclge_vport *vport)
8048 {
8049 	struct hclge_dev *hdev = vport->back;
8050 
8051 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8052 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8053 	vport->last_active_jiffies = jiffies;
8054 
8055 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8056 		if (vport->vport_id) {
8057 			hclge_restore_mac_table_common(vport);
8058 			hclge_restore_vport_vlan_table(vport);
8059 		} else {
8060 			hclge_restore_hw_table(hdev);
8061 		}
8062 	}
8063 
8064 	clear_bit(vport->vport_id, hdev->vport_config_block);
8065 
8066 	return 0;
8067 }
8068 
8069 void hclge_vport_stop(struct hclge_vport *vport)
8070 {
8071 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8072 }
8073 
8074 static int hclge_client_start(struct hnae3_handle *handle)
8075 {
8076 	struct hclge_vport *vport = hclge_get_vport(handle);
8077 
8078 	return hclge_vport_start(vport);
8079 }
8080 
8081 static void hclge_client_stop(struct hnae3_handle *handle)
8082 {
8083 	struct hclge_vport *vport = hclge_get_vport(handle);
8084 
8085 	hclge_vport_stop(vport);
8086 }
8087 
8088 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8089 					 u16 cmdq_resp, u8  resp_code,
8090 					 enum hclge_mac_vlan_tbl_opcode op)
8091 {
8092 	struct hclge_dev *hdev = vport->back;
8093 
8094 	if (cmdq_resp) {
8095 		dev_err(&hdev->pdev->dev,
8096 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8097 			cmdq_resp);
8098 		return -EIO;
8099 	}
8100 
8101 	if (op == HCLGE_MAC_VLAN_ADD) {
8102 		if (!resp_code || resp_code == 1)
8103 			return 0;
8104 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8105 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8106 			return -ENOSPC;
8107 
8108 		dev_err(&hdev->pdev->dev,
8109 			"add mac addr failed for undefined, code=%u.\n",
8110 			resp_code);
8111 		return -EIO;
8112 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8113 		if (!resp_code) {
8114 			return 0;
8115 		} else if (resp_code == 1) {
8116 			dev_dbg(&hdev->pdev->dev,
8117 				"remove mac addr failed for miss.\n");
8118 			return -ENOENT;
8119 		}
8120 
8121 		dev_err(&hdev->pdev->dev,
8122 			"remove mac addr failed for undefined, code=%u.\n",
8123 			resp_code);
8124 		return -EIO;
8125 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8126 		if (!resp_code) {
8127 			return 0;
8128 		} else if (resp_code == 1) {
8129 			dev_dbg(&hdev->pdev->dev,
8130 				"lookup mac addr failed for miss.\n");
8131 			return -ENOENT;
8132 		}
8133 
8134 		dev_err(&hdev->pdev->dev,
8135 			"lookup mac addr failed for undefined, code=%u.\n",
8136 			resp_code);
8137 		return -EIO;
8138 	}
8139 
8140 	dev_err(&hdev->pdev->dev,
8141 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8142 
8143 	return -EINVAL;
8144 }
8145 
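/* Set or clear the bit for @vfid in the function bitmap of a multicast MAC
 * entry. The bitmap spans descriptors 1 and 2: the first 192 function ids
 * are in desc[1], the rest in desc[2].
 */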
8146 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8147 {
8148 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8149 
8150 	unsigned int word_num;
8151 	unsigned int bit_num;
8152 
8153 	if (vfid > 255 || vfid < 0)
8154 		return -EIO;
8155 
8156 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8157 		word_num = vfid / 32;
8158 		bit_num  = vfid % 32;
8159 		if (clr)
8160 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8161 		else
8162 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8163 	} else {
8164 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8165 		bit_num  = vfid % 32;
8166 		if (clr)
8167 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8168 		else
8169 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8170 	}
8171 
8172 	return 0;
8173 }
8174 
8175 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8176 {
8177 #define HCLGE_DESC_NUMBER 3
8178 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8179 	int i, j;
8180 
8181 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8182 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8183 			if (desc[i].data[j])
8184 				return false;
8185 
8186 	return true;
8187 }
8188 
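/* Pack a MAC address into the mac_vlan table entry command: the first four
 * bytes go into mac_addr_hi32 and the last two into mac_addr_lo16. For
 * multicast entries the multicast entry-type bits are also set.
 */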
8189 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8190 				   const u8 *addr, bool is_mc)
8191 {
8192 	const unsigned char *mac_addr = addr;
8193 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8194 		       (mac_addr[0]) | (mac_addr[1] << 8);
8195 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8196 
8197 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8198 	if (is_mc) {
8199 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8200 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8201 	}
8202 
8203 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8204 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8205 }
8206 
8207 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8208 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8209 {
8210 	struct hclge_dev *hdev = vport->back;
8211 	struct hclge_desc desc;
8212 	u8 resp_code;
8213 	u16 retval;
8214 	int ret;
8215 
8216 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8217 
8218 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8219 
8220 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8221 	if (ret) {
8222 		dev_err(&hdev->pdev->dev,
8223 			"del mac addr failed for cmd_send, ret =%d.\n",
8224 			ret);
8225 		return ret;
8226 	}
8227 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8228 	retval = le16_to_cpu(desc.retval);
8229 
8230 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8231 					     HCLGE_MAC_VLAN_REMOVE);
8232 }
8233 
8234 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8235 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8236 				     struct hclge_desc *desc,
8237 				     bool is_mc)
8238 {
8239 	struct hclge_dev *hdev = vport->back;
8240 	u8 resp_code;
8241 	u16 retval;
8242 	int ret;
8243 
8244 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8245 	if (is_mc) {
8246 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8247 		memcpy(desc[0].data,
8248 		       req,
8249 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8250 		hclge_cmd_setup_basic_desc(&desc[1],
8251 					   HCLGE_OPC_MAC_VLAN_ADD,
8252 					   true);
8253 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8254 		hclge_cmd_setup_basic_desc(&desc[2],
8255 					   HCLGE_OPC_MAC_VLAN_ADD,
8256 					   true);
8257 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8258 	} else {
8259 		memcpy(desc[0].data,
8260 		       req,
8261 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8262 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8263 	}
8264 	if (ret) {
8265 		dev_err(&hdev->pdev->dev,
8266 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8267 			ret);
8268 		return ret;
8269 	}
8270 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8271 	retval = le16_to_cpu(desc[0].retval);
8272 
8273 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8274 					     HCLGE_MAC_VLAN_LKUP);
8275 }
8276 
8277 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8278 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8279 				  struct hclge_desc *mc_desc)
8280 {
8281 	struct hclge_dev *hdev = vport->back;
8282 	int cfg_status;
8283 	u8 resp_code;
8284 	u16 retval;
8285 	int ret;
8286 
8287 	if (!mc_desc) {
8288 		struct hclge_desc desc;
8289 
8290 		hclge_cmd_setup_basic_desc(&desc,
8291 					   HCLGE_OPC_MAC_VLAN_ADD,
8292 					   false);
8293 		memcpy(desc.data, req,
8294 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8295 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8296 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8297 		retval = le16_to_cpu(desc.retval);
8298 
8299 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8300 							   resp_code,
8301 							   HCLGE_MAC_VLAN_ADD);
8302 	} else {
8303 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8304 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8305 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8306 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8307 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8308 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8309 		memcpy(mc_desc[0].data, req,
8310 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8311 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8312 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8313 		retval = le16_to_cpu(mc_desc[0].retval);
8314 
8315 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8316 							   resp_code,
8317 							   HCLGE_MAC_VLAN_ADD);
8318 	}
8319 
8320 	if (ret) {
8321 		dev_err(&hdev->pdev->dev,
8322 			"add mac addr failed for cmd_send, ret =%d.\n",
8323 			ret);
8324 		return ret;
8325 	}
8326 
8327 	return cfg_status;
8328 }
8329 
8330 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8331 			       u16 *allocated_size)
8332 {
8333 	struct hclge_umv_spc_alc_cmd *req;
8334 	struct hclge_desc desc;
8335 	int ret;
8336 
8337 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8338 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8339 
8340 	req->space_size = cpu_to_le32(space_size);
8341 
8342 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8343 	if (ret) {
8344 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8345 			ret);
8346 		return ret;
8347 	}
8348 
8349 	*allocated_size = le32_to_cpu(desc.data[1]);
8350 
8351 	return 0;
8352 }
8353 
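/* Request UMV table space from the firmware and split it into a private
 * quota per vport plus a shared pool; the remainder of the division goes
 * to the shared pool.
 */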
8354 static int hclge_init_umv_space(struct hclge_dev *hdev)
8355 {
8356 	u16 allocated_size = 0;
8357 	int ret;
8358 
8359 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8360 	if (ret)
8361 		return ret;
8362 
8363 	if (allocated_size < hdev->wanted_umv_size)
8364 		dev_warn(&hdev->pdev->dev,
8365 			 "failed to alloc umv space, want %u, get %u\n",
8366 			 hdev->wanted_umv_size, allocated_size);
8367 
8368 	hdev->max_umv_size = allocated_size;
8369 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8370 	hdev->share_umv_size = hdev->priv_umv_size +
8371 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8372 
8373 	return 0;
8374 }
8375 
8376 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8377 {
8378 	struct hclge_vport *vport;
8379 	int i;
8380 
8381 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8382 		vport = &hdev->vport[i];
8383 		vport->used_umv_num = 0;
8384 	}
8385 
8386 	mutex_lock(&hdev->vport_lock);
8387 	hdev->share_umv_size = hdev->priv_umv_size +
8388 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8389 	mutex_unlock(&hdev->vport_lock);
8390 }
8391 
8392 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8393 {
8394 	struct hclge_dev *hdev = vport->back;
8395 	bool is_full;
8396 
8397 	if (need_lock)
8398 		mutex_lock(&hdev->vport_lock);
8399 
8400 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8401 		   hdev->share_umv_size == 0);
8402 
8403 	if (need_lock)
8404 		mutex_unlock(&hdev->vport_lock);
8405 
8406 	return is_full;
8407 }
8408 
8409 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8410 {
8411 	struct hclge_dev *hdev = vport->back;
8412 
8413 	if (is_free) {
8414 		if (vport->used_umv_num > hdev->priv_umv_size)
8415 			hdev->share_umv_size++;
8416 
8417 		if (vport->used_umv_num > 0)
8418 			vport->used_umv_num--;
8419 	} else {
8420 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8421 		    hdev->share_umv_size > 0)
8422 			hdev->share_umv_size--;
8423 		vport->used_umv_num++;
8424 	}
8425 }
8426 
8427 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8428 						  const u8 *mac_addr)
8429 {
8430 	struct hclge_mac_node *mac_node, *tmp;
8431 
8432 	list_for_each_entry_safe(mac_node, tmp, list, node)
8433 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8434 			return mac_node;
8435 
8436 	return NULL;
8437 }
8438 
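/* Apply a state transition to an existing mac node:
 * - a TO_ADD request on a TO_DEL node revives it to ACTIVE;
 * - a TO_DEL request drops a TO_ADD node (it was never written to
 *   hardware), otherwise marks the node TO_DEL;
 * - an ACTIVE request promotes a TO_ADD node to ACTIVE.
 * All other combinations leave the node unchanged.
 */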
8439 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8440 				  enum HCLGE_MAC_NODE_STATE state)
8441 {
8442 	switch (state) {
8443 	/* from set_rx_mode or tmp_add_list */
8444 	case HCLGE_MAC_TO_ADD:
8445 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8446 			mac_node->state = HCLGE_MAC_ACTIVE;
8447 		break;
8448 	/* only from set_rx_mode */
8449 	case HCLGE_MAC_TO_DEL:
8450 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8451 			list_del(&mac_node->node);
8452 			kfree(mac_node);
8453 		} else {
8454 			mac_node->state = HCLGE_MAC_TO_DEL;
8455 		}
8456 		break;
8457 	/* only from tmp_add_list, the mac_node->state won't be
8458 	 * ACTIVE.
8459 	 */
8460 	case HCLGE_MAC_ACTIVE:
8461 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8462 			mac_node->state = HCLGE_MAC_ACTIVE;
8463 
8464 		break;
8465 	}
8466 }
8467 
8468 int hclge_update_mac_list(struct hclge_vport *vport,
8469 			  enum HCLGE_MAC_NODE_STATE state,
8470 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8471 			  const unsigned char *addr)
8472 {
8473 	struct hclge_dev *hdev = vport->back;
8474 	struct hclge_mac_node *mac_node;
8475 	struct list_head *list;
8476 
8477 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8478 		&vport->uc_mac_list : &vport->mc_mac_list;
8479 
8480 	spin_lock_bh(&vport->mac_list_lock);
8481 
	/* If the mac addr is already in the mac list, there is no need to add
	 * a new one; just check the mac addr state and convert it to a new
	 * state, remove it, or do nothing.
	 */
8486 	mac_node = hclge_find_mac_node(list, addr);
8487 	if (mac_node) {
8488 		hclge_update_mac_node(mac_node, state);
8489 		spin_unlock_bh(&vport->mac_list_lock);
8490 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8491 		return 0;
8492 	}
8493 
	/* if this address was never added, there is no need to delete it */
8495 	if (state == HCLGE_MAC_TO_DEL) {
8496 		spin_unlock_bh(&vport->mac_list_lock);
8497 		dev_err(&hdev->pdev->dev,
8498 			"failed to delete address %pM from mac list\n",
8499 			addr);
8500 		return -ENOENT;
8501 	}
8502 
8503 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8504 	if (!mac_node) {
8505 		spin_unlock_bh(&vport->mac_list_lock);
8506 		return -ENOMEM;
8507 	}
8508 
8509 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8510 
8511 	mac_node->state = state;
8512 	ether_addr_copy(mac_node->mac_addr, addr);
8513 	list_add_tail(&mac_node->node, list);
8514 
8515 	spin_unlock_bh(&vport->mac_list_lock);
8516 
8517 	return 0;
8518 }
8519 
8520 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8521 			     const unsigned char *addr)
8522 {
8523 	struct hclge_vport *vport = hclge_get_vport(handle);
8524 
8525 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8526 				     addr);
8527 }
8528 
8529 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8530 			     const unsigned char *addr)
8531 {
8532 	struct hclge_dev *hdev = vport->back;
8533 	struct hclge_mac_vlan_tbl_entry_cmd req;
8534 	struct hclge_desc desc;
8535 	u16 egress_port = 0;
8536 	int ret;
8537 
8538 	/* mac addr check */
8539 	if (is_zero_ether_addr(addr) ||
8540 	    is_broadcast_ether_addr(addr) ||
8541 	    is_multicast_ether_addr(addr)) {
8542 		dev_err(&hdev->pdev->dev,
8543 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8544 			 addr, is_zero_ether_addr(addr),
8545 			 is_broadcast_ether_addr(addr),
8546 			 is_multicast_ether_addr(addr));
8547 		return -EINVAL;
8548 	}
8549 
8550 	memset(&req, 0, sizeof(req));
8551 
8552 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8553 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8554 
8555 	req.egress_port = cpu_to_le16(egress_port);
8556 
8557 	hclge_prepare_mac_addr(&req, addr, false);
8558 
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
	 */
8563 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8564 	if (ret == -ENOENT) {
8565 		mutex_lock(&hdev->vport_lock);
8566 		if (!hclge_is_umv_space_full(vport, false)) {
8567 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8568 			if (!ret)
8569 				hclge_update_umv_space(vport, false);
8570 			mutex_unlock(&hdev->vport_lock);
8571 			return ret;
8572 		}
8573 		mutex_unlock(&hdev->vport_lock);
8574 
8575 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8576 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8577 				hdev->priv_umv_size);
8578 
8579 		return -ENOSPC;
8580 	}
8581 
	/* check if we just hit a duplicate entry */
8583 	if (!ret) {
8584 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8585 			 vport->vport_id, addr);
8586 		return 0;
8587 	}
8588 
8589 	dev_err(&hdev->pdev->dev,
8590 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8591 		addr);
8592 
8593 	return ret;
8594 }
8595 
8596 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8597 			    const unsigned char *addr)
8598 {
8599 	struct hclge_vport *vport = hclge_get_vport(handle);
8600 
8601 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8602 				     addr);
8603 }
8604 
8605 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8606 			    const unsigned char *addr)
8607 {
8608 	struct hclge_dev *hdev = vport->back;
8609 	struct hclge_mac_vlan_tbl_entry_cmd req;
8610 	int ret;
8611 
8612 	/* mac addr check */
8613 	if (is_zero_ether_addr(addr) ||
8614 	    is_broadcast_ether_addr(addr) ||
8615 	    is_multicast_ether_addr(addr)) {
8616 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8617 			addr);
8618 		return -EINVAL;
8619 	}
8620 
8621 	memset(&req, 0, sizeof(req));
8622 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8623 	hclge_prepare_mac_addr(&req, addr, false);
8624 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8625 	if (!ret) {
8626 		mutex_lock(&hdev->vport_lock);
8627 		hclge_update_umv_space(vport, true);
8628 		mutex_unlock(&hdev->vport_lock);
8629 	} else if (ret == -ENOENT) {
8630 		ret = 0;
8631 	}
8632 
8633 	return ret;
8634 }
8635 
8636 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8637 			     const unsigned char *addr)
8638 {
8639 	struct hclge_vport *vport = hclge_get_vport(handle);
8640 
8641 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8642 				     addr);
8643 }
8644 
8645 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8646 			     const unsigned char *addr)
8647 {
8648 	struct hclge_dev *hdev = vport->back;
8649 	struct hclge_mac_vlan_tbl_entry_cmd req;
8650 	struct hclge_desc desc[3];
8651 	int status;
8652 
8653 	/* mac addr check */
8654 	if (!is_multicast_ether_addr(addr)) {
8655 		dev_err(&hdev->pdev->dev,
8656 			"Add mc mac err! invalid mac:%pM.\n",
8657 			 addr);
8658 		return -EINVAL;
8659 	}
8660 	memset(&req, 0, sizeof(req));
8661 	hclge_prepare_mac_addr(&req, addr, true);
8662 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8663 	if (status) {
		/* This mac addr does not exist, add a new entry for it */
8665 		memset(desc[0].data, 0, sizeof(desc[0].data));
8666 		memset(desc[1].data, 0, sizeof(desc[0].data));
8667 		memset(desc[2].data, 0, sizeof(desc[0].data));
8668 	}
8669 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8670 	if (status)
8671 		return status;
8672 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	/* if the table has already overflowed, do not print the error each time */
8674 	if (status == -ENOSPC &&
8675 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8676 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8677 
8678 	return status;
8679 }
8680 
8681 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8682 			    const unsigned char *addr)
8683 {
8684 	struct hclge_vport *vport = hclge_get_vport(handle);
8685 
8686 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8687 				     addr);
8688 }
8689 
8690 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8691 			    const unsigned char *addr)
8692 {
8693 	struct hclge_dev *hdev = vport->back;
8694 	struct hclge_mac_vlan_tbl_entry_cmd req;
8695 	enum hclge_cmd_status status;
8696 	struct hclge_desc desc[3];
8697 
8698 	/* mac addr check */
8699 	if (!is_multicast_ether_addr(addr)) {
8700 		dev_dbg(&hdev->pdev->dev,
8701 			"Remove mc mac err! invalid mac:%pM.\n",
8702 			 addr);
8703 		return -EINVAL;
8704 	}
8705 
8706 	memset(&req, 0, sizeof(req));
8707 	hclge_prepare_mac_addr(&req, addr, true);
8708 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8709 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
8711 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8712 		if (status)
8713 			return status;
8714 
8715 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so this entry needs to be deleted */
8717 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8718 		else
			/* Not all the vfids are zero, update the vfid */
8720 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8721 	} else if (status == -ENOENT) {
8722 		status = 0;
8723 	}
8724 
8725 	return status;
8726 }
8727 
8728 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8729 				      struct list_head *list,
8730 				      int (*sync)(struct hclge_vport *,
8731 						  const unsigned char *))
8732 {
8733 	struct hclge_mac_node *mac_node, *tmp;
8734 	int ret;
8735 
8736 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8737 		ret = sync(vport, mac_node->mac_addr);
8738 		if (!ret) {
8739 			mac_node->state = HCLGE_MAC_ACTIVE;
8740 		} else {
8741 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8742 				&vport->state);
8743 			break;
8744 		}
8745 	}
8746 }
8747 
8748 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8749 					struct list_head *list,
8750 					int (*unsync)(struct hclge_vport *,
8751 						      const unsigned char *))
8752 {
8753 	struct hclge_mac_node *mac_node, *tmp;
8754 	int ret;
8755 
8756 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8757 		ret = unsync(vport, mac_node->mac_addr);
8758 		if (!ret || ret == -ENOENT) {
8759 			list_del(&mac_node->node);
8760 			kfree(mac_node);
8761 		} else {
8762 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8763 				&vport->state);
8764 			break;
8765 		}
8766 	}
8767 }
8768 
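/* Merge the temporary add list back into the vport mac list after the
 * hardware configuration has been attempted. Returns true if every address
 * was successfully added (no node left in the TO_ADD state).
 */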
8769 static bool hclge_sync_from_add_list(struct list_head *add_list,
8770 				     struct list_head *mac_list)
8771 {
8772 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8773 	bool all_added = true;
8774 
8775 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8776 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8777 			all_added = false;
8778 
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into the
		 * mac table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time. Otherwise it must
		 * be TO_ADD, meaning this address hasn't been added into the
		 * mac table yet, so just remove the mac node.
		 */
8787 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8788 		if (new_node) {
8789 			hclge_update_mac_node(new_node, mac_node->state);
8790 			list_del(&mac_node->node);
8791 			kfree(mac_node);
8792 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8793 			mac_node->state = HCLGE_MAC_TO_DEL;
8794 			list_del(&mac_node->node);
8795 			list_add_tail(&mac_node->node, mac_list);
8796 		} else {
8797 			list_del(&mac_node->node);
8798 			kfree(mac_node);
8799 		}
8800 	}
8801 
8802 	return all_added;
8803 }
8804 
8805 static void hclge_sync_from_del_list(struct list_head *del_list,
8806 				     struct list_head *mac_list)
8807 {
8808 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8809 
8810 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8811 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8812 		if (new_node) {
8813 			/* If the mac addr exists in the mac list, a new
8814 			 * TO_ADD request was received during the time window
8815 			 * of configuring the mac address. Since the mac node
8816 			 * state is TO_ADD and the address is still in the
8817 			 * hardware (because the delete failed), we just need
8818 			 * to change the mac node state to ACTIVE.
8819 			 */
8820 			new_node->state = HCLGE_MAC_ACTIVE;
8821 			list_del(&mac_node->node);
8822 			kfree(mac_node);
8823 		} else {
8824 			list_del(&mac_node->node);
8825 			list_add_tail(&mac_node->node, mac_list);
8826 		}
8827 	}
8828 }
8829 
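/* If not all addresses of the given type could be programmed into the
 * hardware table, set the matching overflow promisc flag (UPE for unicast,
 * MPE for multicast); clear it again once every address has been added.
 */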
8830 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8831 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8832 					bool is_all_added)
8833 {
8834 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8835 		if (is_all_added)
8836 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8837 		else
8838 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8839 	} else {
8840 		if (is_all_added)
8841 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8842 		else
8843 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8844 	}
8845 }
8846 
8847 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8848 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8849 {
8850 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8851 	struct list_head tmp_add_list, tmp_del_list;
8852 	struct list_head *list;
8853 	bool all_added;
8854 
8855 	INIT_LIST_HEAD(&tmp_add_list);
8856 	INIT_LIST_HEAD(&tmp_del_list);
8857 
8858 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
8859 	 * we can add/delete them outside the spin lock
8860 	 */
8861 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8862 		&vport->uc_mac_list : &vport->mc_mac_list;
8863 
8864 	spin_lock_bh(&vport->mac_list_lock);
8865 
8866 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8867 		switch (mac_node->state) {
8868 		case HCLGE_MAC_TO_DEL:
8869 			list_del(&mac_node->node);
8870 			list_add_tail(&mac_node->node, &tmp_del_list);
8871 			break;
8872 		case HCLGE_MAC_TO_ADD:
8873 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8874 			if (!new_node)
8875 				goto stop_traverse;
8876 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8877 			new_node->state = mac_node->state;
8878 			list_add_tail(&new_node->node, &tmp_add_list);
8879 			break;
8880 		default:
8881 			break;
8882 		}
8883 	}
8884 
8885 stop_traverse:
8886 	spin_unlock_bh(&vport->mac_list_lock);
8887 
8888 	/* delete first, in order to get max mac table space for adding */
8889 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8890 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8891 					    hclge_rm_uc_addr_common);
8892 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8893 					  hclge_add_uc_addr_common);
8894 	} else {
8895 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8896 					    hclge_rm_mc_addr_common);
8897 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8898 					  hclge_add_mc_addr_common);
8899 	}
8900 
8901 	/* if adding/deleting some mac addresses failed, move them back to
8902 	 * the mac_list and retry next time.
8903 	 */
8904 	spin_lock_bh(&vport->mac_list_lock);
8905 
8906 	hclge_sync_from_del_list(&tmp_del_list, list);
8907 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8908 
8909 	spin_unlock_bh(&vport->mac_list_lock);
8910 
8911 	hclge_update_overflow_flags(vport, mac_type, all_added);
8912 }
8913 
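/* Skip syncing while the vport's configuration is blocked (for example
 * during a VF reset); otherwise sync only when the mac table has been
 * marked as changed, clearing the change flag in the process.
 */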
8914 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8915 {
8916 	struct hclge_dev *hdev = vport->back;
8917 
8918 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8919 		return false;
8920 
8921 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8922 		return true;
8923 
8924 	return false;
8925 }
8926 
8927 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8928 {
8929 	int i;
8930 
8931 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8932 		struct hclge_vport *vport = &hdev->vport[i];
8933 
8934 		if (!hclge_need_sync_mac_table(vport))
8935 			continue;
8936 
8937 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8938 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8939 	}
8940 }
8941 
8942 static void hclge_build_del_list(struct list_head *list,
8943 				 bool is_del_list,
8944 				 struct list_head *tmp_del_list)
8945 {
8946 	struct hclge_mac_node *mac_cfg, *tmp;
8947 
8948 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8949 		switch (mac_cfg->state) {
8950 		case HCLGE_MAC_TO_DEL:
8951 		case HCLGE_MAC_ACTIVE:
8952 			list_del(&mac_cfg->node);
8953 			list_add_tail(&mac_cfg->node, tmp_del_list);
8954 			break;
8955 		case HCLGE_MAC_TO_ADD:
8956 			if (is_del_list) {
8957 				list_del(&mac_cfg->node);
8958 				kfree(mac_cfg);
8959 			}
8960 			break;
8961 		}
8962 	}
8963 }
8964 
8965 static void hclge_unsync_del_list(struct hclge_vport *vport,
8966 				  int (*unsync)(struct hclge_vport *vport,
8967 						const unsigned char *addr),
8968 				  bool is_del_list,
8969 				  struct list_head *tmp_del_list)
8970 {
8971 	struct hclge_mac_node *mac_cfg, *tmp;
8972 	int ret;
8973 
8974 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8975 		ret = unsync(vport, mac_cfg->mac_addr);
8976 		if (!ret || ret == -ENOENT) {
8977 			/* clear all mac addrs from hardware, but keep them
8978 			 * in the mac list so they can be restored after the
8979 			 * vf reset finishes.
8980 			 */
8981 			if (!is_del_list &&
8982 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8983 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8984 			} else {
8985 				list_del(&mac_cfg->node);
8986 				kfree(mac_cfg);
8987 			}
8988 		} else if (is_del_list) {
8989 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8990 		}
8991 	}
8992 }
8993 
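/* Remove the vport's unicast or multicast addresses from the hardware
 * table. When is_del_list is false (typically for a VF reset), the vport's
 * configuration is blocked and the entries are kept in the list marked
 * TO_ADD, so they can be restored once the reset completes.
 */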
8994 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8995 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
8996 {
8997 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8998 	struct hclge_dev *hdev = vport->back;
8999 	struct list_head tmp_del_list, *list;
9000 
9001 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9002 		list = &vport->uc_mac_list;
9003 		unsync = hclge_rm_uc_addr_common;
9004 	} else {
9005 		list = &vport->mc_mac_list;
9006 		unsync = hclge_rm_mc_addr_common;
9007 	}
9008 
9009 	INIT_LIST_HEAD(&tmp_del_list);
9010 
9011 	if (!is_del_list)
9012 		set_bit(vport->vport_id, hdev->vport_config_block);
9013 
9014 	spin_lock_bh(&vport->mac_list_lock);
9015 
9016 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9017 
9018 	spin_unlock_bh(&vport->mac_list_lock);
9019 
9020 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9021 
9022 	spin_lock_bh(&vport->mac_list_lock);
9023 
9024 	hclge_sync_from_del_list(&tmp_del_list, list);
9025 
9026 	spin_unlock_bh(&vport->mac_list_lock);
9027 }
9028 
9029 /* remove all mac addresses when uninitializing */
9030 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9031 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9032 {
9033 	struct hclge_mac_node *mac_node, *tmp;
9034 	struct hclge_dev *hdev = vport->back;
9035 	struct list_head tmp_del_list, *list;
9036 
9037 	INIT_LIST_HEAD(&tmp_del_list);
9038 
9039 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9040 		&vport->uc_mac_list : &vport->mc_mac_list;
9041 
9042 	spin_lock_bh(&vport->mac_list_lock);
9043 
9044 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9045 		switch (mac_node->state) {
9046 		case HCLGE_MAC_TO_DEL:
9047 		case HCLGE_MAC_ACTIVE:
9048 			list_del(&mac_node->node);
9049 			list_add_tail(&mac_node->node, &tmp_del_list);
9050 			break;
9051 		case HCLGE_MAC_TO_ADD:
9052 			list_del(&mac_node->node);
9053 			kfree(mac_node);
9054 			break;
9055 		}
9056 	}
9057 
9058 	spin_unlock_bh(&vport->mac_list_lock);
9059 
9060 	if (mac_type == HCLGE_MAC_ADDR_UC)
9061 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9062 					    hclge_rm_uc_addr_common);
9063 	else
9064 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9065 					    hclge_rm_mc_addr_common);
9066 
9067 	if (!list_empty(&tmp_del_list))
9068 		dev_warn(&hdev->pdev->dev,
9069 			 "failed to completely uninit %s mac list for vport %u\n",
9070 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9071 			 vport->vport_id);
9072 
9073 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9074 		list_del(&mac_node->node);
9075 		kfree(mac_node);
9076 	}
9077 }
9078 
9079 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9080 {
9081 	struct hclge_vport *vport;
9082 	int i;
9083 
9084 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9085 		vport = &hdev->vport[i];
9086 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9087 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9088 	}
9089 }
9090 
9091 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9092 					      u16 cmdq_resp, u8 resp_code)
9093 {
9094 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9095 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9096 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9097 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9098 
9099 	int return_status;
9100 
9101 	if (cmdq_resp) {
9102 		dev_err(&hdev->pdev->dev,
9103 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9104 			cmdq_resp);
9105 		return -EIO;
9106 	}
9107 
9108 	switch (resp_code) {
9109 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9110 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9111 		return_status = 0;
9112 		break;
9113 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9114 		dev_err(&hdev->pdev->dev,
9115 			"add mac ethertype failed for manager table overflow.\n");
9116 		return_status = -EIO;
9117 		break;
9118 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9119 		dev_err(&hdev->pdev->dev,
9120 			"add mac ethertype failed for key conflict.\n");
9121 		return_status = -EIO;
9122 		break;
9123 	default:
9124 		dev_err(&hdev->pdev->dev,
9125 			"add mac ethertype failed for undefined, code=%u.\n",
9126 			resp_code);
9127 		return_status = -EIO;
9128 	}
9129 
9130 	return return_status;
9131 }
9132 
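/* A VF mac address is treated as already existing if it is present in the
 * hardware mac vlan table, or if another VF has the same address configured
 * in its vf_info.
 */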
9133 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9134 				     u8 *mac_addr)
9135 {
9136 	struct hclge_mac_vlan_tbl_entry_cmd req;
9137 	struct hclge_dev *hdev = vport->back;
9138 	struct hclge_desc desc;
9139 	u16 egress_port = 0;
9140 	int i;
9141 
9142 	if (is_zero_ether_addr(mac_addr))
9143 		return false;
9144 
9145 	memset(&req, 0, sizeof(req));
9146 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9147 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9148 	req.egress_port = cpu_to_le16(egress_port);
9149 	hclge_prepare_mac_addr(&req, mac_addr, false);
9150 
9151 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9152 		return true;
9153 
9154 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9155 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9156 		if (i != vf_idx &&
9157 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9158 			return true;
9159 
9160 	return false;
9161 }
9162 
9163 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9164 			    u8 *mac_addr)
9165 {
9166 	struct hclge_vport *vport = hclge_get_vport(handle);
9167 	struct hclge_dev *hdev = vport->back;
9168 
9169 	vport = hclge_get_vf_vport(hdev, vf);
9170 	if (!vport)
9171 		return -EINVAL;
9172 
9173 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9174 		dev_info(&hdev->pdev->dev,
9175 			 "Specified MAC(=%pM) is the same as before, no change committed!\n",
9176 			 mac_addr);
9177 		return 0;
9178 	}
9179 
9180 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9181 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9182 			mac_addr);
9183 		return -EEXIST;
9184 	}
9185 
9186 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9187 
9188 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9189 		dev_info(&hdev->pdev->dev,
9190 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9191 			 vf, mac_addr);
9192 		return hclge_inform_reset_assert_to_vf(vport);
9193 	}
9194 
9195 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9196 		 vf, mac_addr);
9197 	return 0;
9198 }
9199 
9200 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9201 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9202 {
9203 	struct hclge_desc desc;
9204 	u8 resp_code;
9205 	u16 retval;
9206 	int ret;
9207 
9208 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9209 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9210 
9211 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9212 	if (ret) {
9213 		dev_err(&hdev->pdev->dev,
9214 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9215 			ret);
9216 		return ret;
9217 	}
9218 
9219 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9220 	retval = le16_to_cpu(desc.retval);
9221 
9222 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9223 }
9224 
9225 static int init_mgr_tbl(struct hclge_dev *hdev)
9226 {
9227 	int ret;
9228 	int i;
9229 
9230 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9231 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9232 		if (ret) {
9233 			dev_err(&hdev->pdev->dev,
9234 				"add mac ethertype failed, ret =%d.\n",
9235 				ret);
9236 			return ret;
9237 		}
9238 	}
9239 
9240 	return 0;
9241 }
9242 
9243 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9244 {
9245 	struct hclge_vport *vport = hclge_get_vport(handle);
9246 	struct hclge_dev *hdev = vport->back;
9247 
9248 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9249 }
9250 
9251 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9252 				       const u8 *old_addr, const u8 *new_addr)
9253 {
9254 	struct list_head *list = &vport->uc_mac_list;
9255 	struct hclge_mac_node *old_node, *new_node;
9256 
9257 	new_node = hclge_find_mac_node(list, new_addr);
9258 	if (!new_node) {
9259 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9260 		if (!new_node)
9261 			return -ENOMEM;
9262 
9263 		new_node->state = HCLGE_MAC_TO_ADD;
9264 		ether_addr_copy(new_node->mac_addr, new_addr);
9265 		list_add(&new_node->node, list);
9266 	} else {
9267 		if (new_node->state == HCLGE_MAC_TO_DEL)
9268 			new_node->state = HCLGE_MAC_ACTIVE;
9269 
9270 		/* make sure the new addr is at the list head, so the dev
9271 		 * addr is not skipped due to the umv space limitation when
9272 		 * the mac table is restored after a global/imp reset, which
9273 		 * clears the mac table in hardware.
9274 		 */
9275 		list_move(&new_node->node, list);
9276 	}
9277 
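	/* the old dev addr only needs to stay in hardware until the new one
	 * takes effect: drop its node if it was never programmed, otherwise
	 * mark it for deletion
	 */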
9278 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9279 		old_node = hclge_find_mac_node(list, old_addr);
9280 		if (old_node) {
9281 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9282 				list_del(&old_node->node);
9283 				kfree(old_node);
9284 			} else {
9285 				old_node->state = HCLGE_MAC_TO_DEL;
9286 			}
9287 		}
9288 	}
9289 
9290 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9291 
9292 	return 0;
9293 }
9294 
9295 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9296 			      bool is_first)
9297 {
9298 	const unsigned char *new_addr = (const unsigned char *)p;
9299 	struct hclge_vport *vport = hclge_get_vport(handle);
9300 	struct hclge_dev *hdev = vport->back;
9301 	unsigned char *old_addr = NULL;
9302 	int ret;
9303 
9304 	/* mac addr check */
9305 	if (is_zero_ether_addr(new_addr) ||
9306 	    is_broadcast_ether_addr(new_addr) ||
9307 	    is_multicast_ether_addr(new_addr)) {
9308 		dev_err(&hdev->pdev->dev,
9309 			"change uc mac err! invalid mac: %pM.\n",
9310 			 new_addr);
9311 		return -EINVAL;
9312 	}
9313 
9314 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9315 	if (ret) {
9316 		dev_err(&hdev->pdev->dev,
9317 			"failed to configure mac pause address, ret = %d\n",
9318 			ret);
9319 		return ret;
9320 	}
9321 
9322 	if (!is_first)
9323 		old_addr = hdev->hw.mac.mac_addr;
9324 
9325 	spin_lock_bh(&vport->mac_list_lock);
9326 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9327 	if (ret) {
9328 		dev_err(&hdev->pdev->dev,
9329 			"failed to change the mac addr:%pM, ret = %d\n",
9330 			new_addr, ret);
9331 		spin_unlock_bh(&vport->mac_list_lock);
9332 
9333 		if (!is_first)
9334 			hclge_pause_addr_cfg(hdev, old_addr);
9335 
9336 		return ret;
9337 	}
9338 	/* the dev addr must be updated under the spin lock to prevent it
9339 	 * from being removed by the set_rx_mode path.
9340 	 */
9341 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9342 	spin_unlock_bh(&vport->mac_list_lock);
9343 
9344 	hclge_task_schedule(hdev, 0);
9345 
9346 	return 0;
9347 }
9348 
9349 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9350 {
9351 	struct mii_ioctl_data *data = if_mii(ifr);
9352 
9353 	if (!hnae3_dev_phy_imp_supported(hdev))
9354 		return -EOPNOTSUPP;
9355 
9356 	switch (cmd) {
9357 	case SIOCGMIIPHY:
9358 		data->phy_id = hdev->hw.mac.phy_addr;
9359 		/* this command reads phy id and register at the same time */
9360 		fallthrough;
9361 	case SIOCGMIIREG:
9362 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9363 		return 0;
9364 
9365 	case SIOCSMIIREG:
9366 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9367 	default:
9368 		return -EOPNOTSUPP;
9369 	}
9370 }
9371 
9372 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9373 			  int cmd)
9374 {
9375 	struct hclge_vport *vport = hclge_get_vport(handle);
9376 	struct hclge_dev *hdev = vport->back;
9377 
9378 	if (!hdev->hw.mac.phydev)
9379 		return hclge_mii_ioctl(hdev, ifr, cmd);
9380 
9381 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9382 }
9383 
9384 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9385 				      u8 fe_type, bool filter_en, u8 vf_id)
9386 {
9387 	struct hclge_vlan_filter_ctrl_cmd *req;
9388 	struct hclge_desc desc;
9389 	int ret;
9390 
9391 	/* read current vlan filter parameter */
9392 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9393 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9394 	req->vlan_type = vlan_type;
9395 	req->vf_id = vf_id;
9396 
9397 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9398 	if (ret) {
9399 		dev_err(&hdev->pdev->dev,
9400 			"failed to get vlan filter config, ret = %d.\n", ret);
9401 		return ret;
9402 	}
9403 
9404 	/* modify and write new config parameter */
9405 	hclge_cmd_reuse_desc(&desc, false);
9406 	req->vlan_fe = filter_en ?
9407 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9408 
9409 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9410 	if (ret)
9411 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9412 			ret);
9413 
9414 	return ret;
9415 }
9416 
9417 #define HCLGE_FILTER_TYPE_VF		0
9418 #define HCLGE_FILTER_TYPE_PORT		1
9419 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
9420 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
9421 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
9422 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
9423 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
9424 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
9425 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
9426 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
9427 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
9428 
9429 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9430 {
9431 	struct hclge_vport *vport = hclge_get_vport(handle);
9432 	struct hclge_dev *hdev = vport->back;
9433 
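	/* on DEVICE_VERSION_V2 and later, the VF egress filter and the port
	 * ingress filter are toggled separately; older hardware only exposes
	 * the V1 egress control bit
	 */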
9434 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9435 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9436 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
9437 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9438 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
9439 	} else {
9440 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9441 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
9442 					   0);
9443 	}
9444 	if (enable)
9445 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
9446 	else
9447 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
9448 }
9449 
9450 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9451 					bool is_kill, u16 vlan,
9452 					struct hclge_desc *desc)
9453 {
9454 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9455 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9456 	u8 vf_byte_val;
9457 	u8 vf_byte_off;
9458 	int ret;
9459 
9460 	hclge_cmd_setup_basic_desc(&desc[0],
9461 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9462 	hclge_cmd_setup_basic_desc(&desc[1],
9463 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9464 
9465 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9466 
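	/* each function is one bit in the vf bitmap, which spans the two
	 * descriptors: byte vfid / 8, bit vfid % 8
	 */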
9467 	vf_byte_off = vfid / 8;
9468 	vf_byte_val = 1 << (vfid % 8);
9469 
9470 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9471 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9472 
9473 	req0->vlan_id  = cpu_to_le16(vlan);
9474 	req0->vlan_cfg = is_kill;
9475 
9476 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9477 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9478 	else
9479 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9480 
9481 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9482 	if (ret) {
9483 		dev_err(&hdev->pdev->dev,
9484 			"Send vf vlan command fail, ret =%d.\n",
9485 			ret);
9486 		return ret;
9487 	}
9488 
9489 	return 0;
9490 }
9491 
9492 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9493 					  bool is_kill, struct hclge_desc *desc)
9494 {
9495 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9496 
9497 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9498 
9499 	if (!is_kill) {
9500 #define HCLGE_VF_VLAN_NO_ENTRY	2
9501 		if (!req->resp_code || req->resp_code == 1)
9502 			return 0;
9503 
9504 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9505 			set_bit(vfid, hdev->vf_vlan_full);
9506 			dev_warn(&hdev->pdev->dev,
9507 				 "vf vlan table is full, vf vlan filter is disabled\n");
9508 			return 0;
9509 		}
9510 
9511 		dev_err(&hdev->pdev->dev,
9512 			"Add vf vlan filter fail, ret =%u.\n",
9513 			req->resp_code);
9514 	} else {
9515 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9516 		if (!req->resp_code)
9517 			return 0;
9518 
9519 		/* The vf vlan filter is disabled when the vf vlan table is
9520 		 * full, so new vlan ids are not added into the vf vlan
9521 		 * table. Just return 0 without a warning to avoid massive
9522 		 * verbose logs when unloading.
9523 		 */
9524 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9525 			return 0;
9526 
9527 		dev_err(&hdev->pdev->dev,
9528 			"Kill vf vlan filter fail, ret =%u.\n",
9529 			req->resp_code);
9530 	}
9531 
9532 	return -EIO;
9533 }
9534 
9535 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9536 				    bool is_kill, u16 vlan)
9537 {
9538 	struct hclge_vport *vport = &hdev->vport[vfid];
9539 	struct hclge_desc desc[2];
9540 	int ret;
9541 
9542 	/* if the vf vlan table is full, firmware disables the vf vlan
9543 	 * filter, so it is neither possible nor necessary to add new vlan
9544 	 * ids to it. But if spoof check is enabled, adding a new vlan must
9545 	 * fail, because tx packets with these vlan ids would be dropped.
9546 	 */
9547 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9548 		if (vport->vf_info.spoofchk && vlan) {
9549 			dev_err(&hdev->pdev->dev,
9550 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9551 			return -EPERM;
9552 		}
9553 		return 0;
9554 	}
9555 
9556 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9557 	if (ret)
9558 		return ret;
9559 
9560 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9561 }
9562 
9563 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9564 				      u16 vlan_id, bool is_kill)
9565 {
9566 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9567 	struct hclge_desc desc;
9568 	u8 vlan_offset_byte_val;
9569 	u8 vlan_offset_byte;
9570 	u8 vlan_offset_160;
9571 	int ret;
9572 
9573 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9574 
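	/* a vlan id is one bit in the port vlan bitmap: locate its
	 * HCLGE_VLAN_ID_OFFSET_STEP sized group, then the byte and the bit
	 * within that group
	 */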
9575 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9576 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9577 			   HCLGE_VLAN_BYTE_SIZE;
9578 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9579 
9580 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9581 	req->vlan_offset = vlan_offset_160;
9582 	req->vlan_cfg = is_kill;
9583 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9584 
9585 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9586 	if (ret)
9587 		dev_err(&hdev->pdev->dev,
9588 			"port vlan command, send fail, ret =%d.\n", ret);
9589 	return ret;
9590 }
9591 
9592 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9593 				    u16 vport_id, u16 vlan_id,
9594 				    bool is_kill)
9595 {
9596 	u16 vport_idx, vport_num = 0;
9597 	int ret;
9598 
9599 	if (is_kill && !vlan_id)
9600 		return 0;
9601 
9602 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9603 	if (ret) {
9604 		dev_err(&hdev->pdev->dev,
9605 			"Set %u vport vlan filter config fail, ret =%d.\n",
9606 			vport_id, ret);
9607 		return ret;
9608 	}
9609 
9610 	/* vlan 0 may be added twice when 8021q module is enabled */
9611 	if (!is_kill && !vlan_id &&
9612 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9613 		return 0;
9614 
9615 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9616 		dev_err(&hdev->pdev->dev,
9617 			"Add port vlan failed, vport %u is already in vlan %u\n",
9618 			vport_id, vlan_id);
9619 		return -EINVAL;
9620 	}
9621 
9622 	if (is_kill &&
9623 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9624 		dev_err(&hdev->pdev->dev,
9625 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9626 			vport_id, vlan_id);
9627 		return -EINVAL;
9628 	}
9629 
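	/* the port vlan filter is shared by all vports, so only touch it when
	 * the first vport joins this vlan or the last vport leaves it
	 */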
9630 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9631 		vport_num++;
9632 
9633 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9634 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9635 						 is_kill);
9636 
9637 	return ret;
9638 }
9639 
9640 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9641 {
9642 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9643 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9644 	struct hclge_dev *hdev = vport->back;
9645 	struct hclge_desc desc;
9646 	u16 bmap_index;
9647 	int status;
9648 
9649 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9650 
9651 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9652 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9653 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9654 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9655 		      vcfg->accept_tag1 ? 1 : 0);
9656 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9657 		      vcfg->accept_untag1 ? 1 : 0);
9658 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9659 		      vcfg->accept_tag2 ? 1 : 0);
9660 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9661 		      vcfg->accept_untag2 ? 1 : 0);
9662 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9663 		      vcfg->insert_tag1_en ? 1 : 0);
9664 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9665 		      vcfg->insert_tag2_en ? 1 : 0);
9666 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9667 		      vcfg->tag_shift_mode_en ? 1 : 0);
9668 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9669 
9670 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9671 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9672 			HCLGE_VF_NUM_PER_BYTE;
9673 	req->vf_bitmap[bmap_index] =
9674 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9675 
9676 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9677 	if (status)
9678 		dev_err(&hdev->pdev->dev,
9679 			"Send port txvlan cfg command fail, ret =%d\n",
9680 			status);
9681 
9682 	return status;
9683 }
9684 
9685 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9686 {
9687 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9688 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9689 	struct hclge_dev *hdev = vport->back;
9690 	struct hclge_desc desc;
9691 	u16 bmap_index;
9692 	int status;
9693 
9694 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9695 
9696 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9697 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9698 		      vcfg->strip_tag1_en ? 1 : 0);
9699 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9700 		      vcfg->strip_tag2_en ? 1 : 0);
9701 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9702 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9703 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9704 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9705 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9706 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9707 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9708 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9709 
9710 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9711 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9712 			HCLGE_VF_NUM_PER_BYTE;
9713 	req->vf_bitmap[bmap_index] =
9714 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9715 
9716 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9717 	if (status)
9718 		dev_err(&hdev->pdev->dev,
9719 			"Send port rxvlan cfg command fail, ret =%d\n",
9720 			status);
9721 
9722 	return status;
9723 }
9724 
9725 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9726 				  u16 port_base_vlan_state,
9727 				  u16 vlan_tag)
9728 {
9729 	int ret;
9730 
9731 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9732 		vport->txvlan_cfg.accept_tag1 = true;
9733 		vport->txvlan_cfg.insert_tag1_en = false;
9734 		vport->txvlan_cfg.default_tag1 = 0;
9735 	} else {
9736 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9737 
9738 		vport->txvlan_cfg.accept_tag1 =
9739 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9740 		vport->txvlan_cfg.insert_tag1_en = true;
9741 		vport->txvlan_cfg.default_tag1 = vlan_tag;
9742 	}
9743 
9744 	vport->txvlan_cfg.accept_untag1 = true;
9745 
9746 	/* accept_tag2 and accept_untag2 are not supported on
9747 	 * pdev revision(0x20); newer revisions support them, but
9748 	 * these two fields cannot be configured by the user.
9749 	 */
9750 	vport->txvlan_cfg.accept_tag2 = true;
9751 	vport->txvlan_cfg.accept_untag2 = true;
9752 	vport->txvlan_cfg.insert_tag2_en = false;
9753 	vport->txvlan_cfg.default_tag2 = 0;
9754 	vport->txvlan_cfg.tag_shift_mode_en = true;
9755 
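	/* rx side: without a port based vlan, only tag2 stripping follows the
	 * rx vlan offload setting; with a port based vlan, tag2 is always
	 * stripped and discarded while tag1 stripping follows the setting
	 */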
9756 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9757 		vport->rxvlan_cfg.strip_tag1_en = false;
9758 		vport->rxvlan_cfg.strip_tag2_en =
9759 				vport->rxvlan_cfg.rx_vlan_offload_en;
9760 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9761 	} else {
9762 		vport->rxvlan_cfg.strip_tag1_en =
9763 				vport->rxvlan_cfg.rx_vlan_offload_en;
9764 		vport->rxvlan_cfg.strip_tag2_en = true;
9765 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9766 	}
9767 
9768 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9769 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9770 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9771 
9772 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9773 	if (ret)
9774 		return ret;
9775 
9776 	return hclge_set_vlan_rx_offload_cfg(vport);
9777 }
9778 
9779 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9780 {
9781 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9782 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9783 	struct hclge_desc desc;
9784 	int status;
9785 
9786 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9787 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9788 	rx_req->ot_fst_vlan_type =
9789 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9790 	rx_req->ot_sec_vlan_type =
9791 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9792 	rx_req->in_fst_vlan_type =
9793 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9794 	rx_req->in_sec_vlan_type =
9795 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9796 
9797 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9798 	if (status) {
9799 		dev_err(&hdev->pdev->dev,
9800 			"Send rxvlan protocol type command fail, ret =%d\n",
9801 			status);
9802 		return status;
9803 	}
9804 
9805 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9806 
9807 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9808 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9809 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9810 
9811 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9812 	if (status)
9813 		dev_err(&hdev->pdev->dev,
9814 			"Send txvlan protocol type command fail, ret =%d\n",
9815 			status);
9816 
9817 	return status;
9818 }
9819 
9820 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9821 {
9822 #define HCLGE_DEF_VLAN_TYPE		0x8100
9823 
9824 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9825 	struct hclge_vport *vport;
9826 	int ret;
9827 	int i;
9828 
9829 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9830 		/* for revision 0x21, vf vlan filter is per function */
9831 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9832 			vport = &hdev->vport[i];
9833 			ret = hclge_set_vlan_filter_ctrl(hdev,
9834 							 HCLGE_FILTER_TYPE_VF,
9835 							 HCLGE_FILTER_FE_EGRESS,
9836 							 true,
9837 							 vport->vport_id);
9838 			if (ret)
9839 				return ret;
9840 		}
9841 
9842 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9843 						 HCLGE_FILTER_FE_INGRESS, true,
9844 						 0);
9845 		if (ret)
9846 			return ret;
9847 	} else {
9848 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9849 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9850 						 true, 0);
9851 		if (ret)
9852 			return ret;
9853 	}
9854 
9855 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
9856 
9857 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9858 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9859 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9860 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9861 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9862 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9863 
9864 	ret = hclge_set_vlan_protocol_type(hdev);
9865 	if (ret)
9866 		return ret;
9867 
9868 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9869 		u16 vlan_tag;
9870 
9871 		vport = &hdev->vport[i];
9872 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9873 
9874 		ret = hclge_vlan_offload_cfg(vport,
9875 					     vport->port_base_vlan_cfg.state,
9876 					     vlan_tag);
9877 		if (ret)
9878 			return ret;
9879 	}
9880 
9881 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9882 }
9883 
9884 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9885 				       bool writen_to_tbl)
9886 {
9887 	struct hclge_vport_vlan_cfg *vlan;
9888 
9889 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9890 	if (!vlan)
9891 		return;
9892 
9893 	vlan->hd_tbl_status = writen_to_tbl;
9894 	vlan->vlan_id = vlan_id;
9895 
9896 	list_add_tail(&vlan->node, &vport->vlan_list);
9897 }
9898 
9899 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9900 {
9901 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9902 	struct hclge_dev *hdev = vport->back;
9903 	int ret;
9904 
9905 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9906 		if (!vlan->hd_tbl_status) {
9907 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9908 						       vport->vport_id,
9909 						       vlan->vlan_id, false);
9910 			if (ret) {
9911 				dev_err(&hdev->pdev->dev,
9912 					"restore vport vlan list failed, ret=%d\n",
9913 					ret);
9914 				return ret;
9915 			}
9916 		}
9917 		vlan->hd_tbl_status = true;
9918 	}
9919 
9920 	return 0;
9921 }
9922 
9923 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9924 				      bool is_write_tbl)
9925 {
9926 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9927 	struct hclge_dev *hdev = vport->back;
9928 
9929 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9930 		if (vlan->vlan_id == vlan_id) {
9931 			if (is_write_tbl && vlan->hd_tbl_status)
9932 				hclge_set_vlan_filter_hw(hdev,
9933 							 htons(ETH_P_8021Q),
9934 							 vport->vport_id,
9935 							 vlan_id,
9936 							 true);
9937 
9938 			list_del(&vlan->node);
9939 			kfree(vlan);
9940 			break;
9941 		}
9942 	}
9943 }
9944 
9945 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9946 {
9947 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9948 	struct hclge_dev *hdev = vport->back;
9949 
9950 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9951 		if (vlan->hd_tbl_status)
9952 			hclge_set_vlan_filter_hw(hdev,
9953 						 htons(ETH_P_8021Q),
9954 						 vport->vport_id,
9955 						 vlan->vlan_id,
9956 						 true);
9957 
9958 		vlan->hd_tbl_status = false;
9959 		if (is_del_list) {
9960 			list_del(&vlan->node);
9961 			kfree(vlan);
9962 		}
9963 	}
9964 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
9965 }
9966 
9967 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9968 {
9969 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9970 	struct hclge_vport *vport;
9971 	int i;
9972 
9973 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9974 		vport = &hdev->vport[i];
9975 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9976 			list_del(&vlan->node);
9977 			kfree(vlan);
9978 		}
9979 	}
9980 }
9981 
9982 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9983 {
9984 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9985 	struct hclge_dev *hdev = vport->back;
9986 	u16 vlan_proto;
9987 	u16 vlan_id;
9988 	u16 state;
9989 	int ret;
9990 
9991 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9992 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9993 	state = vport->port_base_vlan_cfg.state;
9994 
9995 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9996 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9997 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9998 					 vport->vport_id, vlan_id,
9999 					 false);
10000 		return;
10001 	}
10002 
10003 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10004 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10005 					       vport->vport_id,
10006 					       vlan->vlan_id, false);
10007 		if (ret)
10008 			break;
10009 		vlan->hd_tbl_status = true;
10010 	}
10011 }
10012 
10013 /* For global reset and imp reset, hardware clears the mac table, so we
10014  * change the mac address state from ACTIVE to TO_ADD, and they can then
10015  * be restored by the service task after the reset completes. Furthermore,
10016  * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
10017  * after reset, so just remove these mac nodes from the mac_list.
10018  */
10019 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10020 {
10021 	struct hclge_mac_node *mac_node, *tmp;
10022 
10023 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10024 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10025 			mac_node->state = HCLGE_MAC_TO_ADD;
10026 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10027 			list_del(&mac_node->node);
10028 			kfree(mac_node);
10029 		}
10030 	}
10031 }
10032 
10033 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10034 {
10035 	spin_lock_bh(&vport->mac_list_lock);
10036 
10037 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10038 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10039 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10040 
10041 	spin_unlock_bh(&vport->mac_list_lock);
10042 }
10043 
10044 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10045 {
10046 	struct hclge_vport *vport = &hdev->vport[0];
10047 	struct hnae3_handle *handle = &vport->nic;
10048 
10049 	hclge_restore_mac_table_common(vport);
10050 	hclge_restore_vport_vlan_table(vport);
10051 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10052 	hclge_restore_fd_entries(handle);
10053 }
10054 
10055 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10056 {
10057 	struct hclge_vport *vport = hclge_get_vport(handle);
10058 
10059 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10060 		vport->rxvlan_cfg.strip_tag1_en = false;
10061 		vport->rxvlan_cfg.strip_tag2_en = enable;
10062 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10063 	} else {
10064 		vport->rxvlan_cfg.strip_tag1_en = enable;
10065 		vport->rxvlan_cfg.strip_tag2_en = true;
10066 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10067 	}
10068 
10069 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10070 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10071 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10072 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10073 
10074 	return hclge_set_vlan_rx_offload_cfg(vport);
10075 }
10076 
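/* When enabling a port based vlan, all vlans in the vport list are removed
 * from hardware and only the new port vlan is programmed; otherwise the old
 * port vlan is removed and the vport vlan list is written back to hardware.
 */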
10077 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10078 					    u16 port_base_vlan_state,
10079 					    struct hclge_vlan_info *new_info,
10080 					    struct hclge_vlan_info *old_info)
10081 {
10082 	struct hclge_dev *hdev = vport->back;
10083 	int ret;
10084 
10085 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10086 		hclge_rm_vport_all_vlan_table(vport, false);
10087 		return hclge_set_vlan_filter_hw(hdev,
10088 						 htons(new_info->vlan_proto),
10089 						 vport->vport_id,
10090 						 new_info->vlan_tag,
10091 						 false);
10092 	}
10093 
10094 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10095 				       vport->vport_id, old_info->vlan_tag,
10096 				       true);
10097 	if (ret)
10098 		return ret;
10099 
10100 	return hclge_add_vport_all_vlan_table(vport);
10101 }
10102 
10103 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10104 				    struct hclge_vlan_info *vlan_info)
10105 {
10106 	struct hnae3_handle *nic = &vport->nic;
10107 	struct hclge_vlan_info *old_vlan_info;
10108 	struct hclge_dev *hdev = vport->back;
10109 	int ret;
10110 
10111 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10112 
10113 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
10114 	if (ret)
10115 		return ret;
10116 
10117 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10118 		/* add new VLAN tag */
10119 		ret = hclge_set_vlan_filter_hw(hdev,
10120 					       htons(vlan_info->vlan_proto),
10121 					       vport->vport_id,
10122 					       vlan_info->vlan_tag,
10123 					       false);
10124 		if (ret)
10125 			return ret;
10126 
10127 		/* remove old VLAN tag */
10128 		ret = hclge_set_vlan_filter_hw(hdev,
10129 					       htons(old_vlan_info->vlan_proto),
10130 					       vport->vport_id,
10131 					       old_vlan_info->vlan_tag,
10132 					       true);
10133 		if (ret)
10134 			return ret;
10135 
10136 		goto update;
10137 	}
10138 
10139 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10140 					       old_vlan_info);
10141 	if (ret)
10142 		return ret;
10143 
10144 	/* update the state only when disabling/enabling port based VLAN */
10145 	vport->port_base_vlan_cfg.state = state;
10146 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10147 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10148 	else
10149 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10150 
10151 update:
10152 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
10153 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
10154 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
10155 
10156 	return 0;
10157 }
10158 
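/* Decide how the port based vlan should change: requesting vlan 0 disables
 * it (a no-op if it is already disabled), requesting the currently
 * configured vlan is a no-op, and any other non-zero vlan enables or
 * modifies it.
 */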
10159 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10160 					  enum hnae3_port_base_vlan_state state,
10161 					  u16 vlan)
10162 {
10163 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10164 		if (!vlan)
10165 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10166 		else
10167 			return HNAE3_PORT_BASE_VLAN_ENABLE;
10168 	} else {
10169 		if (!vlan)
10170 			return HNAE3_PORT_BASE_VLAN_DISABLE;
10171 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
10172 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10173 		else
10174 			return HNAE3_PORT_BASE_VLAN_MODIFY;
10175 	}
10176 }
10177 
10178 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10179 				    u16 vlan, u8 qos, __be16 proto)
10180 {
10181 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10182 	struct hclge_vport *vport = hclge_get_vport(handle);
10183 	struct hclge_dev *hdev = vport->back;
10184 	struct hclge_vlan_info vlan_info;
10185 	u16 state;
10186 	int ret;
10187 
10188 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10189 		return -EOPNOTSUPP;
10190 
10191 	vport = hclge_get_vf_vport(hdev, vfid);
10192 	if (!vport)
10193 		return -EINVAL;
10194 
10195 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10196 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10197 		return -EINVAL;
10198 	if (proto != htons(ETH_P_8021Q))
10199 		return -EPROTONOSUPPORT;
10200 
10201 	state = hclge_get_port_base_vlan_state(vport,
10202 					       vport->port_base_vlan_cfg.state,
10203 					       vlan);
10204 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10205 		return 0;
10206 
10207 	vlan_info.vlan_tag = vlan;
10208 	vlan_info.qos = qos;
10209 	vlan_info.vlan_proto = ntohs(proto);
10210 
10211 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10212 	if (ret) {
10213 		dev_err(&hdev->pdev->dev,
10214 			"failed to update port base vlan for vf %d, ret = %d\n",
10215 			vfid, ret);
10216 		return ret;
10217 	}
10218 
10219 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10220 	 * VLAN state.
10221 	 */
10222 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10223 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10224 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10225 						  vport->vport_id, state,
10226 						  vlan, qos,
10227 						  ntohs(proto));
10228 
10229 	return 0;
10230 }
10231 
10232 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10233 {
10234 	struct hclge_vlan_info *vlan_info;
10235 	struct hclge_vport *vport;
10236 	int ret;
10237 	int vf;
10238 
10239 	/* clear the port based vlan for all vfs */
10240 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10241 		vport = &hdev->vport[vf];
10242 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10243 
10244 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10245 					       vport->vport_id,
10246 					       vlan_info->vlan_tag, true);
10247 		if (ret)
10248 			dev_err(&hdev->pdev->dev,
10249 				"failed to clear vf vlan for vf%d, ret = %d\n",
10250 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10251 	}
10252 }
10253 
10254 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10255 			  u16 vlan_id, bool is_kill)
10256 {
10257 	struct hclge_vport *vport = hclge_get_vport(handle);
10258 	struct hclge_dev *hdev = vport->back;
10259 	bool writen_to_tbl = false;
10260 	int ret = 0;
10261 
10262 	/* When the device is resetting or the reset has failed, firmware is
10263 	 * unable to handle the mailbox. Just record the vlan id, and remove
10264 	 * it after the reset finishes.
10265 	 */
10266 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10267 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10268 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10269 		return -EBUSY;
10270 	}
10271 
10272 	/* when port based vlan is enabled, we use the port based vlan as the
10273 	 * vlan filter entry. In this case, we don't update the vlan filter
10274 	 * table when the user adds a new vlan or removes an existing one, we
10275 	 * just update the vport vlan list. The vlan ids in the vlan list are
10276 	 * written to the vlan filter table once port based vlan is disabled.
10277 	 */
10278 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10279 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10280 					       vlan_id, is_kill);
10281 		writen_to_tbl = true;
10282 	}
10283 
10284 	if (!ret) {
10285 		if (is_kill)
10286 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10287 		else
10288 			hclge_add_vport_vlan_table(vport, vlan_id,
10289 						   writen_to_tbl);
10290 	} else if (is_kill) {
10291 		/* when removing the hw vlan filter failed, record the vlan
10292 		 * id and try to remove it from hw later, to be consistent
10293 		 * with the stack
10294 		 */
10295 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10296 	}
10297 	return ret;
10298 }
10299 
10300 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10301 {
10302 #define HCLGE_MAX_SYNC_COUNT	60
10303 
10304 	int i, ret, sync_cnt = 0;
10305 	u16 vlan_id;
10306 
10307 	/* retry the vlan deletions that previously failed, for every vport */
10308 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10309 		struct hclge_vport *vport = &hdev->vport[i];
10310 
10311 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10312 					 VLAN_N_VID);
10313 		while (vlan_id != VLAN_N_VID) {
10314 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10315 						       vport->vport_id, vlan_id,
10316 						       true);
10317 			if (ret && ret != -EINVAL)
10318 				return;
10319 
10320 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10321 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10322 
10323 			sync_cnt++;
10324 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10325 				return;
10326 
10327 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10328 						 VLAN_N_VID);
10329 		}
10330 	}
10331 }
10332 
10333 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10334 {
10335 	struct hclge_config_max_frm_size_cmd *req;
10336 	struct hclge_desc desc;
10337 
10338 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10339 
10340 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10341 	req->max_frm_size = cpu_to_le16(new_mps);
10342 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10343 
10344 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10345 }
10346 
10347 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10348 {
10349 	struct hclge_vport *vport = hclge_get_vport(handle);
10350 
10351 	return hclge_set_vport_mtu(vport, new_mtu);
10352 }
10353 
10354 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10355 {
10356 	struct hclge_dev *hdev = vport->back;
10357 	int i, max_frm_size, ret;
10358 
10359 	/* HW supports 2 layers of vlan */
10360 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10361 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10362 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10363 		return -EINVAL;
10364 
10365 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10366 	mutex_lock(&hdev->vport_lock);
10367 	/* VF's mps must fit within hdev->mps */
10368 	if (vport->vport_id && max_frm_size > hdev->mps) {
10369 		mutex_unlock(&hdev->vport_lock);
10370 		return -EINVAL;
10371 	} else if (vport->vport_id) {
10372 		vport->mps = max_frm_size;
10373 		mutex_unlock(&hdev->vport_lock);
10374 		return 0;
10375 	}
10376 
10377 	/* PF's mps must be greater than or equal to each VF's mps */
10378 	for (i = 1; i < hdev->num_alloc_vport; i++)
10379 		if (max_frm_size < hdev->vport[i].mps) {
10380 			mutex_unlock(&hdev->vport_lock);
10381 			return -EINVAL;
10382 		}
10383 
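	/* only the PF touches the hardware: stop the client, update the MAC
	 * max frame size, then reallocate rx buffers for the new mps
	 */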
10384 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10385 
10386 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10387 	if (ret) {
10388 		dev_err(&hdev->pdev->dev,
10389 			"Change mtu fail, ret =%d\n", ret);
10390 		goto out;
10391 	}
10392 
10393 	hdev->mps = max_frm_size;
10394 	vport->mps = max_frm_size;
10395 
10396 	ret = hclge_buffer_alloc(hdev);
10397 	if (ret)
10398 		dev_err(&hdev->pdev->dev,
10399 			"Allocate buffer fail, ret =%d\n", ret);
10400 
10401 out:
10402 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10403 	mutex_unlock(&hdev->vport_lock);
10404 	return ret;
10405 }
10406 
10407 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10408 				    bool enable)
10409 {
10410 	struct hclge_reset_tqp_queue_cmd *req;
10411 	struct hclge_desc desc;
10412 	int ret;
10413 
10414 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10415 
10416 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10417 	req->tqp_id = cpu_to_le16(queue_id);
10418 	if (enable)
10419 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10420 
10421 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10422 	if (ret) {
10423 		dev_err(&hdev->pdev->dev,
10424 			"Send tqp reset cmd error, status =%d\n", ret);
10425 		return ret;
10426 	}
10427 
10428 	return 0;
10429 }
10430 
10431 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10432 {
10433 	struct hclge_reset_tqp_queue_cmd *req;
10434 	struct hclge_desc desc;
10435 	int ret;
10436 
10437 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10438 
10439 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10440 	req->tqp_id = cpu_to_le16(queue_id);
10441 
10442 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10443 	if (ret) {
10444 		dev_err(&hdev->pdev->dev,
10445 			"Get reset status error, status =%d\n", ret);
10446 		return ret;
10447 	}
10448 
10449 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10450 }
10451 
10452 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10453 {
10454 	struct hnae3_queue *queue;
10455 	struct hclge_tqp *tqp;
10456 
10457 	queue = handle->kinfo.tqp[queue_id];
10458 	tqp = container_of(queue, struct hclge_tqp, q);
10459 
10460 	return tqp->index;
10461 }
10462 
10463 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10464 {
10465 	struct hclge_vport *vport = hclge_get_vport(handle);
10466 	struct hclge_dev *hdev = vport->back;
10467 	u16 reset_try_times = 0;
10468 	int reset_status;
10469 	u16 queue_gid;
10470 	int ret;
10471 	u16 i;
10472 
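	/* reset each queue in turn: request the reset, poll until the
	 * hardware reset completes, then deassert the reset request
	 */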
10473 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10474 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10475 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10476 		if (ret) {
10477 			dev_err(&hdev->pdev->dev,
10478 				"failed to send reset tqp cmd, ret = %d\n",
10479 				ret);
10480 			return ret;
10481 		}
10482 
10483 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10484 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10485 			if (reset_status)
10486 				break;
10487 
10488 			/* Wait for tqp hw reset */
10489 			usleep_range(1000, 1200);
10490 		}
10491 
10492 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10493 			dev_err(&hdev->pdev->dev,
10494 				"wait for tqp hw reset timeout\n");
10495 			return -ETIME;
10496 		}
10497 
10498 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10499 		if (ret) {
10500 			dev_err(&hdev->pdev->dev,
10501 				"failed to deassert soft reset, ret = %d\n",
10502 				ret);
10503 			return ret;
10504 		}
10505 		reset_try_times = 0;
10506 	}
10507 	return 0;
10508 }
10509 
10510 static int hclge_reset_rcb(struct hnae3_handle *handle)
10511 {
10512 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10513 #define HCLGE_RESET_RCB_SUCCESS		1U
10514 
10515 	struct hclge_vport *vport = hclge_get_vport(handle);
10516 	struct hclge_dev *hdev = vport->back;
10517 	struct hclge_reset_cmd *req;
10518 	struct hclge_desc desc;
10519 	u8 return_status;
10520 	u16 queue_gid;
10521 	int ret;
10522 
10523 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10524 
10525 	req = (struct hclge_reset_cmd *)desc.data;
10526 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10527 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10528 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10529 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10530 
10531 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10532 	if (ret) {
10533 		dev_err(&hdev->pdev->dev,
10534 			"failed to send rcb reset cmd, ret = %d\n", ret);
10535 		return ret;
10536 	}
10537 
10538 	return_status = req->fun_reset_rcb_return_status;
10539 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10540 		return 0;
10541 
10542 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10543 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10544 			return_status);
10545 		return -EIO;
10546 	}
10547 
10548 	/* if the reset rcb command is not supported, fall back to resetting
10549 	 * each tqp individually with the reset tqp command
10550 	 */
10551 	return hclge_reset_tqp_cmd(handle);
10552 }
10553 
10554 int hclge_reset_tqp(struct hnae3_handle *handle)
10555 {
10556 	struct hclge_vport *vport = hclge_get_vport(handle);
10557 	struct hclge_dev *hdev = vport->back;
10558 	int ret;
10559 
10560 	/* only need to disable PF's tqp */
10561 	if (!vport->vport_id) {
10562 		ret = hclge_tqp_enable(handle, false);
10563 		if (ret) {
10564 			dev_err(&hdev->pdev->dev,
10565 				"failed to disable tqp, ret = %d\n", ret);
10566 			return ret;
10567 		}
10568 	}
10569 
10570 	return hclge_reset_rcb(handle);
10571 }
10572 
10573 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10574 {
10575 	struct hclge_vport *vport = hclge_get_vport(handle);
10576 	struct hclge_dev *hdev = vport->back;
10577 
10578 	return hdev->fw_version;
10579 }
10580 
10581 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10582 {
10583 	struct phy_device *phydev = hdev->hw.mac.phydev;
10584 
10585 	if (!phydev)
10586 		return;
10587 
10588 	phy_set_asym_pause(phydev, rx_en, tx_en);
10589 }
10590 
10591 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10592 {
10593 	int ret;
10594 
10595 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10596 		return 0;
10597 
10598 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10599 	if (ret)
10600 		dev_err(&hdev->pdev->dev,
10601 			"configure pauseparam error, ret = %d.\n", ret);
10602 
10603 	return ret;
10604 }
10605 
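/* Resolve the pause configuration from the autoneg result: combine the local
 * advertisement with the link partner's pause/asym-pause bits through
 * mii_resolve_flowctrl_fdx(), and disable pause entirely in half duplex.
 */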
10606 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10607 {
10608 	struct phy_device *phydev = hdev->hw.mac.phydev;
10609 	u16 remote_advertising = 0;
10610 	u16 local_advertising;
10611 	u32 rx_pause, tx_pause;
10612 	u8 flowctl;
10613 
10614 	if (!phydev->link || !phydev->autoneg)
10615 		return 0;
10616 
10617 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10618 
10619 	if (phydev->pause)
10620 		remote_advertising = LPA_PAUSE_CAP;
10621 
10622 	if (phydev->asym_pause)
10623 		remote_advertising |= LPA_PAUSE_ASYM;
10624 
10625 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10626 					   remote_advertising);
10627 	tx_pause = flowctl & FLOW_CTRL_TX;
10628 	rx_pause = flowctl & FLOW_CTRL_RX;
10629 
10630 	if (phydev->duplex == HCLGE_MAC_HALF) {
10631 		tx_pause = 0;
10632 		rx_pause = 0;
10633 	}
10634 
10635 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10636 }
10637 
10638 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10639 				 u32 *rx_en, u32 *tx_en)
10640 {
10641 	struct hclge_vport *vport = hclge_get_vport(handle);
10642 	struct hclge_dev *hdev = vport->back;
10643 	u8 media_type = hdev->hw.mac.media_type;
10644 
10645 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10646 		    hclge_get_autoneg(handle) : 0;
10647 
10648 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10649 		*rx_en = 0;
10650 		*tx_en = 0;
10651 		return;
10652 	}
10653 
10654 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10655 		*rx_en = 1;
10656 		*tx_en = 0;
10657 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10658 		*tx_en = 1;
10659 		*rx_en = 0;
10660 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10661 		*rx_en = 1;
10662 		*tx_en = 1;
10663 	} else {
10664 		*rx_en = 0;
10665 		*tx_en = 0;
10666 	}
10667 }
10668 
10669 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10670 					 u32 rx_en, u32 tx_en)
10671 {
10672 	if (rx_en && tx_en)
10673 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10674 	else if (rx_en && !tx_en)
10675 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10676 	else if (!rx_en && tx_en)
10677 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10678 	else
10679 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10680 
10681 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10682 }
10683 
10684 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10685 				u32 rx_en, u32 tx_en)
10686 {
10687 	struct hclge_vport *vport = hclge_get_vport(handle);
10688 	struct hclge_dev *hdev = vport->back;
10689 	struct phy_device *phydev = hdev->hw.mac.phydev;
10690 	u32 fc_autoneg;
10691 
10692 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10693 		fc_autoneg = hclge_get_autoneg(handle);
10694 		if (auto_neg != fc_autoneg) {
10695 			dev_info(&hdev->pdev->dev,
10696 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10697 			return -EOPNOTSUPP;
10698 		}
10699 	}
10700 
10701 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10702 		dev_info(&hdev->pdev->dev,
10703 			 "Priority flow control enabled. Cannot set link flow control.\n");
10704 		return -EOPNOTSUPP;
10705 	}
10706 
10707 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10708 
10709 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10710 
10711 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10712 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10713 
10714 	if (phydev)
10715 		return phy_start_aneg(phydev);
10716 
10717 	return -EOPNOTSUPP;
10718 }
10719 
10720 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10721 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10722 {
10723 	struct hclge_vport *vport = hclge_get_vport(handle);
10724 	struct hclge_dev *hdev = vport->back;
10725 
10726 	if (speed)
10727 		*speed = hdev->hw.mac.speed;
10728 	if (duplex)
10729 		*duplex = hdev->hw.mac.duplex;
10730 	if (auto_neg)
10731 		*auto_neg = hdev->hw.mac.autoneg;
10732 }
10733 
10734 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10735 				 u8 *module_type)
10736 {
10737 	struct hclge_vport *vport = hclge_get_vport(handle);
10738 	struct hclge_dev *hdev = vport->back;
10739 
10740 	/* When the nic is down, the service task is not running and does not
10741 	 * update the port information periodically. Query the port information
10742 	 * before returning the media type to ensure it is up to date.
10743 	 */
10744 	hclge_update_port_info(hdev);
10745 
10746 	if (media_type)
10747 		*media_type = hdev->hw.mac.media_type;
10748 
10749 	if (module_type)
10750 		*module_type = hdev->hw.mac.module_type;
10751 }
10752 
10753 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10754 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10755 {
10756 	struct hclge_vport *vport = hclge_get_vport(handle);
10757 	struct hclge_dev *hdev = vport->back;
10758 	struct phy_device *phydev = hdev->hw.mac.phydev;
10759 	int mdix_ctrl, mdix, is_resolved;
10760 	unsigned int retval;
10761 
10762 	if (!phydev) {
10763 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10764 		*tp_mdix = ETH_TP_MDI_INVALID;
10765 		return;
10766 	}
10767 
10768 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10769 
10770 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10771 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10772 				    HCLGE_PHY_MDIX_CTRL_S);
10773 
10774 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10775 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10776 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10777 
10778 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10779 
10780 	switch (mdix_ctrl) {
10781 	case 0x0:
10782 		*tp_mdix_ctrl = ETH_TP_MDI;
10783 		break;
10784 	case 0x1:
10785 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10786 		break;
10787 	case 0x3:
10788 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10789 		break;
10790 	default:
10791 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10792 		break;
10793 	}
10794 
10795 	if (!is_resolved)
10796 		*tp_mdix = ETH_TP_MDI_INVALID;
10797 	else if (mdix)
10798 		*tp_mdix = ETH_TP_MDI_X;
10799 	else
10800 		*tp_mdix = ETH_TP_MDI;
10801 }
10802 
10803 static void hclge_info_show(struct hclge_dev *hdev)
10804 {
10805 	struct device *dev = &hdev->pdev->dev;
10806 
10807 	dev_info(dev, "PF info begin:\n");
10808 
10809 	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
10810 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10811 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10812 	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
10813 	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
10814 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10815 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10816 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10817 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10818 	dev_info(dev, "This is %s PF\n",
10819 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10820 	dev_info(dev, "DCB %s\n",
10821 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10822 	dev_info(dev, "MQPRIO %s\n",
10823 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10824 
10825 	dev_info(dev, "PF info end.\n");
10826 }
10827 
10828 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10829 					  struct hclge_vport *vport)
10830 {
10831 	struct hnae3_client *client = vport->nic.client;
10832 	struct hclge_dev *hdev = ae_dev->priv;
10833 	int rst_cnt = hdev->rst_stats.reset_cnt;
10834 	int ret;
10835 
10836 	ret = client->ops->init_instance(&vport->nic);
10837 	if (ret)
10838 		return ret;
10839 
10840 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10841 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10842 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10843 		ret = -EBUSY;
10844 		goto init_nic_err;
10845 	}
10846 
10847 	/* Enable nic hw error interrupts */
10848 	ret = hclge_config_nic_hw_error(hdev, true);
10849 	if (ret) {
10850 		dev_err(&ae_dev->pdev->dev,
10851 			"fail(%d) to enable hw error interrupts\n", ret);
10852 		goto init_nic_err;
10853 	}
10854 
10855 	hnae3_set_client_init_flag(client, ae_dev, 1);
10856 
10857 	if (netif_msg_drv(&hdev->vport->nic))
10858 		hclge_info_show(hdev);
10859 
10860 	return ret;
10861 
10862 init_nic_err:
10863 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10864 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10865 		msleep(HCLGE_WAIT_RESET_DONE);
10866 
10867 	client->ops->uninit_instance(&vport->nic, 0);
10868 
10869 	return ret;
10870 }
10871 
10872 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10873 					   struct hclge_vport *vport)
10874 {
10875 	struct hclge_dev *hdev = ae_dev->priv;
10876 	struct hnae3_client *client;
10877 	int rst_cnt;
10878 	int ret;
10879 
10880 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10881 	    !hdev->nic_client)
10882 		return 0;
10883 
10884 	client = hdev->roce_client;
10885 	ret = hclge_init_roce_base_info(vport);
10886 	if (ret)
10887 		return ret;
10888 
10889 	rst_cnt = hdev->rst_stats.reset_cnt;
10890 	ret = client->ops->init_instance(&vport->roce);
10891 	if (ret)
10892 		return ret;
10893 
10894 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10895 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10896 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10897 		ret = -EBUSY;
10898 		goto init_roce_err;
10899 	}
10900 
10901 	/* Enable roce ras interrupts */
10902 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
10903 	if (ret) {
10904 		dev_err(&ae_dev->pdev->dev,
10905 			"fail(%d) to enable roce ras interrupts\n", ret);
10906 		goto init_roce_err;
10907 	}
10908 
10909 	hnae3_set_client_init_flag(client, ae_dev, 1);
10910 
10911 	return 0;
10912 
10913 init_roce_err:
10914 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10915 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10916 		msleep(HCLGE_WAIT_RESET_DONE);
10917 
10918 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10919 
10920 	return ret;
10921 }
10922 
10923 static int hclge_init_client_instance(struct hnae3_client *client,
10924 				      struct hnae3_ae_dev *ae_dev)
10925 {
10926 	struct hclge_dev *hdev = ae_dev->priv;
10927 	struct hclge_vport *vport = &hdev->vport[0];
10928 	int ret;
10929 
10930 	switch (client->type) {
10931 	case HNAE3_CLIENT_KNIC:
10932 		hdev->nic_client = client;
10933 		vport->nic.client = client;
10934 		ret = hclge_init_nic_client_instance(ae_dev, vport);
10935 		if (ret)
10936 			goto clear_nic;
10937 
10938 		ret = hclge_init_roce_client_instance(ae_dev, vport);
10939 		if (ret)
10940 			goto clear_roce;
10941 
10942 		break;
10943 	case HNAE3_CLIENT_ROCE:
10944 		if (hnae3_dev_roce_supported(hdev)) {
10945 			hdev->roce_client = client;
10946 			vport->roce.client = client;
10947 		}
10948 
10949 		ret = hclge_init_roce_client_instance(ae_dev, vport);
10950 		if (ret)
10951 			goto clear_roce;
10952 
10953 		break;
10954 	default:
10955 		return -EINVAL;
10956 	}
10957 
10958 	return 0;
10959 
10960 clear_nic:
10961 	hdev->nic_client = NULL;
10962 	vport->nic.client = NULL;
10963 	return ret;
10964 clear_roce:
10965 	hdev->roce_client = NULL;
10966 	vport->roce.client = NULL;
10967 	return ret;
10968 }
10969 
10970 static void hclge_uninit_client_instance(struct hnae3_client *client,
10971 					 struct hnae3_ae_dev *ae_dev)
10972 {
10973 	struct hclge_dev *hdev = ae_dev->priv;
10974 	struct hclge_vport *vport = &hdev->vport[0];
10975 
10976 	if (hdev->roce_client) {
10977 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10978 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10979 			msleep(HCLGE_WAIT_RESET_DONE);
10980 
10981 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10982 		hdev->roce_client = NULL;
10983 		vport->roce.client = NULL;
10984 	}
10985 	if (client->type == HNAE3_CLIENT_ROCE)
10986 		return;
10987 	if (hdev->nic_client && client->ops->uninit_instance) {
10988 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10989 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10990 			msleep(HCLGE_WAIT_RESET_DONE);
10991 
10992 		client->ops->uninit_instance(&vport->nic, 0);
10993 		hdev->nic_client = NULL;
10994 		vport->nic.client = NULL;
10995 	}
10996 }
10997 
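/* Map the optional device memory BAR (BAR 4) with write combining. Devices
 * without this BAR skip the mapping.
 */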
10998 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10999 {
11000 #define HCLGE_MEM_BAR		4
11001 
11002 	struct pci_dev *pdev = hdev->pdev;
11003 	struct hclge_hw *hw = &hdev->hw;
11004 
11005 	/* return directly if the device does not have device memory */
11006 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11007 		return 0;
11008 
11009 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11010 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11011 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11012 	if (!hw->mem_base) {
11013 		dev_err(&pdev->dev, "failed to map device memory\n");
11014 		return -EFAULT;
11015 	}
11016 
11017 	return 0;
11018 }
11019 
11020 static int hclge_pci_init(struct hclge_dev *hdev)
11021 {
11022 	struct pci_dev *pdev = hdev->pdev;
11023 	struct hclge_hw *hw;
11024 	int ret;
11025 
11026 	ret = pci_enable_device(pdev);
11027 	if (ret) {
11028 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11029 		return ret;
11030 	}
11031 
11032 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11033 	if (ret) {
11034 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11035 		if (ret) {
11036 			dev_err(&pdev->dev,
11037 				"can't set consistent PCI DMA\n");
11038 			goto err_disable_device;
11039 		}
11040 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11041 	}
11042 
11043 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11044 	if (ret) {
11045 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11046 		goto err_disable_device;
11047 	}
11048 
11049 	pci_set_master(pdev);
11050 	hw = &hdev->hw;
11051 	hw->io_base = pcim_iomap(pdev, 2, 0);
11052 	if (!hw->io_base) {
11053 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11054 		ret = -ENOMEM;
11055 		goto err_clr_master;
11056 	}
11057 
11058 	ret = hclge_dev_mem_map(hdev);
11059 	if (ret)
11060 		goto err_unmap_io_base;
11061 
11062 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11063 
11064 	return 0;
11065 
11066 err_unmap_io_base:
11067 	pcim_iounmap(pdev, hdev->hw.io_base);
11068 err_clr_master:
11069 	pci_clear_master(pdev);
11070 	pci_release_regions(pdev);
11071 err_disable_device:
11072 	pci_disable_device(pdev);
11073 
11074 	return ret;
11075 }
11076 
11077 static void hclge_pci_uninit(struct hclge_dev *hdev)
11078 {
11079 	struct pci_dev *pdev = hdev->pdev;
11080 
11081 	if (hdev->hw.mem_base)
11082 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11083 
11084 	pcim_iounmap(pdev, hdev->hw.io_base);
11085 	pci_free_irq_vectors(pdev);
11086 	pci_clear_master(pdev);
11087 	pci_release_mem_regions(pdev);
11088 	pci_disable_device(pdev);
11089 }
11090 
11091 static void hclge_state_init(struct hclge_dev *hdev)
11092 {
11093 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11094 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11095 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11096 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11097 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11098 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11099 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11100 }
11101 
11102 static void hclge_state_uninit(struct hclge_dev *hdev)
11103 {
11104 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11105 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11106 
11107 	if (hdev->reset_timer.function)
11108 		del_timer_sync(&hdev->reset_timer);
11109 	if (hdev->service_task.work.func)
11110 		cancel_delayed_work_sync(&hdev->service_task);
11111 }
11112 
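/* Prepare the device for an externally triggered reset (such as an FLR).
 * If the prepare step fails or another reset is pending, release the reset
 * semaphore and retry after a short delay before disabling the misc vector.
 */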
11113 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11114 					enum hnae3_reset_type rst_type)
11115 {
11116 #define HCLGE_RESET_RETRY_WAIT_MS	500
11117 #define HCLGE_RESET_RETRY_CNT	5
11118 
11119 	struct hclge_dev *hdev = ae_dev->priv;
11120 	int retry_cnt = 0;
11121 	int ret;
11122 
11123 retry:
11124 	down(&hdev->reset_sem);
11125 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11126 	hdev->reset_type = rst_type;
11127 	ret = hclge_reset_prepare(hdev);
11128 	if (ret || hdev->reset_pending) {
11129 		dev_err(&hdev->pdev->dev, "failed to prepare for reset, ret=%d\n",
11130 			ret);
11131 		if (hdev->reset_pending ||
11132 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11133 			dev_err(&hdev->pdev->dev,
11134 				"reset_pending:0x%lx, retry_cnt:%d\n",
11135 				hdev->reset_pending, retry_cnt);
11136 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11137 			up(&hdev->reset_sem);
11138 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11139 			goto retry;
11140 		}
11141 	}
11142 
11143 	/* disable misc vector before reset done */
11144 	hclge_enable_vector(&hdev->misc_vector, false);
11145 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11146 
11147 	if (hdev->reset_type == HNAE3_FLR_RESET)
11148 		hdev->rst_stats.flr_rst_cnt++;
11149 }
11150 
11151 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11152 {
11153 	struct hclge_dev *hdev = ae_dev->priv;
11154 	int ret;
11155 
11156 	hclge_enable_vector(&hdev->misc_vector, true);
11157 
11158 	ret = hclge_reset_rebuild(hdev);
11159 	if (ret)
11160 		dev_err(&hdev->pdev->dev, "failed to rebuild, ret=%d\n", ret);
11161 
11162 	hdev->reset_type = HNAE3_NONE_RESET;
11163 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11164 	up(&hdev->reset_sem);
11165 }
11166 
11167 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11168 {
11169 	u16 i;
11170 
11171 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11172 		struct hclge_vport *vport = &hdev->vport[i];
11173 		int ret;
11174 
11175 		 /* Send cmd to clear VF's FUNC_RST_ING */
11176 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11177 		if (ret)
11178 			dev_warn(&hdev->pdev->dev,
11179 				 "clear vf(%u) rst failed %d!\n",
11180 				 vport->vport_id, ret);
11181 	}
11182 }
11183 
11184 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11185 {
11186 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11187 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11188 }
11189 
11190 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11191 {
11192 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11193 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11194 }
11195 
11196 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11197 {
11198 	struct pci_dev *pdev = ae_dev->pdev;
11199 	struct hclge_dev *hdev;
11200 	int ret;
11201 
11202 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11203 	if (!hdev)
11204 		return -ENOMEM;
11205 
11206 	hdev->pdev = pdev;
11207 	hdev->ae_dev = ae_dev;
11208 	hdev->reset_type = HNAE3_NONE_RESET;
11209 	hdev->reset_level = HNAE3_FUNC_RESET;
11210 	ae_dev->priv = hdev;
11211 
11212 	/* HW supports two layers of VLAN tags */
11213 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11214 
11215 	mutex_init(&hdev->vport_lock);
11216 	spin_lock_init(&hdev->fd_rule_lock);
11217 	sema_init(&hdev->reset_sem, 1);
11218 
11219 	ret = hclge_pci_init(hdev);
11220 	if (ret)
11221 		goto out;
11222 
11223 	/* Initialize the firmware command queue */
11224 	ret = hclge_cmd_queue_init(hdev);
11225 	if (ret)
11226 		goto err_pci_uninit;
11227 
11228 	/* Initialize the firmware command interface */
11229 	ret = hclge_cmd_init(hdev);
11230 	if (ret)
11231 		goto err_cmd_uninit;
11232 
11233 	ret = hclge_get_cap(hdev);
11234 	if (ret)
11235 		goto err_cmd_uninit;
11236 
11237 	ret = hclge_query_dev_specs(hdev);
11238 	if (ret) {
11239 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11240 			ret);
11241 		goto err_cmd_uninit;
11242 	}
11243 
11244 	ret = hclge_configure(hdev);
11245 	if (ret) {
11246 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11247 		goto err_cmd_uninit;
11248 	}
11249 
11250 	ret = hclge_init_msi(hdev);
11251 	if (ret) {
11252 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11253 		goto err_cmd_uninit;
11254 	}
11255 
11256 	ret = hclge_misc_irq_init(hdev);
11257 	if (ret)
11258 		goto err_msi_uninit;
11259 
11260 	ret = hclge_alloc_tqps(hdev);
11261 	if (ret) {
11262 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11263 		goto err_msi_irq_uninit;
11264 	}
11265 
11266 	ret = hclge_alloc_vport(hdev);
11267 	if (ret)
11268 		goto err_msi_irq_uninit;
11269 
11270 	ret = hclge_map_tqp(hdev);
11271 	if (ret)
11272 		goto err_msi_irq_uninit;
11273 
11274 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11275 	    !hnae3_dev_phy_imp_supported(hdev)) {
11276 		ret = hclge_mac_mdio_config(hdev);
11277 		if (ret)
11278 			goto err_msi_irq_uninit;
11279 	}
11280 
11281 	ret = hclge_init_umv_space(hdev);
11282 	if (ret)
11283 		goto err_mdiobus_unreg;
11284 
11285 	ret = hclge_mac_init(hdev);
11286 	if (ret) {
11287 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11288 		goto err_mdiobus_unreg;
11289 	}
11290 
11291 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11292 	if (ret) {
11293 		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
11294 		goto err_mdiobus_unreg;
11295 	}
11296 
11297 	ret = hclge_config_gro(hdev, true);
11298 	if (ret)
11299 		goto err_mdiobus_unreg;
11300 
11301 	ret = hclge_init_vlan_config(hdev);
11302 	if (ret) {
11303 		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
11304 		goto err_mdiobus_unreg;
11305 	}
11306 
11307 	ret = hclge_tm_schd_init(hdev);
11308 	if (ret) {
11309 		dev_err(&pdev->dev, "tm schd init fail, ret = %d\n", ret);
11310 		goto err_mdiobus_unreg;
11311 	}
11312 
11313 	ret = hclge_rss_init_cfg(hdev);
11314 	if (ret) {
11315 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11316 		goto err_mdiobus_unreg;
11317 	}
11318 
11319 	ret = hclge_rss_init_hw(hdev);
11320 	if (ret) {
11321 		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
11322 		goto err_mdiobus_unreg;
11323 	}
11324 
11325 	ret = init_mgr_tbl(hdev);
11326 	if (ret) {
11327 		dev_err(&pdev->dev, "manager table init fail, ret = %d\n", ret);
11328 		goto err_mdiobus_unreg;
11329 	}
11330 
11331 	ret = hclge_init_fd_config(hdev);
11332 	if (ret) {
11333 		dev_err(&pdev->dev,
11334 			"fd table init fail, ret=%d\n", ret);
11335 		goto err_mdiobus_unreg;
11336 	}
11337 
11338 	INIT_KFIFO(hdev->mac_tnl_log);
11339 
11340 	hclge_dcb_ops_set(hdev);
11341 
11342 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11343 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11344 
11345 	/* Set up affinity after the service timer setup because add_timer_on
11346 	 * is called from the affinity notify callback.
11347 	 */
11348 	hclge_misc_affinity_setup(hdev);
11349 
11350 	hclge_clear_all_event_cause(hdev);
11351 	hclge_clear_resetting_state(hdev);
11352 
11353 	/* Log and clear the hw errors that have already occurred */
11354 	hclge_handle_all_hns_hw_errors(ae_dev);
11355 
11356 	/* Request a delayed reset for error recovery, because an immediate
11357 	 * global reset on one PF could affect pending initialization of others.
11358 	 */
11359 	if (ae_dev->hw_err_reset_req) {
11360 		enum hnae3_reset_type reset_level;
11361 
11362 		reset_level = hclge_get_reset_level(ae_dev,
11363 						    &ae_dev->hw_err_reset_req);
11364 		hclge_set_def_reset_request(ae_dev, reset_level);
11365 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11366 	}
11367 
11368 	hclge_init_rxd_adv_layout(hdev);
11369 
11370 	/* Enable MISC vector(vector0) */
11371 	hclge_enable_vector(&hdev->misc_vector, true);
11372 
11373 	hclge_state_init(hdev);
11374 	hdev->last_reset_time = jiffies;
11375 
11376 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11377 		 HCLGE_DRIVER_NAME);
11378 
11379 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11380 
11381 	return 0;
11382 
11383 err_mdiobus_unreg:
11384 	if (hdev->hw.mac.phydev)
11385 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11386 err_msi_irq_uninit:
11387 	hclge_misc_irq_uninit(hdev);
11388 err_msi_uninit:
11389 	pci_free_irq_vectors(pdev);
11390 err_cmd_uninit:
11391 	hclge_cmd_uninit(hdev);
11392 err_pci_uninit:
11393 	pcim_iounmap(pdev, hdev->hw.io_base);
11394 	pci_clear_master(pdev);
11395 	pci_release_regions(pdev);
11396 	pci_disable_device(pdev);
11397 out:
11398 	mutex_destroy(&hdev->vport_lock);
11399 	return ret;
11400 }
11401 
11402 static void hclge_stats_clear(struct hclge_dev *hdev)
11403 {
11404 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11405 }
11406 
11407 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11408 {
11409 	return hclge_config_switch_param(hdev, vf, enable,
11410 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11411 }
11412 
11413 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11414 {
11415 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11416 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11417 					  enable, vf);
11418 }
11419 
11420 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11421 {
11422 	int ret;
11423 
11424 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11425 	if (ret) {
11426 		dev_err(&hdev->pdev->dev,
11427 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11428 			vf, enable ? "on" : "off", ret);
11429 		return ret;
11430 	}
11431 
11432 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11433 	if (ret)
11434 		dev_err(&hdev->pdev->dev,
11435 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11436 			vf, enable ? "on" : "off", ret);
11437 
11438 	return ret;
11439 }
11440 
11441 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11442 				 bool enable)
11443 {
11444 	struct hclge_vport *vport = hclge_get_vport(handle);
11445 	struct hclge_dev *hdev = vport->back;
11446 	u32 new_spoofchk = enable ? 1 : 0;
11447 	int ret;
11448 
11449 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11450 		return -EOPNOTSUPP;
11451 
11452 	vport = hclge_get_vf_vport(hdev, vf);
11453 	if (!vport)
11454 		return -EINVAL;
11455 
11456 	if (vport->vf_info.spoofchk == new_spoofchk)
11457 		return 0;
11458 
11459 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11460 		dev_warn(&hdev->pdev->dev,
11461 			 "vf %d vlan table is full, enabling spoof check may cause its packet transmission to fail\n",
11462 			 vf);
11463 	else if (enable && hclge_is_umv_space_full(vport, true))
11464 		dev_warn(&hdev->pdev->dev,
11465 			 "vf %d mac table is full, enabling spoof check may cause its packet transmission to fail\n",
11466 			 vf);
11467 
11468 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11469 	if (ret)
11470 		return ret;
11471 
11472 	vport->vf_info.spoofchk = new_spoofchk;
11473 	return 0;
11474 }
11475 
11476 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11477 {
11478 	struct hclge_vport *vport = hdev->vport;
11479 	int ret;
11480 	int i;
11481 
11482 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11483 		return 0;
11484 
11485 	/* resume the vf spoof check state after reset */
11486 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11487 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11488 					       vport->vf_info.spoofchk);
11489 		if (ret)
11490 			return ret;
11491 
11492 		vport++;
11493 	}
11494 
11495 	return 0;
11496 }
11497 
11498 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11499 {
11500 	struct hclge_vport *vport = hclge_get_vport(handle);
11501 	struct hclge_dev *hdev = vport->back;
11502 	u32 new_trusted = enable ? 1 : 0;
11503 
11504 	vport = hclge_get_vf_vport(hdev, vf);
11505 	if (!vport)
11506 		return -EINVAL;
11507 
11508 	if (vport->vf_info.trusted == new_trusted)
11509 		return 0;
11510 
11511 	vport->vf_info.trusted = new_trusted;
11512 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11513 	hclge_task_schedule(hdev, 0);
11514 
11515 	return 0;
11516 }
11517 
11518 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11519 {
11520 	int ret;
11521 	int vf;
11522 
11523 	/* reset vf rate to default value */
11524 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11525 		struct hclge_vport *vport = &hdev->vport[vf];
11526 
11527 		vport->vf_info.max_tx_rate = 0;
11528 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11529 		if (ret)
11530 			dev_err(&hdev->pdev->dev,
11531 				"vf%d failed to reset to default, ret=%d\n",
11532 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11533 	}
11534 }
11535 
11536 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11537 				     int min_tx_rate, int max_tx_rate)
11538 {
11539 	if (min_tx_rate != 0 ||
11540 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11541 		dev_err(&hdev->pdev->dev,
11542 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11543 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11544 		return -EINVAL;
11545 	}
11546 
11547 	return 0;
11548 }
11549 
11550 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11551 			     int min_tx_rate, int max_tx_rate, bool force)
11552 {
11553 	struct hclge_vport *vport = hclge_get_vport(handle);
11554 	struct hclge_dev *hdev = vport->back;
11555 	int ret;
11556 
11557 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11558 	if (ret)
11559 		return ret;
11560 
11561 	vport = hclge_get_vf_vport(hdev, vf);
11562 	if (!vport)
11563 		return -EINVAL;
11564 
11565 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11566 		return 0;
11567 
11568 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11569 	if (ret)
11570 		return ret;
11571 
11572 	vport->vf_info.max_tx_rate = max_tx_rate;
11573 
11574 	return 0;
11575 }
11576 
11577 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11578 {
11579 	struct hnae3_handle *handle = &hdev->vport->nic;
11580 	struct hclge_vport *vport;
11581 	int ret;
11582 	int vf;
11583 
11584 	/* resume the vf max_tx_rate after reset */
11585 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11586 		vport = hclge_get_vf_vport(hdev, vf);
11587 		if (!vport)
11588 			return -EINVAL;
11589 
11590 		/* Zero means max rate. After reset the firmware has already set
11591 		 * it to max rate, so just continue.
11592 		 */
11593 		if (!vport->vf_info.max_tx_rate)
11594 			continue;
11595 
11596 		ret = hclge_set_vf_rate(handle, vf, 0,
11597 					vport->vf_info.max_tx_rate, true);
11598 		if (ret) {
11599 			dev_err(&hdev->pdev->dev,
11600 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11601 				vf, vport->vf_info.max_tx_rate, ret);
11602 			return ret;
11603 		}
11604 	}
11605 
11606 	return 0;
11607 }
11608 
11609 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11610 {
11611 	struct hclge_vport *vport = hdev->vport;
11612 	int i;
11613 
11614 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11615 		hclge_vport_stop(vport);
11616 		vport++;
11617 	}
11618 }
11619 
11620 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11621 {
11622 	struct hclge_dev *hdev = ae_dev->priv;
11623 	struct pci_dev *pdev = ae_dev->pdev;
11624 	int ret;
11625 
11626 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11627 
11628 	hclge_stats_clear(hdev);
11629 	/* NOTE: a pf reset does not need to clear or restore the pf and vf
11630 	 * table entries, so the tables kept in memory are not cleaned here.
11631 	 */
11632 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11633 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11634 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11635 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11636 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11637 		hclge_reset_umv_space(hdev);
11638 	}
11639 
11640 	ret = hclge_cmd_init(hdev);
11641 	if (ret) {
11642 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11643 		return ret;
11644 	}
11645 
11646 	ret = hclge_map_tqp(hdev);
11647 	if (ret) {
11648 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11649 		return ret;
11650 	}
11651 
11652 	ret = hclge_mac_init(hdev);
11653 	if (ret) {
11654 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11655 		return ret;
11656 	}
11657 
11658 	ret = hclge_tp_port_init(hdev);
11659 	if (ret) {
11660 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11661 			ret);
11662 		return ret;
11663 	}
11664 
11665 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11666 	if (ret) {
11667 		dev_err(&pdev->dev, "Enable tso fail, ret = %d\n", ret);
11668 		return ret;
11669 	}
11670 
11671 	ret = hclge_config_gro(hdev, true);
11672 	if (ret)
11673 		return ret;
11674 
11675 	ret = hclge_init_vlan_config(hdev);
11676 	if (ret) {
11677 		dev_err(&pdev->dev, "VLAN init fail, ret = %d\n", ret);
11678 		return ret;
11679 	}
11680 
11681 	ret = hclge_tm_init_hw(hdev, true);
11682 	if (ret) {
11683 		dev_err(&pdev->dev, "tm init hw fail, ret = %d\n", ret);
11684 		return ret;
11685 	}
11686 
11687 	ret = hclge_rss_init_hw(hdev);
11688 	if (ret) {
11689 		dev_err(&pdev->dev, "Rss init fail, ret = %d\n", ret);
11690 		return ret;
11691 	}
11692 
11693 	ret = init_mgr_tbl(hdev);
11694 	if (ret) {
11695 		dev_err(&pdev->dev,
11696 			"failed to reinit manager table, ret = %d\n", ret);
11697 		return ret;
11698 	}
11699 
11700 	ret = hclge_init_fd_config(hdev);
11701 	if (ret) {
11702 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11703 		return ret;
11704 	}
11705 
11706 	/* Log and clear the hw errors that have already occurred */
11707 	hclge_handle_all_hns_hw_errors(ae_dev);
11708 
11709 	/* Re-enable the hw error interrupts because
11710 	 * the interrupts get disabled on global reset.
11711 	 */
11712 	ret = hclge_config_nic_hw_error(hdev, true);
11713 	if (ret) {
11714 		dev_err(&pdev->dev,
11715 			"fail(%d) to re-enable NIC hw error interrupts\n",
11716 			ret);
11717 		return ret;
11718 	}
11719 
11720 	if (hdev->roce_client) {
11721 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11722 		if (ret) {
11723 			dev_err(&pdev->dev,
11724 				"fail(%d) to re-enable roce ras interrupts\n",
11725 				ret);
11726 			return ret;
11727 		}
11728 	}
11729 
11730 	hclge_reset_vport_state(hdev);
11731 	ret = hclge_reset_vport_spoofchk(hdev);
11732 	if (ret)
11733 		return ret;
11734 
11735 	ret = hclge_resume_vf_rate(hdev);
11736 	if (ret)
11737 		return ret;
11738 
11739 	hclge_init_rxd_adv_layout(hdev);
11740 
11741 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11742 		 HCLGE_DRIVER_NAME);
11743 
11744 	return 0;
11745 }
11746 
11747 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11748 {
11749 	struct hclge_dev *hdev = ae_dev->priv;
11750 	struct hclge_mac *mac = &hdev->hw.mac;
11751 
11752 	hclge_reset_vf_rate(hdev);
11753 	hclge_clear_vf_vlan(hdev);
11754 	hclge_misc_affinity_teardown(hdev);
11755 	hclge_state_uninit(hdev);
11756 	hclge_uninit_rxd_adv_layout(hdev);
11757 	hclge_uninit_mac_table(hdev);
11758 	hclge_del_all_fd_entries(hdev);
11759 
11760 	if (mac->phydev)
11761 		mdiobus_unregister(mac->mdio_bus);
11762 
11763 	/* Disable MISC vector(vector0) */
11764 	hclge_enable_vector(&hdev->misc_vector, false);
11765 	synchronize_irq(hdev->misc_vector.vector_irq);
11766 
11767 	/* Disable all hw interrupts */
11768 	hclge_config_mac_tnl_int(hdev, false);
11769 	hclge_config_nic_hw_error(hdev, false);
11770 	hclge_config_rocee_ras_interrupt(hdev, false);
11771 
11772 	hclge_cmd_uninit(hdev);
11773 	hclge_misc_irq_uninit(hdev);
11774 	hclge_pci_uninit(hdev);
11775 	mutex_destroy(&hdev->vport_lock);
11776 	hclge_uninit_vport_vlan_table(hdev);
11777 	ae_dev->priv = NULL;
11778 }
11779 
11780 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11781 {
11782 	struct hclge_vport *vport = hclge_get_vport(handle);
11783 	struct hclge_dev *hdev = vport->back;
11784 
11785 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11786 }
11787 
11788 static void hclge_get_channels(struct hnae3_handle *handle,
11789 			       struct ethtool_channels *ch)
11790 {
11791 	ch->max_combined = hclge_get_max_channels(handle);
11792 	ch->other_count = 1;
11793 	ch->max_other = 1;
11794 	ch->combined_count = handle->kinfo.rss_size;
11795 }
11796 
11797 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11798 					u16 *alloc_tqps, u16 *max_rss_size)
11799 {
11800 	struct hclge_vport *vport = hclge_get_vport(handle);
11801 	struct hclge_dev *hdev = vport->back;
11802 
11803 	*alloc_tqps = vport->alloc_tqps;
11804 	*max_rss_size = hdev->pf_rss_size_max;
11805 }
11806 
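/* Change the number of enabled queue pairs: record the requested RSS size,
 * update the TM vport mapping, reprogram the RSS TC mode for the new size
 * and, unless the user has configured the RSS indirection table
 * (rxfh_configured), rebuild it to spread traffic across the new rss_size.
 */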
11807 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11808 			      bool rxfh_configured)
11809 {
11810 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11811 	struct hclge_vport *vport = hclge_get_vport(handle);
11812 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11813 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11814 	struct hclge_dev *hdev = vport->back;
11815 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11816 	u16 cur_rss_size = kinfo->rss_size;
11817 	u16 cur_tqps = kinfo->num_tqps;
11818 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11819 	u16 roundup_size;
11820 	u32 *rss_indir;
11821 	unsigned int i;
11822 	int ret;
11823 
11824 	kinfo->req_rss_size = new_tqps_num;
11825 
11826 	ret = hclge_tm_vport_map_update(hdev);
11827 	if (ret) {
11828 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret = %d\n", ret);
11829 		return ret;
11830 	}
11831 
11832 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11833 	roundup_size = ilog2(roundup_size);
11834 	/* Set the RSS TC mode according to the new RSS size */
11835 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11836 		tc_valid[i] = 0;
11837 
11838 		if (!(hdev->hw_tc_map & BIT(i)))
11839 			continue;
11840 
11841 		tc_valid[i] = 1;
11842 		tc_size[i] = roundup_size;
11843 		tc_offset[i] = kinfo->rss_size * i;
11844 	}
11845 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11846 	if (ret)
11847 		return ret;
11848 
11849 	/* RSS indirection table has been configured by user */
11850 	if (rxfh_configured)
11851 		goto out;
11852 
11853 	/* Reinitialize the RSS indirection table according to the new RSS size */
11854 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11855 			    GFP_KERNEL);
11856 	if (!rss_indir)
11857 		return -ENOMEM;
11858 
11859 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11860 		rss_indir[i] = i % kinfo->rss_size;
11861 
11862 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11863 	if (ret)
11864 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11865 			ret);
11866 
11867 	kfree(rss_indir);
11868 
11869 out:
11870 	if (!ret)
11871 		dev_info(&hdev->pdev->dev,
11872 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
11873 			 cur_rss_size, kinfo->rss_size,
11874 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11875 
11876 	return ret;
11877 }
11878 
11879 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11880 			      u32 *regs_num_64_bit)
11881 {
11882 	struct hclge_desc desc;
11883 	u32 total_num;
11884 	int ret;
11885 
11886 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11887 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11888 	if (ret) {
11889 		dev_err(&hdev->pdev->dev,
11890 			"Query register number cmd failed, ret = %d.\n", ret);
11891 		return ret;
11892 	}
11893 
11894 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
11895 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
11896 
11897 	total_num = *regs_num_32_bit + *regs_num_64_bit;
11898 	if (!total_num)
11899 		return -EINVAL;
11900 
11901 	return 0;
11902 }
11903 
11904 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11905 				 void *data)
11906 {
11907 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11908 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11909 
11910 	struct hclge_desc *desc;
11911 	u32 *reg_val = data;
11912 	__le32 *desc_data;
11913 	int nodata_num;
11914 	int cmd_num;
11915 	int i, k, n;
11916 	int ret;
11917 
11918 	if (regs_num == 0)
11919 		return 0;
11920 
11921 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11922 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11923 			       HCLGE_32_BIT_REG_RTN_DATANUM);
11924 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11925 	if (!desc)
11926 		return -ENOMEM;
11927 
11928 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11929 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11930 	if (ret) {
11931 		dev_err(&hdev->pdev->dev,
11932 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
11933 		kfree(desc);
11934 		return ret;
11935 	}
11936 
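	/* The first descriptor reserves nodata_num entries for command
	 * metadata, so it carries fewer register values than the rest.
	 */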
11937 	for (i = 0; i < cmd_num; i++) {
11938 		if (i == 0) {
11939 			desc_data = (__le32 *)(&desc[i].data[0]);
11940 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11941 		} else {
11942 			desc_data = (__le32 *)(&desc[i]);
11943 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
11944 		}
11945 		for (k = 0; k < n; k++) {
11946 			*reg_val++ = le32_to_cpu(*desc_data++);
11947 
11948 			regs_num--;
11949 			if (!regs_num)
11950 				break;
11951 		}
11952 	}
11953 
11954 	kfree(desc);
11955 	return 0;
11956 }
11957 
11958 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11959 				 void *data)
11960 {
11961 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11962 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11963 
11964 	struct hclge_desc *desc;
11965 	u64 *reg_val = data;
11966 	__le64 *desc_data;
11967 	int nodata_len;
11968 	int cmd_num;
11969 	int i, k, n;
11970 	int ret;
11971 
11972 	if (regs_num == 0)
11973 		return 0;
11974 
11975 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11976 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11977 			       HCLGE_64_BIT_REG_RTN_DATANUM);
11978 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11979 	if (!desc)
11980 		return -ENOMEM;
11981 
11982 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11983 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11984 	if (ret) {
11985 		dev_err(&hdev->pdev->dev,
11986 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
11987 		kfree(desc);
11988 		return ret;
11989 	}
11990 
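	/* As in the 32 bit query, the first descriptor carries fewer register
	 * values because part of it holds command metadata.
	 */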
11991 	for (i = 0; i < cmd_num; i++) {
11992 		if (i == 0) {
11993 			desc_data = (__le64 *)(&desc[i].data[0]);
11994 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11995 		} else {
11996 			desc_data = (__le64 *)(&desc[i]);
11997 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
11998 		}
11999 		for (k = 0; k < n; k++) {
12000 			*reg_val++ = le64_to_cpu(*desc_data++);
12001 
12002 			regs_num--;
12003 			if (!regs_num)
12004 				break;
12005 		}
12006 	}
12007 
12008 	kfree(desc);
12009 	return 0;
12010 }
12011 
12012 #define MAX_SEPARATE_NUM	4
12013 #define SEPARATOR_VALUE		0xFDFCFBFA
12014 #define REG_NUM_PER_LINE	4
12015 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12016 #define REG_SEPARATOR_LINE	1
12017 #define REG_NUM_REMAIN_MASK	3
12018 
12019 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12020 {
12021 	int i;
12022 
12023 	/* initialize command BD except the last one */
12024 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12025 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12026 					   true);
12027 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12028 	}
12029 
12030 	/* initialize the last command BD */
12031 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12032 
12033 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12034 }
12035 
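/* Query how many buffer descriptors each DFX register type needs. The counts
 * are packed across the response descriptors and looked up through
 * hclge_dfx_bd_offset_list.
 */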
12036 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12037 				    int *bd_num_list,
12038 				    u32 type_num)
12039 {
12040 	u32 entries_per_desc, desc_index, index, offset, i;
12041 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12042 	int ret;
12043 
12044 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12045 	if (ret) {
12046 		dev_err(&hdev->pdev->dev,
12047 			"Get dfx bd num fail, status is %d.\n", ret);
12048 		return ret;
12049 	}
12050 
12051 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12052 	for (i = 0; i < type_num; i++) {
12053 		offset = hclge_dfx_bd_offset_list[i];
12054 		index = offset % entries_per_desc;
12055 		desc_index = offset / entries_per_desc;
12056 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12057 	}
12058 
12059 	return ret;
12060 }
12061 
12062 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12063 				  struct hclge_desc *desc_src, int bd_num,
12064 				  enum hclge_opcode_type cmd)
12065 {
12066 	struct hclge_desc *desc = desc_src;
12067 	int i, ret;
12068 
12069 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12070 	for (i = 0; i < bd_num - 1; i++) {
12071 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12072 		desc++;
12073 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12074 	}
12075 
12076 	desc = desc_src;
12077 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12078 	if (ret)
12079 		dev_err(&hdev->pdev->dev,
12080 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12081 			cmd, ret);
12082 
12083 	return ret;
12084 }
12085 
12086 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12087 				    void *data)
12088 {
12089 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12090 	struct hclge_desc *desc = desc_src;
12091 	u32 *reg = data;
12092 
12093 	entries_per_desc = ARRAY_SIZE(desc->data);
12094 	reg_num = entries_per_desc * bd_num;
12095 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12096 	for (i = 0; i < reg_num; i++) {
12097 		index = i % entries_per_desc;
12098 		desc_index = i / entries_per_desc;
12099 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12100 	}
12101 	for (i = 0; i < separator_num; i++)
12102 		*reg++ = SEPARATOR_VALUE;
12103 
12104 	return reg_num + separator_num;
12105 }
12106 
12107 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12108 {
12109 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12110 	int data_len_per_desc, bd_num, i;
12111 	int *bd_num_list;
12112 	u32 data_len;
12113 	int ret;
12114 
12115 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12116 	if (!bd_num_list)
12117 		return -ENOMEM;
12118 
12119 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12120 	if (ret) {
12121 		dev_err(&hdev->pdev->dev,
12122 			"Get dfx reg bd num fail, status is %d.\n", ret);
12123 		goto out;
12124 	}
12125 
12126 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12127 	*len = 0;
12128 	for (i = 0; i < dfx_reg_type_num; i++) {
12129 		bd_num = bd_num_list[i];
12130 		data_len = data_len_per_desc * bd_num;
12131 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12132 	}
12133 
12134 out:
12135 	kfree(bd_num_list);
12136 	return ret;
12137 }
12138 
12139 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12140 {
12141 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12142 	int bd_num, bd_num_max, buf_len, i;
12143 	struct hclge_desc *desc_src;
12144 	int *bd_num_list;
12145 	u32 *reg = data;
12146 	int ret;
12147 
12148 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12149 	if (!bd_num_list)
12150 		return -ENOMEM;
12151 
12152 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12153 	if (ret) {
12154 		dev_err(&hdev->pdev->dev,
12155 			"Get dfx reg bd num fail, status is %d.\n", ret);
12156 		goto out;
12157 	}
12158 
12159 	bd_num_max = bd_num_list[0];
12160 	for (i = 1; i < dfx_reg_type_num; i++)
12161 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12162 
12163 	buf_len = sizeof(*desc_src) * bd_num_max;
12164 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12165 	if (!desc_src) {
12166 		ret = -ENOMEM;
12167 		goto out;
12168 	}
12169 
12170 	for (i = 0; i < dfx_reg_type_num; i++) {
12171 		bd_num = bd_num_list[i];
12172 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12173 					     hclge_dfx_reg_opcode_list[i]);
12174 		if (ret) {
12175 			dev_err(&hdev->pdev->dev,
12176 				"Get dfx reg fail, status is %d.\n", ret);
12177 			break;
12178 		}
12179 
12180 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12181 	}
12182 
12183 	kfree(desc_src);
12184 out:
12185 	kfree(bd_num_list);
12186 	return ret;
12187 }
12188 
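/* Copy the directly readable PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) into the dump buffer, padding each group
 * with SEPARATOR_VALUE words, and return the number of u32 words written.
 */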
12189 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12190 			      struct hnae3_knic_private_info *kinfo)
12191 {
12192 #define HCLGE_RING_REG_OFFSET		0x200
12193 #define HCLGE_RING_INT_REG_OFFSET	0x4
12194 
12195 	int i, j, reg_num, separator_num;
12196 	int data_num_sum;
12197 	u32 *reg = data;
12198 
12199 	/* fetch per-PF register values from the PF PCIe register space */
12200 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12201 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12202 	for (i = 0; i < reg_num; i++)
12203 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12204 	for (i = 0; i < separator_num; i++)
12205 		*reg++ = SEPARATOR_VALUE;
12206 	data_num_sum = reg_num + separator_num;
12207 
12208 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12209 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12210 	for (i = 0; i < reg_num; i++)
12211 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12212 	for (i = 0; i < separator_num; i++)
12213 		*reg++ = SEPARATOR_VALUE;
12214 	data_num_sum += reg_num + separator_num;
12215 
12216 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12217 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12218 	for (j = 0; j < kinfo->num_tqps; j++) {
12219 		for (i = 0; i < reg_num; i++)
12220 			*reg++ = hclge_read_dev(&hdev->hw,
12221 						ring_reg_addr_list[i] +
12222 						HCLGE_RING_REG_OFFSET * j);
12223 		for (i = 0; i < separator_num; i++)
12224 			*reg++ = SEPARATOR_VALUE;
12225 	}
12226 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12227 
12228 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12229 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12230 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12231 		for (i = 0; i < reg_num; i++)
12232 			*reg++ = hclge_read_dev(&hdev->hw,
12233 						tqp_intr_reg_addr_list[i] +
12234 						HCLGE_RING_INT_REG_OFFSET * j);
12235 		for (i = 0; i < separator_num; i++)
12236 			*reg++ = SEPARATOR_VALUE;
12237 	}
12238 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12239 
12240 	return data_num_sum;
12241 }
12242 
12243 static int hclge_get_regs_len(struct hnae3_handle *handle)
12244 {
12245 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12246 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12247 	struct hclge_vport *vport = hclge_get_vport(handle);
12248 	struct hclge_dev *hdev = vport->back;
12249 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12250 	int regs_lines_32_bit, regs_lines_64_bit;
12251 	int ret;
12252 
12253 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12254 	if (ret) {
12255 		dev_err(&hdev->pdev->dev,
12256 			"Get register number failed, ret = %d.\n", ret);
12257 		return ret;
12258 	}
12259 
12260 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12261 	if (ret) {
12262 		dev_err(&hdev->pdev->dev,
12263 			"Get dfx reg len failed, ret = %d.\n", ret);
12264 		return ret;
12265 	}
12266 
12267 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12268 		REG_SEPARATOR_LINE;
12269 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12270 		REG_SEPARATOR_LINE;
12271 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12272 		REG_SEPARATOR_LINE;
12273 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12274 		REG_SEPARATOR_LINE;
12275 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12276 		REG_SEPARATOR_LINE;
12277 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12278 		REG_SEPARATOR_LINE;
12279 
12280 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12281 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12282 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12283 }
12284 
12285 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12286 			   void *data)
12287 {
12288 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12289 	struct hclge_vport *vport = hclge_get_vport(handle);
12290 	struct hclge_dev *hdev = vport->back;
12291 	u32 regs_num_32_bit, regs_num_64_bit;
12292 	int i, reg_num, separator_num, ret;
12293 	u32 *reg = data;
12294 
12295 	*version = hdev->fw_version;
12296 
12297 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12298 	if (ret) {
12299 		dev_err(&hdev->pdev->dev,
12300 			"Get register number failed, ret = %d.\n", ret);
12301 		return;
12302 	}
12303 
12304 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12305 
12306 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12307 	if (ret) {
12308 		dev_err(&hdev->pdev->dev,
12309 			"Get 32 bit register failed, ret = %d.\n", ret);
12310 		return;
12311 	}
12312 	reg_num = regs_num_32_bit;
12313 	reg += reg_num;
12314 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12315 	for (i = 0; i < separator_num; i++)
12316 		*reg++ = SEPARATOR_VALUE;
12317 
12318 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12319 	if (ret) {
12320 		dev_err(&hdev->pdev->dev,
12321 			"Get 64 bit register failed, ret = %d.\n", ret);
12322 		return;
12323 	}
12324 	reg_num = regs_num_64_bit * 2;
12325 	reg += reg_num;
12326 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12327 	for (i = 0; i < separator_num; i++)
12328 		*reg++ = SEPARATOR_VALUE;
12329 
12330 	ret = hclge_get_dfx_reg(hdev, reg);
12331 	if (ret)
12332 		dev_err(&hdev->pdev->dev,
12333 			"Get dfx register failed, ret = %d.\n", ret);
12334 }
12335 
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

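/* ethtool set_phys_id handler: turn the locate LED on while port
 * identification is active and back off when it completes.
 */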
static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

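/* copy the supported and advertised link mode bitmaps cached in the MAC
 * structure into the caller supplied ethtool link mode masks.
 */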
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx;

	for (idx = 0; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

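/* enable or disable hardware GRO for this handle */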
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hclge_config_gro(hdev, enable);
}

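/* apply pending promiscuous mode changes from the periodic service task.
 * vport 0 is the PF: its netdev flags are merged with the overflow
 * promiscuous flags and the VLAN filter state is updated accordingly.
 * The remaining vports are VFs: unicast/multicast promiscuous requests
 * are honoured only for trusted VFs, broadcast is always applied. On
 * failure the change bit is set again so the next run retries.
 */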
static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;
	u16 i;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				  &vport->state);
			hclge_enable_vlan_filter(handle,
						 tmp_flags & HNAE3_VLAN_FLTR);
		}
	}

	for (i = 1; i < hdev->num_alloc_vport; i++) {
		bool uc_en = false;
		bool mc_en = false;
		bool bc_en;

		vport = &hdev->vport[i];

		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
					&vport->state))
			continue;

		if (vport->vf_info.trusted) {
			uc_en = vport->vf_info.request_uc_en > 0;
			mc_en = vport->vf_info.request_mc_en > 0;
		}
		bc_en = vport->vf_info.request_bc_en > 0;

		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
						 mc_en, bc_en);
		if (ret) {
			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state);
			return;
		}
	}
}

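/* query the firmware whether an SFP module is currently plugged in */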
static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* need 6 BDs (140 bytes in total) in one reading;
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

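	/* i equals HCLGE_SFP_INFO_CMD_NUM here, so all 6 bds are sent at once */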
	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			return read_len;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}

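/* ethtool module eeprom read: only supported on fiber ports. The request
 * is split into chunks of at most HCLGE_SFP_INFO_MAX_LEN bytes, each read
 * through hclge_get_sfp_eeprom_info(), until the whole range is copied.
 */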
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

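/* PF operations exposed to the hnae3 framework through ae_algo below */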
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

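/* module init: create the hclge workqueue used to schedule the driver's
 * service tasks, then register the algorithm with the hnae3 framework;
 * hclge_exit() undoes both in reverse order.
 */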
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);