1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	/* required last entry */
88 	{0, }
89 };
90 
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92 
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94 					 HCLGE_CMDQ_TX_ADDR_H_REG,
95 					 HCLGE_CMDQ_TX_DEPTH_REG,
96 					 HCLGE_CMDQ_TX_TAIL_REG,
97 					 HCLGE_CMDQ_TX_HEAD_REG,
98 					 HCLGE_CMDQ_RX_ADDR_L_REG,
99 					 HCLGE_CMDQ_RX_ADDR_H_REG,
100 					 HCLGE_CMDQ_RX_DEPTH_REG,
101 					 HCLGE_CMDQ_RX_TAIL_REG,
102 					 HCLGE_CMDQ_RX_HEAD_REG,
103 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 					 HCLGE_CMDQ_INTR_STS_REG,
105 					 HCLGE_CMDQ_INTR_EN_REG,
106 					 HCLGE_CMDQ_INTR_GEN_REG};
107 
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109 					   HCLGE_VECTOR0_OTER_EN_REG,
110 					   HCLGE_MISC_RESET_STS_REG,
111 					   HCLGE_MISC_VECTOR_INT_STS,
112 					   HCLGE_GLOBAL_RESET_REG,
113 					   HCLGE_FUN_RST_ING,
114 					   HCLGE_GRO_EN_REG};
115 
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117 					 HCLGE_RING_RX_ADDR_H_REG,
118 					 HCLGE_RING_RX_BD_NUM_REG,
119 					 HCLGE_RING_RX_BD_LENGTH_REG,
120 					 HCLGE_RING_RX_MERGE_EN_REG,
121 					 HCLGE_RING_RX_TAIL_REG,
122 					 HCLGE_RING_RX_HEAD_REG,
123 					 HCLGE_RING_RX_FBD_NUM_REG,
124 					 HCLGE_RING_RX_OFFSET_REG,
125 					 HCLGE_RING_RX_FBD_OFFSET_REG,
126 					 HCLGE_RING_RX_STASH_REG,
127 					 HCLGE_RING_RX_BD_ERR_REG,
128 					 HCLGE_RING_TX_ADDR_L_REG,
129 					 HCLGE_RING_TX_ADDR_H_REG,
130 					 HCLGE_RING_TX_BD_NUM_REG,
131 					 HCLGE_RING_TX_PRIORITY_REG,
132 					 HCLGE_RING_TX_TC_REG,
133 					 HCLGE_RING_TX_MERGE_EN_REG,
134 					 HCLGE_RING_TX_TAIL_REG,
135 					 HCLGE_RING_TX_HEAD_REG,
136 					 HCLGE_RING_TX_FBD_NUM_REG,
137 					 HCLGE_RING_TX_OFFSET_REG,
138 					 HCLGE_RING_TX_EBD_NUM_REG,
139 					 HCLGE_RING_TX_EBD_OFFSET_REG,
140 					 HCLGE_RING_TX_BD_ERR_REG,
141 					 HCLGE_RING_EN_REG};
142 
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 					     HCLGE_TQP_INTR_GL0_REG,
145 					     HCLGE_TQP_INTR_GL1_REG,
146 					     HCLGE_TQP_INTR_GL2_REG,
147 					     HCLGE_TQP_INTR_RL_REG};
148 
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150 	"App    Loopback test",
151 	"Serdes serial Loopback test",
152 	"Serdes parallel Loopback test",
153 	"Phy    Loopback test"
154 };
155 
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157 	{"mac_tx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159 	{"mac_rx_mac_pause_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161 	{"mac_tx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163 	{"mac_rx_control_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165 	{"mac_tx_pfc_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167 	{"mac_tx_pfc_pri0_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169 	{"mac_tx_pfc_pri1_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171 	{"mac_tx_pfc_pri2_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173 	{"mac_tx_pfc_pri3_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175 	{"mac_tx_pfc_pri4_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177 	{"mac_tx_pfc_pri5_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179 	{"mac_tx_pfc_pri6_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181 	{"mac_tx_pfc_pri7_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183 	{"mac_rx_pfc_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185 	{"mac_rx_pfc_pri0_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187 	{"mac_rx_pfc_pri1_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189 	{"mac_rx_pfc_pri2_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191 	{"mac_rx_pfc_pri3_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193 	{"mac_rx_pfc_pri4_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195 	{"mac_rx_pfc_pri5_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197 	{"mac_rx_pfc_pri6_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199 	{"mac_rx_pfc_pri7_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201 	{"mac_tx_total_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203 	{"mac_tx_total_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205 	{"mac_tx_good_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207 	{"mac_tx_bad_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209 	{"mac_tx_good_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211 	{"mac_tx_bad_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213 	{"mac_tx_uni_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215 	{"mac_tx_multi_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217 	{"mac_tx_broad_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219 	{"mac_tx_undersize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221 	{"mac_tx_oversize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223 	{"mac_tx_64_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225 	{"mac_tx_65_127_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227 	{"mac_tx_128_255_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229 	{"mac_tx_256_511_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231 	{"mac_tx_512_1023_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233 	{"mac_tx_1024_1518_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235 	{"mac_tx_1519_2047_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237 	{"mac_tx_2048_4095_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239 	{"mac_tx_4096_8191_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241 	{"mac_tx_8192_9216_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243 	{"mac_tx_9217_12287_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245 	{"mac_tx_12288_16383_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247 	{"mac_tx_1519_max_good_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249 	{"mac_tx_1519_max_bad_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251 	{"mac_rx_total_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253 	{"mac_rx_total_oct_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255 	{"mac_rx_good_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257 	{"mac_rx_bad_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259 	{"mac_rx_good_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261 	{"mac_rx_bad_oct_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263 	{"mac_rx_uni_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265 	{"mac_rx_multi_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267 	{"mac_rx_broad_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269 	{"mac_rx_undersize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271 	{"mac_rx_oversize_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273 	{"mac_rx_64_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275 	{"mac_rx_65_127_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277 	{"mac_rx_128_255_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279 	{"mac_rx_256_511_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281 	{"mac_rx_512_1023_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283 	{"mac_rx_1024_1518_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285 	{"mac_rx_1519_2047_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287 	{"mac_rx_2048_4095_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289 	{"mac_rx_4096_8191_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291 	{"mac_rx_8192_9216_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293 	{"mac_rx_9217_12287_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295 	{"mac_rx_12288_16383_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297 	{"mac_rx_1519_max_good_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299 	{"mac_rx_1519_max_bad_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301 
302 	{"mac_tx_fragment_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304 	{"mac_tx_undermin_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306 	{"mac_tx_jabber_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308 	{"mac_tx_err_all_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310 	{"mac_tx_from_app_good_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312 	{"mac_tx_from_app_bad_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314 	{"mac_rx_fragment_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316 	{"mac_rx_undermin_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318 	{"mac_rx_jabber_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320 	{"mac_rx_fcs_err_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322 	{"mac_rx_send_app_good_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324 	{"mac_rx_send_app_bad_pkt_num",
325 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327 
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329 	{
330 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
332 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333 		.i_port_bitmap = 0x1,
334 	},
335 };
336 
337 static const u8 hclge_hash_key[] = {
338 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344 
345 static const u32 hclge_dfx_bd_offset_list[] = {
346 	HCLGE_DFX_BIOS_BD_OFFSET,
347 	HCLGE_DFX_SSU_0_BD_OFFSET,
348 	HCLGE_DFX_SSU_1_BD_OFFSET,
349 	HCLGE_DFX_IGU_BD_OFFSET,
350 	HCLGE_DFX_RPU_0_BD_OFFSET,
351 	HCLGE_DFX_RPU_1_BD_OFFSET,
352 	HCLGE_DFX_NCSI_BD_OFFSET,
353 	HCLGE_DFX_RTC_BD_OFFSET,
354 	HCLGE_DFX_PPP_BD_OFFSET,
355 	HCLGE_DFX_RCB_BD_OFFSET,
356 	HCLGE_DFX_TQP_BD_OFFSET,
357 	HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359 
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
362 	HCLGE_OPC_DFX_SSU_REG_0,
363 	HCLGE_OPC_DFX_SSU_REG_1,
364 	HCLGE_OPC_DFX_IGU_EGU_REG,
365 	HCLGE_OPC_DFX_RPU_REG_0,
366 	HCLGE_OPC_DFX_RPU_REG_1,
367 	HCLGE_OPC_DFX_NCSI_REG,
368 	HCLGE_OPC_DFX_RTC_REG,
369 	HCLGE_OPC_DFX_PPP_REG,
370 	HCLGE_OPC_DFX_RCB_REG,
371 	HCLGE_OPC_DFX_TQP_REG,
372 	HCLGE_OPC_DFX_SSU_REG_2
373 };
374 
375 static const struct key_info meta_data_key_info[] = {
376 	{ PACKET_TYPE_ID, 6},
377 	{ IP_FRAGEMENT, 1},
378 	{ ROCE_TYPE, 1},
379 	{ NEXT_KEY, 5},
380 	{ VLAN_NUMBER, 2},
381 	{ SRC_VPORT, 12},
382 	{ DST_VPORT, 12},
383 	{ TUNNEL_PACKET, 1},
384 };
385 
386 static const struct key_info tuple_key_info[] = {
387 	{ OUTER_DST_MAC, 48},
388 	{ OUTER_SRC_MAC, 48},
389 	{ OUTER_VLAN_TAG_FST, 16},
390 	{ OUTER_VLAN_TAG_SEC, 16},
391 	{ OUTER_ETH_TYPE, 16},
392 	{ OUTER_L2_RSV, 16},
393 	{ OUTER_IP_TOS, 8},
394 	{ OUTER_IP_PROTO, 8},
395 	{ OUTER_SRC_IP, 32},
396 	{ OUTER_DST_IP, 32},
397 	{ OUTER_L3_RSV, 16},
398 	{ OUTER_SRC_PORT, 16},
399 	{ OUTER_DST_PORT, 16},
400 	{ OUTER_L4_RSV, 32},
401 	{ OUTER_TUN_VNI, 24},
402 	{ OUTER_TUN_FLOW_ID, 8},
403 	{ INNER_DST_MAC, 48},
404 	{ INNER_SRC_MAC, 48},
405 	{ INNER_VLAN_TAG_FST, 16},
406 	{ INNER_VLAN_TAG_SEC, 16},
407 	{ INNER_ETH_TYPE, 16},
408 	{ INNER_L2_RSV, 16},
409 	{ INNER_IP_TOS, 8},
410 	{ INNER_IP_PROTO, 8},
411 	{ INNER_SRC_IP, 32},
412 	{ INNER_DST_IP, 32},
413 	{ INNER_L3_RSV, 16},
414 	{ INNER_SRC_PORT, 16},
415 	{ INNER_DST_PORT, 16},
416 	{ INNER_L4_RSV, 32},
417 };
418 
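/* Read the MAC statistics with the fixed-length HCLGE_OPC_STATS_MAC command
 * and accumulate them into hdev->mac_stats.
 */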
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422 
423 	u64 *data = (u64 *)(&hdev->mac_stats);
424 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425 	__le64 *desc_data;
426 	int i, k, n;
427 	int ret;
428 
429 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431 	if (ret) {
432 		dev_err(&hdev->pdev->dev,
433 			"Get MAC pkt stats fail, status = %d.\n", ret);
434 
435 		return ret;
436 	}
437 
438 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439 		/* for special opcode 0032, only the first desc has the head */
440 		if (unlikely(i == 0)) {
441 			desc_data = (__le64 *)(&desc[i].data[0]);
442 			n = HCLGE_RD_FIRST_STATS_NUM;
443 		} else {
444 			desc_data = (__le64 *)(&desc[i]);
445 			n = HCLGE_RD_OTHER_STATS_NUM;
446 		}
447 
448 		for (k = 0; k < n; k++) {
449 			*data += le64_to_cpu(*desc_data);
450 			data++;
451 			desc_data++;
452 		}
453 	}
454 
455 	return 0;
456 }
457 
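/* Read all MAC statistics with HCLGE_OPC_STATS_MAC_ALL, using a
 * caller-provided descriptor count, and accumulate them into hdev->mac_stats.
 */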
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460 	u64 *data = (u64 *)(&hdev->mac_stats);
461 	struct hclge_desc *desc;
462 	__le64 *desc_data;
463 	u16 i, k, n;
464 	int ret;
465 
	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
469 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470 	if (!desc)
471 		return -ENOMEM;
472 
473 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475 	if (ret) {
476 		kfree(desc);
477 		return ret;
478 	}
479 
480 	for (i = 0; i < desc_num; i++) {
481 		/* for special opcode 0034, only the first desc has the head */
482 		if (i == 0) {
483 			desc_data = (__le64 *)(&desc[i].data[0]);
484 			n = HCLGE_RD_FIRST_STATS_NUM;
485 		} else {
486 			desc_data = (__le64 *)(&desc[i]);
487 			n = HCLGE_RD_OTHER_STATS_NUM;
488 		}
489 
490 		for (k = 0; k < n; k++) {
491 			*data += le64_to_cpu(*desc_data);
492 			data++;
493 			desc_data++;
494 		}
495 	}
496 
497 	kfree(desc);
498 
499 	return 0;
500 }
501 
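/* Query how many MAC statistics registers exist and convert that number
 * into the descriptor count needed to read them all.
 */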
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504 	struct hclge_desc desc;
505 	__le32 *desc_data;
506 	u32 reg_num;
507 	int ret;
508 
509 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511 	if (ret)
512 		return ret;
513 
514 	desc_data = (__le32 *)(&desc.data[0]);
515 	reg_num = le32_to_cpu(*desc_data);
516 
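	/* one descriptor for the first three registers, plus one more for
	 * every four remaining registers, rounded up
	 */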
517 	*desc_num = 1 + ((reg_num - 3) >> 2) +
518 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519 
520 	return 0;
521 }
522 
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525 	u32 desc_num;
526 	int ret;
527 
528 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
529 
530 	/* The firmware supports the new statistics acquisition method */
531 	if (!ret)
532 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
533 	else if (ret == -EOPNOTSUPP)
534 		ret = hclge_mac_update_stats_defective(hdev);
535 	else
536 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537 
538 	return ret;
539 }
540 
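/* Query the RX and TX packet counters of each TQP owned by this handle and
 * accumulate them into the per-TQP software statistics.
 */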
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544 	struct hclge_vport *vport = hclge_get_vport(handle);
545 	struct hclge_dev *hdev = vport->back;
546 	struct hnae3_queue *queue;
547 	struct hclge_desc desc[1];
548 	struct hclge_tqp *tqp;
549 	int ret, i;
550 
551 	for (i = 0; i < kinfo->num_tqps; i++) {
552 		queue = handle->kinfo.tqp[i];
553 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATS */
555 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556 					   true);
557 
558 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
559 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
560 		if (ret) {
561 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
563 				ret, i);
564 			return ret;
565 		}
566 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567 			le32_to_cpu(desc[0].data[1]);
568 	}
569 
570 	for (i = 0; i < kinfo->num_tqps; i++) {
571 		queue = handle->kinfo.tqp[i];
572 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATS */
574 		hclge_cmd_setup_basic_desc(&desc[0],
575 					   HCLGE_OPC_QUERY_TX_STATS,
576 					   true);
577 
578 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
579 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
580 		if (ret) {
581 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
583 				ret, i);
584 			return ret;
585 		}
586 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587 			le32_to_cpu(desc[0].data[1]);
588 	}
589 
590 	return 0;
591 }
592 
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596 	struct hclge_tqp *tqp;
597 	u64 *buff = data;
598 	int i;
599 
600 	for (i = 0; i < kinfo->num_tqps; i++) {
601 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603 	}
604 
605 	for (i = 0; i < kinfo->num_tqps; i++) {
606 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608 	}
609 
610 	return buff;
611 }
612 
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616 
	/* each tqp has a TX queue and an RX queue */
	return kinfo->num_tqps * 2;
619 }
620 
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624 	u8 *buff = data;
625 	int i = 0;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629 			struct hclge_tqp, q);
630 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
631 			 tqp->index);
632 		buff = buff + ETH_GSTRING_LEN;
633 	}
634 
635 	for (i = 0; i < kinfo->num_tqps; i++) {
636 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637 			struct hclge_tqp, q);
638 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
639 			 tqp->index);
640 		buff = buff + ETH_GSTRING_LEN;
641 	}
642 
643 	return buff;
644 }
645 
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647 				 const struct hclge_comm_stats_str strs[],
648 				 int size, u64 *data)
649 {
650 	u64 *buf = data;
651 	u32 i;
652 
653 	for (i = 0; i < size; i++)
654 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655 
656 	return buf + size;
657 }
658 
659 static u8 *hclge_comm_get_strings(u32 stringset,
660 				  const struct hclge_comm_stats_str strs[],
661 				  int size, u8 *data)
662 {
663 	char *buff = (char *)data;
664 	u32 i;
665 
666 	if (stringset != ETH_SS_STATS)
667 		return buff;
668 
669 	for (i = 0; i < size; i++) {
670 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671 		buff = buff + ETH_GSTRING_LEN;
672 	}
673 
674 	return (u8 *)buff;
675 }
676 
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679 	struct hnae3_handle *handle;
680 	int status;
681 
682 	handle = &hdev->vport[0].nic;
683 	if (handle->client) {
684 		status = hclge_tqps_update_stats(handle);
685 		if (status) {
686 			dev_err(&hdev->pdev->dev,
687 				"Update TQPS stats fail, status = %d.\n",
688 				status);
689 		}
690 	}
691 
692 	status = hclge_mac_update_stats(hdev);
693 	if (status)
694 		dev_err(&hdev->pdev->dev,
695 			"Update MAC stats fail, status = %d.\n", status);
696 }
697 
698 static void hclge_update_stats(struct hnae3_handle *handle,
699 			       struct net_device_stats *net_stats)
700 {
701 	struct hclge_vport *vport = hclge_get_vport(handle);
702 	struct hclge_dev *hdev = vport->back;
703 	int status;
704 
705 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706 		return;
707 
708 	status = hclge_mac_update_stats(hdev);
709 	if (status)
710 		dev_err(&hdev->pdev->dev,
711 			"Update MAC stats fail, status = %d.\n",
712 			status);
713 
714 	status = hclge_tqps_update_stats(handle);
715 	if (status)
716 		dev_err(&hdev->pdev->dev,
717 			"Update TQPS stats fail, status = %d.\n",
718 			status);
719 
720 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722 
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726 		HNAE3_SUPPORT_PHY_LOOPBACK |\
727 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729 
730 	struct hclge_vport *vport = hclge_get_vport(handle);
731 	struct hclge_dev *hdev = vport->back;
732 	int count = 0;
733 
	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
739 	if (stringset == ETH_SS_TEST) {
740 		/* clear loopback bit flags at first */
741 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742 		if (hdev->pdev->revision >= 0x21 ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746 			count += 1;
747 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748 		}
749 
750 		count += 2;
751 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753 
754 		if (hdev->hw.mac.phydev) {
755 			count += 1;
756 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757 		}
758 
759 	} else if (stringset == ETH_SS_STATS) {
760 		count = ARRAY_SIZE(g_mac_stats_string) +
761 			hclge_tqps_get_sset_count(handle, stringset);
762 	}
763 
764 	return count;
765 }
766 
767 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768 			      u8 *data)
769 {
	u8 *p = data;
771 	int size;
772 
773 	if (stringset == ETH_SS_STATS) {
774 		size = ARRAY_SIZE(g_mac_stats_string);
775 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
776 					   size, p);
777 		p = hclge_tqps_get_strings(handle, p);
778 	} else if (stringset == ETH_SS_TEST) {
779 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
780 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
781 			       ETH_GSTRING_LEN);
782 			p += ETH_GSTRING_LEN;
783 		}
784 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
785 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
786 			       ETH_GSTRING_LEN);
787 			p += ETH_GSTRING_LEN;
788 		}
789 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
790 			memcpy(p,
791 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
792 			       ETH_GSTRING_LEN);
793 			p += ETH_GSTRING_LEN;
794 		}
795 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
796 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
797 			       ETH_GSTRING_LEN);
798 			p += ETH_GSTRING_LEN;
799 		}
800 	}
801 }
802 
803 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
804 {
805 	struct hclge_vport *vport = hclge_get_vport(handle);
806 	struct hclge_dev *hdev = vport->back;
807 	u64 *p;
808 
809 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
810 				 ARRAY_SIZE(g_mac_stats_string), data);
811 	p = hclge_tqps_get_stats(handle, p);
812 }
813 
814 static void hclge_get_mac_stat(struct hnae3_handle *handle,
815 			       struct hns3_mac_stats *mac_stats)
816 {
817 	struct hclge_vport *vport = hclge_get_vport(handle);
818 	struct hclge_dev *hdev = vport->back;
819 
820 	hclge_update_stats(handle, NULL);
821 
822 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
823 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824 }
825 
826 static int hclge_parse_func_status(struct hclge_dev *hdev,
827 				   struct hclge_func_status_cmd *status)
828 {
829 #define HCLGE_MAC_ID_MASK	0xF
830 
831 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
832 		return -EINVAL;
833 
	/* Record whether this PF is the main PF */
835 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
836 		hdev->flag |= HCLGE_FLAG_MAIN;
837 	else
838 		hdev->flag &= ~HCLGE_FLAG_MAIN;
839 
840 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
841 	return 0;
842 }
843 
844 static int hclge_query_function_status(struct hclge_dev *hdev)
845 {
846 #define HCLGE_QUERY_MAX_CNT	5
847 
848 	struct hclge_func_status_cmd *req;
849 	struct hclge_desc desc;
850 	int timeout = 0;
851 	int ret;
852 
853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
854 	req = (struct hclge_func_status_cmd *)desc.data;
855 
856 	do {
857 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
858 		if (ret) {
859 			dev_err(&hdev->pdev->dev,
860 				"query function status failed %d.\n", ret);
861 			return ret;
862 		}
863 
		/* Check if PF reset is done */
865 		if (req->pf_state)
866 			break;
867 		usleep_range(1000, 2000);
868 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
869 
870 	return hclge_parse_func_status(hdev, req);
871 }
872 
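/* Query the PF's TQP, buffer and MSI-X resources and work out the
 * NIC/RoCE vector split.
 */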
873 static int hclge_query_pf_resource(struct hclge_dev *hdev)
874 {
875 	struct hclge_pf_res_cmd *req;
876 	struct hclge_desc desc;
877 	int ret;
878 
879 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
880 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
881 	if (ret) {
882 		dev_err(&hdev->pdev->dev,
883 			"query pf resource failed %d.\n", ret);
884 		return ret;
885 	}
886 
887 	req = (struct hclge_pf_res_cmd *)desc.data;
888 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
889 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
890 
891 	if (req->tx_buf_size)
892 		hdev->tx_buf_size =
893 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
894 	else
895 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
896 
897 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
898 
899 	if (req->dv_buf_size)
900 		hdev->dv_buf_size =
901 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
902 	else
903 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
904 
905 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
906 
907 	if (hnae3_dev_roce_supported(hdev)) {
908 		hdev->roce_base_msix_offset =
909 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
910 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
911 		hdev->num_roce_msi =
912 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
913 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
914 
		/* The number of NIC MSI-X vectors always equals that of RoCE. */
916 		hdev->num_nic_msi = hdev->num_roce_msi;
917 
		/* The PF owns both NIC vectors and RoCE vectors;
		 * the NIC vectors are arranged before the RoCE vectors.
		 */
921 		hdev->num_msi = hdev->num_roce_msi +
922 				hdev->roce_base_msix_offset;
923 	} else {
924 		hdev->num_msi =
925 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
926 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
927 
928 		hdev->num_nic_msi = hdev->num_msi;
929 	}
930 
931 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
932 		dev_err(&hdev->pdev->dev,
933 			"Just %u msi resources, not enough for pf(min:2).\n",
934 			hdev->num_nic_msi);
935 		return -EINVAL;
936 	}
937 
938 	return 0;
939 }
940 
941 static int hclge_parse_speed(int speed_cmd, int *speed)
942 {
943 	switch (speed_cmd) {
944 	case 6:
945 		*speed = HCLGE_MAC_SPEED_10M;
946 		break;
947 	case 7:
948 		*speed = HCLGE_MAC_SPEED_100M;
949 		break;
950 	case 0:
951 		*speed = HCLGE_MAC_SPEED_1G;
952 		break;
953 	case 1:
954 		*speed = HCLGE_MAC_SPEED_10G;
955 		break;
956 	case 2:
957 		*speed = HCLGE_MAC_SPEED_25G;
958 		break;
959 	case 3:
960 		*speed = HCLGE_MAC_SPEED_40G;
961 		break;
962 	case 4:
963 		*speed = HCLGE_MAC_SPEED_50G;
964 		break;
965 	case 5:
966 		*speed = HCLGE_MAC_SPEED_100G;
967 		break;
968 	default:
969 		return -EINVAL;
970 	}
971 
972 	return 0;
973 }
974 
975 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
976 {
977 	struct hclge_vport *vport = hclge_get_vport(handle);
978 	struct hclge_dev *hdev = vport->back;
979 	u32 speed_ability = hdev->hw.mac.speed_ability;
980 	u32 speed_bit = 0;
981 
982 	switch (speed) {
983 	case HCLGE_MAC_SPEED_10M:
984 		speed_bit = HCLGE_SUPPORT_10M_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_100M:
987 		speed_bit = HCLGE_SUPPORT_100M_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_1G:
990 		speed_bit = HCLGE_SUPPORT_1G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_10G:
993 		speed_bit = HCLGE_SUPPORT_10G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_25G:
996 		speed_bit = HCLGE_SUPPORT_25G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_40G:
999 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1000 		break;
1001 	case HCLGE_MAC_SPEED_50G:
1002 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1003 		break;
1004 	case HCLGE_MAC_SPEED_100G:
1005 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1006 		break;
1007 	default:
1008 		return -EINVAL;
1009 	}
1010 
1011 	if (speed_bit & speed_ability)
1012 		return 0;
1013 
1014 	return -EINVAL;
1015 }
1016 
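/* The hclge_convert_setting_*() helpers below translate speed_ability bits
 * into the corresponding ethtool link modes (SR/LR/CR/KR variants).
 */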
1017 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1018 {
1019 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1027 				 mac->supported);
1028 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1029 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1030 				 mac->supported);
1031 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1032 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1033 				 mac->supported);
1034 }
1035 
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1037 {
1038 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 				 mac->supported);
1047 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 				 mac->supported);
1050 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 				 mac->supported);
1053 }
1054 
1055 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1056 {
1057 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1059 				 mac->supported);
1060 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1065 				 mac->supported);
1066 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1067 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1068 				 mac->supported);
1069 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1070 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1071 				 mac->supported);
1072 }
1073 
1074 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1075 {
1076 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1080 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1081 				 mac->supported);
1082 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1083 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1084 				 mac->supported);
1085 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1086 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1087 				 mac->supported);
1088 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1089 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1090 				 mac->supported);
1091 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1092 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1093 				 mac->supported);
1094 }
1095 
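/* Advertise the FEC modes available at the current MAC speed and record
 * them in mac->fec_ability.
 */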
1096 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1097 {
1098 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1099 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1100 
1101 	switch (mac->speed) {
1102 	case HCLGE_MAC_SPEED_10G:
1103 	case HCLGE_MAC_SPEED_40G:
1104 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1105 				 mac->supported);
1106 		mac->fec_ability =
1107 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1108 		break;
1109 	case HCLGE_MAC_SPEED_25G:
1110 	case HCLGE_MAC_SPEED_50G:
1111 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1112 				 mac->supported);
1113 		mac->fec_ability =
1114 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1115 			BIT(HNAE3_FEC_AUTO);
1116 		break;
1117 	case HCLGE_MAC_SPEED_100G:
1118 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1119 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1120 		break;
1121 	default:
1122 		mac->fec_ability = 0;
1123 		break;
1124 	}
1125 }
1126 
1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1128 					u8 speed_ability)
1129 {
1130 	struct hclge_mac *mac = &hdev->hw.mac;
1131 
1132 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1133 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1134 				 mac->supported);
1135 
1136 	hclge_convert_setting_sr(mac, speed_ability);
1137 	hclge_convert_setting_lr(mac, speed_ability);
1138 	hclge_convert_setting_cr(mac, speed_ability);
1139 	if (hdev->pdev->revision >= 0x21)
1140 		hclge_convert_setting_fec(mac);
1141 
1142 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1143 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1144 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1145 }
1146 
1147 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1148 					    u8 speed_ability)
1149 {
1150 	struct hclge_mac *mac = &hdev->hw.mac;
1151 
1152 	hclge_convert_setting_kr(mac, speed_ability);
1153 	if (hdev->pdev->revision >= 0x21)
1154 		hclge_convert_setting_fec(mac);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1156 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1157 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1158 }
1159 
1160 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1161 					 u8 speed_ability)
1162 {
1163 	unsigned long *supported = hdev->hw.mac.supported;
1164 
	/* default to supporting all speeds for a GE port */
1166 	if (!speed_ability)
1167 		speed_ability = HCLGE_SUPPORT_GE;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1171 				 supported);
1172 
1173 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1174 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1175 				 supported);
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1177 				 supported);
1178 	}
1179 
1180 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1182 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1183 	}
1184 
1185 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1186 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1187 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1188 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1189 }
1190 
1191 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1192 {
1193 	u8 media_type = hdev->hw.mac.media_type;
1194 
1195 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1196 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1197 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1198 		hclge_parse_copper_link_mode(hdev, speed_ability);
1199 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1200 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1201 }
1202 
1203 static u32 hclge_get_max_speed(u8 speed_ability)
1204 {
1205 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1206 		return HCLGE_MAC_SPEED_100G;
1207 
1208 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1209 		return HCLGE_MAC_SPEED_50G;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1212 		return HCLGE_MAC_SPEED_40G;
1213 
1214 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1215 		return HCLGE_MAC_SPEED_25G;
1216 
1217 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1218 		return HCLGE_MAC_SPEED_10G;
1219 
1220 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1221 		return HCLGE_MAC_SPEED_1G;
1222 
1223 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1224 		return HCLGE_MAC_SPEED_100M;
1225 
1226 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1227 		return HCLGE_MAC_SPEED_10M;
1228 
1229 	return HCLGE_MAC_SPEED_1G;
1230 }
1231 
1232 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1233 {
1234 	struct hclge_cfg_param_cmd *req;
1235 	u64 mac_addr_tmp_high;
1236 	u64 mac_addr_tmp;
1237 	unsigned int i;
1238 
1239 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1240 
1241 	/* get the configuration */
1242 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 					      HCLGE_CFG_VMDQ_M,
1244 					      HCLGE_CFG_VMDQ_S);
1245 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1247 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1248 					    HCLGE_CFG_TQP_DESC_N_M,
1249 					    HCLGE_CFG_TQP_DESC_N_S);
1250 
1251 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252 					HCLGE_CFG_PHY_ADDR_M,
1253 					HCLGE_CFG_PHY_ADDR_S);
1254 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255 					  HCLGE_CFG_MEDIA_TP_M,
1256 					  HCLGE_CFG_MEDIA_TP_S);
1257 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1258 					  HCLGE_CFG_RX_BUF_LEN_M,
1259 					  HCLGE_CFG_RX_BUF_LEN_S);
1260 	/* get mac_address */
1261 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1262 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1263 					    HCLGE_CFG_MAC_ADDR_H_M,
1264 					    HCLGE_CFG_MAC_ADDR_H_S);
1265 
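	/* param[2] holds the low 32 bits of the MAC address; the two shifts
	 * place the high part above them (i.e. a plain 32-bit left shift).
	 */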
1266 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1267 
1268 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269 					     HCLGE_CFG_DEFAULT_SPEED_M,
1270 					     HCLGE_CFG_DEFAULT_SPEED_S);
1271 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1272 					    HCLGE_CFG_RSS_SIZE_M,
1273 					    HCLGE_CFG_RSS_SIZE_S);
1274 
1275 	for (i = 0; i < ETH_ALEN; i++)
1276 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1277 
1278 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1279 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1280 
1281 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 					     HCLGE_CFG_SPEED_ABILITY_M,
1283 					     HCLGE_CFG_SPEED_ABILITY_S);
1284 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1286 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1287 	if (!cfg->umv_space)
1288 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1289 }
1290 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
1295 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1296 {
1297 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1298 	struct hclge_cfg_param_cmd *req;
1299 	unsigned int i;
1300 	int ret;
1301 
1302 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1303 		u32 offset = 0;
1304 
1305 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1306 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1307 					   true);
1308 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1309 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* the read length is in units of 4 bytes when sent to hardware */
1311 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1312 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1313 		req->offset = cpu_to_le32(offset);
1314 	}
1315 
1316 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1317 	if (ret) {
1318 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319 		return ret;
1320 	}
1321 
1322 	hclge_parse_cfg(hcfg, desc);
1323 
1324 	return 0;
1325 }
1326 
1327 static int hclge_get_cap(struct hclge_dev *hdev)
1328 {
1329 	int ret;
1330 
1331 	ret = hclge_query_function_status(hdev);
1332 	if (ret) {
1333 		dev_err(&hdev->pdev->dev,
1334 			"query function status error %d.\n", ret);
1335 		return ret;
1336 	}
1337 
1338 	/* get pf resource */
1339 	return hclge_query_pf_resource(hdev);
1340 }
1341 
1342 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1343 {
1344 #define HCLGE_MIN_TX_DESC	64
1345 #define HCLGE_MIN_RX_DESC	64
1346 
1347 	if (!is_kdump_kernel())
1348 		return;
1349 
1350 	dev_info(&hdev->pdev->dev,
1351 		 "Running kdump kernel. Using minimal resources\n");
1352 
	/* the minimal number of queue pairs equals the number of vports */
1354 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1355 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1356 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1357 }
1358 
1359 static int hclge_configure(struct hclge_dev *hdev)
1360 {
1361 	struct hclge_cfg cfg;
1362 	unsigned int i;
1363 	int ret;
1364 
1365 	ret = hclge_get_cfg(hdev, &cfg);
1366 	if (ret)
1367 		return ret;
1368 
1369 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 	hdev->base_tqp_pid = 0;
1371 	hdev->rss_size_max = cfg.rss_size_max;
1372 	hdev->rx_buf_len = cfg.rx_buf_len;
1373 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1374 	hdev->hw.mac.media_type = cfg.media_type;
1375 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 	hdev->num_tx_desc = cfg.tqp_desc_num;
1377 	hdev->num_rx_desc = cfg.tqp_desc_num;
1378 	hdev->tm_info.num_pg = 1;
1379 	hdev->tc_max = cfg.tc_num;
1380 	hdev->tm_info.hw_pfc_map = 0;
1381 	hdev->wanted_umv_size = cfg.umv_space;
1382 
1383 	if (hnae3_dev_fd_supported(hdev)) {
1384 		hdev->fd_en = true;
1385 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 	}
1387 
1388 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1391 			cfg.default_speed, ret);
1392 		return ret;
1393 	}
1394 
1395 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1396 
1397 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1398 
1399 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1400 	    (hdev->tc_max < 1)) {
1401 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1402 			 hdev->tc_max);
1403 		hdev->tc_max = 1;
1404 	}
1405 
1406 	/* Dev does not support DCB */
1407 	if (!hnae3_dev_dcb_supported(hdev)) {
1408 		hdev->tc_max = 1;
1409 		hdev->pfc_max = 0;
1410 	} else {
1411 		hdev->pfc_max = hdev->tc_max;
1412 	}
1413 
1414 	hdev->tm_info.num_tc = 1;
1415 
	/* Non-contiguous TCs are not supported currently */
1417 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1418 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1419 
1420 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1421 
1422 	hclge_init_kdump_kernel_config(hdev);
1423 
	/* Set the initial affinity based on the PCI function number */
1425 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1426 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1427 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1428 			&hdev->affinity_mask);
1429 
1430 	return ret;
1431 }
1432 
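/* Configure the TSO MSS range in hardware. */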
1433 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1434 			    u16 tso_mss_max)
1435 {
1436 	struct hclge_cfg_tso_status_cmd *req;
1437 	struct hclge_desc desc;
1438 
1439 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440 
1441 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1443 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1444 
1445 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1446 }
1447 
1448 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1449 {
1450 	struct hclge_cfg_gro_status_cmd *req;
1451 	struct hclge_desc desc;
1452 	int ret;
1453 
1454 	if (!hnae3_dev_gro_supported(hdev))
1455 		return 0;
1456 
1457 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1458 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1459 
1460 	req->gro_en = en ? 1 : 0;
1461 
1462 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1463 	if (ret)
1464 		dev_err(&hdev->pdev->dev,
1465 			"GRO hardware config cmd failed, ret = %d\n", ret);
1466 
1467 	return ret;
1468 }
1469 
1470 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1471 {
1472 	struct hclge_tqp *tqp;
1473 	int i;
1474 
1475 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1476 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1477 	if (!hdev->htqp)
1478 		return -ENOMEM;
1479 
1480 	tqp = hdev->htqp;
1481 
1482 	for (i = 0; i < hdev->num_tqps; i++) {
1483 		tqp->dev = &hdev->pdev->dev;
1484 		tqp->index = i;
1485 
1486 		tqp->q.ae_algo = &ae_algo;
1487 		tqp->q.buf_size = hdev->rx_buf_len;
1488 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1489 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1490 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1491 			i * HCLGE_TQP_REG_SIZE;
1492 
1493 		tqp++;
1494 	}
1495 
1496 	return 0;
1497 }
1498 
1499 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1500 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1501 {
1502 	struct hclge_tqp_map_cmd *req;
1503 	struct hclge_desc desc;
1504 	int ret;
1505 
1506 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1507 
1508 	req = (struct hclge_tqp_map_cmd *)desc.data;
1509 	req->tqp_id = cpu_to_le16(tqp_pid);
1510 	req->tqp_vf = func_id;
1511 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1512 	if (!is_pf)
1513 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1514 	req->tqp_vid = cpu_to_le16(tqp_vid);
1515 
1516 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1517 	if (ret)
1518 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1519 
1520 	return ret;
1521 }
1522 
1523 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1524 {
1525 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1526 	struct hclge_dev *hdev = vport->back;
1527 	int i, alloced;
1528 
1529 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1530 	     alloced < num_tqps; i++) {
1531 		if (!hdev->htqp[i].alloced) {
1532 			hdev->htqp[i].q.handle = &vport->nic;
1533 			hdev->htqp[i].q.tqp_index = alloced;
1534 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1535 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1536 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1537 			hdev->htqp[i].alloced = true;
1538 			alloced++;
1539 		}
1540 	}
1541 	vport->alloc_tqps = alloced;
1542 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1543 				vport->alloc_tqps / hdev->tm_info.num_tc);
1544 
	/* ensure a one-to-one mapping between IRQ and queue by default */
1546 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1547 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1548 
1549 	return 0;
1550 }
1551 
1552 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1553 			    u16 num_tx_desc, u16 num_rx_desc)
1554 
1555 {
1556 	struct hnae3_handle *nic = &vport->nic;
1557 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1558 	struct hclge_dev *hdev = vport->back;
1559 	int ret;
1560 
1561 	kinfo->num_tx_desc = num_tx_desc;
1562 	kinfo->num_rx_desc = num_rx_desc;
1563 
1564 	kinfo->rx_buf_len = hdev->rx_buf_len;
1565 
1566 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1567 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1568 	if (!kinfo->tqp)
1569 		return -ENOMEM;
1570 
1571 	ret = hclge_assign_tqp(vport, num_tqps);
1572 	if (ret)
1573 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1574 
1575 	return ret;
1576 }
1577 
1578 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1579 				  struct hclge_vport *vport)
1580 {
1581 	struct hnae3_handle *nic = &vport->nic;
1582 	struct hnae3_knic_private_info *kinfo;
1583 	u16 i;
1584 
1585 	kinfo = &nic->kinfo;
1586 	for (i = 0; i < vport->alloc_tqps; i++) {
1587 		struct hclge_tqp *q =
1588 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1589 		bool is_pf;
1590 		int ret;
1591 
1592 		is_pf = !(vport->vport_id);
1593 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1594 					     i, is_pf);
1595 		if (ret)
1596 			return ret;
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 static int hclge_map_tqp(struct hclge_dev *hdev)
1603 {
1604 	struct hclge_vport *vport = hdev->vport;
1605 	u16 i, num_vport;
1606 
1607 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1608 	for (i = 0; i < num_vport; i++)	{
1609 		int ret;
1610 
1611 		ret = hclge_map_tqp_to_vport(hdev, vport);
1612 		if (ret)
1613 			return ret;
1614 
1615 		vport++;
1616 	}
1617 
1618 	return 0;
1619 }
1620 
1621 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1622 {
1623 	struct hnae3_handle *nic = &vport->nic;
1624 	struct hclge_dev *hdev = vport->back;
1625 	int ret;
1626 
1627 	nic->pdev = hdev->pdev;
1628 	nic->ae_algo = &ae_algo;
1629 	nic->numa_node_mask = hdev->numa_node_mask;
1630 
1631 	ret = hclge_knic_setup(vport, num_tqps,
1632 			       hdev->num_tx_desc, hdev->num_rx_desc);
1633 	if (ret)
1634 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1635 
1636 	return ret;
1637 }
1638 
1639 static int hclge_alloc_vport(struct hclge_dev *hdev)
1640 {
1641 	struct pci_dev *pdev = hdev->pdev;
1642 	struct hclge_vport *vport;
1643 	u32 tqp_main_vport;
1644 	u32 tqp_per_vport;
1645 	int num_vport, i;
1646 	int ret;
1647 
	/* We need to alloc a vport for the main NIC of the PF */
1649 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1650 
1651 	if (hdev->num_tqps < num_vport) {
1652 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1653 			hdev->num_tqps, num_vport);
1654 		return -EINVAL;
1655 	}
1656 
	/* Alloc the same number of TQPs for every vport; the main vport
	 * also takes any remainder.
	 */
1658 	tqp_per_vport = hdev->num_tqps / num_vport;
1659 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1660 
1661 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1662 			     GFP_KERNEL);
1663 	if (!vport)
1664 		return -ENOMEM;
1665 
1666 	hdev->vport = vport;
1667 	hdev->num_alloc_vport = num_vport;
1668 
1669 	if (IS_ENABLED(CONFIG_PCI_IOV))
1670 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1671 
1672 	for (i = 0; i < num_vport; i++) {
1673 		vport->back = hdev;
1674 		vport->vport_id = i;
1675 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1676 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1677 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1678 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1679 		INIT_LIST_HEAD(&vport->vlan_list);
1680 		INIT_LIST_HEAD(&vport->uc_mac_list);
1681 		INIT_LIST_HEAD(&vport->mc_mac_list);
1682 		spin_lock_init(&vport->mac_list_lock);
1683 
1684 		if (i == 0)
1685 			ret = hclge_vport_setup(vport, tqp_main_vport);
1686 		else
1687 			ret = hclge_vport_setup(vport, tqp_per_vport);
1688 		if (ret) {
1689 			dev_err(&pdev->dev,
1690 				"vport setup failed for vport %d, %d\n",
1691 				i, ret);
1692 			return ret;
1693 		}
1694 
1695 		vport++;
1696 	}
1697 
1698 	return 0;
1699 }
1700 
1701 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1702 				    struct hclge_pkt_buf_alloc *buf_alloc)
1703 {
/* TX buffer size is configured in units of 128 bytes */
1705 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1706 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1707 	struct hclge_tx_buff_alloc_cmd *req;
1708 	struct hclge_desc desc;
1709 	int ret;
1710 	u8 i;
1711 
1712 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1713 
1714 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1715 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1716 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1717 
1718 		req->tx_pkt_buff[i] =
1719 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1720 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1721 	}
1722 
1723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1724 	if (ret)
1725 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1726 			ret);
1727 
1728 	return ret;
1729 }
1730 
1731 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1732 				 struct hclge_pkt_buf_alloc *buf_alloc)
1733 {
1734 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1735 
1736 	if (ret)
1737 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1738 
1739 	return ret;
1740 }
1741 
1742 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1743 {
1744 	unsigned int i;
1745 	u32 cnt = 0;
1746 
1747 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1748 		if (hdev->hw_tc_map & BIT(i))
1749 			cnt++;
1750 	return cnt;
1751 }
1752 
1753 /* Get the number of PFC-enabled TCs, which have a private buffer */
1754 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1755 				  struct hclge_pkt_buf_alloc *buf_alloc)
1756 {
1757 	struct hclge_priv_buf *priv;
1758 	unsigned int i;
1759 	int cnt = 0;
1760 
1761 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1762 		priv = &buf_alloc->priv_buf[i];
1763 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1764 		    priv->enable)
1765 			cnt++;
1766 	}
1767 
1768 	return cnt;
1769 }
1770 
1771 /* Get the number of PFC-disabled TCs, which have a private buffer */
1772 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1773 				     struct hclge_pkt_buf_alloc *buf_alloc)
1774 {
1775 	struct hclge_priv_buf *priv;
1776 	unsigned int i;
1777 	int cnt = 0;
1778 
1779 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1780 		priv = &buf_alloc->priv_buf[i];
1781 		if (hdev->hw_tc_map & BIT(i) &&
1782 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1783 		    priv->enable)
1784 			cnt++;
1785 	}
1786 
1787 	return cnt;
1788 }
1789 
1790 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1791 {
1792 	struct hclge_priv_buf *priv;
1793 	u32 rx_priv = 0;
1794 	int i;
1795 
1796 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1797 		priv = &buf_alloc->priv_buf[i];
1798 		if (priv->enable)
1799 			rx_priv += priv->buf_size;
1800 	}
1801 	return rx_priv;
1802 }
1803 
1804 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1805 {
1806 	u32 i, total_tx_size = 0;
1807 
1808 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1809 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1810 
1811 	return total_tx_size;
1812 }
1813 
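/* Check whether @rx_all, minus the rx private buffers already allocated,
 * is large enough for the shared buffer. If it is, fill in the shared
 * buffer size, its waterlines and the per-TC thresholds and return true;
 * otherwise return false.
 */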
1814 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1815 				struct hclge_pkt_buf_alloc *buf_alloc,
1816 				u32 rx_all)
1817 {
1818 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1819 	u32 tc_num = hclge_get_tc_num(hdev);
1820 	u32 shared_buf, aligned_mps;
1821 	u32 rx_priv;
1822 	int i;
1823 
1824 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1825 
1826 	if (hnae3_dev_dcb_supported(hdev))
1827 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1828 					hdev->dv_buf_size;
1829 	else
1830 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1831 					+ hdev->dv_buf_size;
1832 
1833 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1834 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1835 			     HCLGE_BUF_SIZE_UNIT);
1836 
1837 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1838 	if (rx_all < rx_priv + shared_std)
1839 		return false;
1840 
1841 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1842 	buf_alloc->s_buf.buf_size = shared_buf;
1843 	if (hnae3_dev_dcb_supported(hdev)) {
1844 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1845 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1846 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1847 				  HCLGE_BUF_SIZE_UNIT);
1848 	} else {
1849 		buf_alloc->s_buf.self.high = aligned_mps +
1850 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1851 		buf_alloc->s_buf.self.low = aligned_mps;
1852 	}
1853 
1854 	if (hnae3_dev_dcb_supported(hdev)) {
1855 		hi_thrd = shared_buf - hdev->dv_buf_size;
1856 
1857 		if (tc_num <= NEED_RESERVE_TC_NUM)
1858 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1859 					/ BUF_MAX_PERCENT;
1860 
1861 		if (tc_num)
1862 			hi_thrd = hi_thrd / tc_num;
1863 
1864 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1865 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1866 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1867 	} else {
1868 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1869 		lo_thrd = aligned_mps;
1870 	}
1871 
1872 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1873 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1874 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1875 	}
1876 
1877 	return true;
1878 }
1879 
1880 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1881 				struct hclge_pkt_buf_alloc *buf_alloc)
1882 {
1883 	u32 i, total_size;
1884 
1885 	total_size = hdev->pkt_buf_size;
1886 
1887 	/* alloc tx buffer for all enabled tc */
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890 
1891 		if (hdev->hw_tc_map & BIT(i)) {
1892 			if (total_size < hdev->tx_buf_size)
1893 				return -ENOMEM;
1894 
1895 			priv->tx_buf_size = hdev->tx_buf_size;
1896 		} else {
1897 			priv->tx_buf_size = 0;
1898 		}
1899 
1900 		total_size -= priv->tx_buf_size;
1901 	}
1902 
1903 	return 0;
1904 }
1905 
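/* Assign an rx private buffer to every enabled TC: PFC-enabled TCs get
 * both low and high waterlines, the others only a high waterline. The
 * @max flag selects the larger or the smaller waterline scheme. Returns
 * true only if the shared buffer still fits in the remaining rx buffer.
 */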
1906 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1907 				  struct hclge_pkt_buf_alloc *buf_alloc)
1908 {
1909 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1910 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1911 	unsigned int i;
1912 
1913 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1914 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1915 
1916 		priv->enable = 0;
1917 		priv->wl.low = 0;
1918 		priv->wl.high = 0;
1919 		priv->buf_size = 0;
1920 
1921 		if (!(hdev->hw_tc_map & BIT(i)))
1922 			continue;
1923 
1924 		priv->enable = 1;
1925 
1926 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1927 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1928 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1929 						HCLGE_BUF_SIZE_UNIT);
1930 		} else {
1931 			priv->wl.low = 0;
1932 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1933 					aligned_mps;
1934 		}
1935 
1936 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1937 	}
1938 
1939 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1940 }
1941 
1942 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1943 					  struct hclge_pkt_buf_alloc *buf_alloc)
1944 {
1945 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1946 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1947 	int i;
1948 
1949 	/* clear the private buffers starting from the last TC */
1950 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1951 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1952 		unsigned int mask = BIT((unsigned int)i);
1953 
1954 		if (hdev->hw_tc_map & mask &&
1955 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1956 			/* Clear the no pfc TC private buffer */
1957 			priv->wl.low = 0;
1958 			priv->wl.high = 0;
1959 			priv->buf_size = 0;
1960 			priv->enable = 0;
1961 			no_pfc_priv_num--;
1962 		}
1963 
1964 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1965 		    no_pfc_priv_num == 0)
1966 			break;
1967 	}
1968 
1969 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1970 }
1971 
1972 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1973 					struct hclge_pkt_buf_alloc *buf_alloc)
1974 {
1975 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1976 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1977 	int i;
1978 
1979 	/* clear the private buffers starting from the last TC */
1980 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1981 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1982 		unsigned int mask = BIT((unsigned int)i);
1983 
1984 		if (hdev->hw_tc_map & mask &&
1985 		    hdev->tm_info.hw_pfc_map & mask) {
1986 			/* Reduce the number of pfc TC with private buffer */
1987 			priv->wl.low = 0;
1988 			priv->enable = 0;
1989 			priv->wl.high = 0;
1990 			priv->buf_size = 0;
1991 			pfc_priv_num--;
1992 		}
1993 
1994 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1995 		    pfc_priv_num == 0)
1996 			break;
1997 	}
1998 
1999 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2000 }
2001 
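/* Try to give the whole remaining rx buffer to the private buffers of
 * the enabled TCs and leave no shared buffer. Returns false when the
 * resulting per-TC buffer would be smaller than the required minimum.
 */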
2002 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2003 				       struct hclge_pkt_buf_alloc *buf_alloc)
2004 {
2005 #define COMPENSATE_BUFFER	0x3C00
2006 #define COMPENSATE_HALF_MPS_NUM	5
2007 #define PRIV_WL_GAP		0x1800
2008 
2009 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2010 	u32 tc_num = hclge_get_tc_num(hdev);
2011 	u32 half_mps = hdev->mps >> 1;
2012 	u32 min_rx_priv;
2013 	unsigned int i;
2014 
2015 	if (tc_num)
2016 		rx_priv = rx_priv / tc_num;
2017 
2018 	if (tc_num <= NEED_RESERVE_TC_NUM)
2019 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2020 
2021 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2022 			COMPENSATE_HALF_MPS_NUM * half_mps;
2023 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2024 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2025 
2026 	if (rx_priv < min_rx_priv)
2027 		return false;
2028 
2029 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2030 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2031 
2032 		priv->enable = 0;
2033 		priv->wl.low = 0;
2034 		priv->wl.high = 0;
2035 		priv->buf_size = 0;
2036 
2037 		if (!(hdev->hw_tc_map & BIT(i)))
2038 			continue;
2039 
2040 		priv->enable = 1;
2041 		priv->buf_size = rx_priv;
2042 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2043 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2044 	}
2045 
2046 	buf_alloc->s_buf.buf_size = 0;
2047 
2048 	return true;
2049 }
2050 
2051 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2052  * @hdev: pointer to struct hclge_dev
2053  * @buf_alloc: pointer to buffer calculation data
2054  * @return: 0: calculation successful, negative: fail
2055  */
2056 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2057 				struct hclge_pkt_buf_alloc *buf_alloc)
2058 {
2059 	/* When DCB is not supported, rx private buffer is not allocated. */
2060 	if (!hnae3_dev_dcb_supported(hdev)) {
2061 		u32 rx_all = hdev->pkt_buf_size;
2062 
2063 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2064 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2065 			return -ENOMEM;
2066 
2067 		return 0;
2068 	}
2069 
2070 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2071 		return 0;
2072 
2073 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2074 		return 0;
2075 
2076 	/* try to decrease the buffer size */
2077 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2078 		return 0;
2079 
2080 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2081 		return 0;
2082 
2083 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2084 		return 0;
2085 
2086 	return -ENOMEM;
2087 }
2088 
2089 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2090 				   struct hclge_pkt_buf_alloc *buf_alloc)
2091 {
2092 	struct hclge_rx_priv_buff_cmd *req;
2093 	struct hclge_desc desc;
2094 	int ret;
2095 	int i;
2096 
2097 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2098 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2099 
2100 	/* Alloc private buffer TCs */
2101 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2102 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2103 
2104 		req->buf_num[i] =
2105 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2106 		req->buf_num[i] |=
2107 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2108 	}
2109 
2110 	req->shared_buf =
2111 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2112 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2113 
2114 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2115 	if (ret)
2116 		dev_err(&hdev->pdev->dev,
2117 			"rx private buffer alloc cmd failed %d\n", ret);
2118 
2119 	return ret;
2120 }
2121 
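/* Write the per-TC rx private buffer waterlines to hardware. The TCs
 * are split across two command descriptors sent in one go.
 */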
2122 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2123 				   struct hclge_pkt_buf_alloc *buf_alloc)
2124 {
2125 	struct hclge_rx_priv_wl_buf *req;
2126 	struct hclge_priv_buf *priv;
2127 	struct hclge_desc desc[2];
2128 	int i, j;
2129 	int ret;
2130 
2131 	for (i = 0; i < 2; i++) {
2132 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2133 					   false);
2134 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2135 
2136 		/* The first descriptor sets the NEXT bit to 1 */
2137 		if (i == 0)
2138 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2139 		else
2140 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2141 
2142 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2143 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2144 
2145 			priv = &buf_alloc->priv_buf[idx];
2146 			req->tc_wl[j].high =
2147 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2148 			req->tc_wl[j].high |=
2149 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2150 			req->tc_wl[j].low =
2151 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2152 			req->tc_wl[j].low |=
2153 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154 		}
2155 	}
2156 
2157 	/* Send 2 descriptors at one time */
2158 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2159 	if (ret)
2160 		dev_err(&hdev->pdev->dev,
2161 			"rx private waterline config cmd failed %d\n",
2162 			ret);
2163 	return ret;
2164 }
2165 
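/* Write the per-TC thresholds of the shared rx buffer to hardware,
 * again split across two command descriptors sent in one go.
 */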
2166 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2167 				    struct hclge_pkt_buf_alloc *buf_alloc)
2168 {
2169 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2170 	struct hclge_rx_com_thrd *req;
2171 	struct hclge_desc desc[2];
2172 	struct hclge_tc_thrd *tc;
2173 	int i, j;
2174 	int ret;
2175 
2176 	for (i = 0; i < 2; i++) {
2177 		hclge_cmd_setup_basic_desc(&desc[i],
2178 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2179 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2180 
2181 		/* The first descriptor sets the NEXT bit to 1 */
2182 		if (i == 0)
2183 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2184 		else
2185 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2186 
2187 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2188 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2189 
2190 			req->com_thrd[j].high =
2191 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2192 			req->com_thrd[j].high |=
2193 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2194 			req->com_thrd[j].low =
2195 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2196 			req->com_thrd[j].low |=
2197 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 		}
2199 	}
2200 
2201 	/* Send 2 descriptors at one time */
2202 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2203 	if (ret)
2204 		dev_err(&hdev->pdev->dev,
2205 			"common threshold config cmd failed %d\n", ret);
2206 	return ret;
2207 }
2208 
2209 static int hclge_common_wl_config(struct hclge_dev *hdev,
2210 				  struct hclge_pkt_buf_alloc *buf_alloc)
2211 {
2212 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2213 	struct hclge_rx_com_wl *req;
2214 	struct hclge_desc desc;
2215 	int ret;
2216 
2217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2218 
2219 	req = (struct hclge_rx_com_wl *)desc.data;
2220 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2221 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2222 
2223 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2224 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2225 
2226 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2227 	if (ret)
2228 		dev_err(&hdev->pdev->dev,
2229 			"common waterline config cmd failed %d\n", ret);
2230 
2231 	return ret;
2232 }
2233 
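/* Calculate and configure the tx/rx packet buffers: the tx buffer per
 * TC, the rx private buffers, the common waterline and, when DCB is
 * supported, the rx private waterlines and shared-buffer thresholds.
 */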
2234 int hclge_buffer_alloc(struct hclge_dev *hdev)
2235 {
2236 	struct hclge_pkt_buf_alloc *pkt_buf;
2237 	int ret;
2238 
2239 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2240 	if (!pkt_buf)
2241 		return -ENOMEM;
2242 
2243 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2244 	if (ret) {
2245 		dev_err(&hdev->pdev->dev,
2246 			"could not calc tx buffer size for all TCs %d\n", ret);
2247 		goto out;
2248 	}
2249 
2250 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2251 	if (ret) {
2252 		dev_err(&hdev->pdev->dev,
2253 			"could not alloc tx buffers %d\n", ret);
2254 		goto out;
2255 	}
2256 
2257 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2258 	if (ret) {
2259 		dev_err(&hdev->pdev->dev,
2260 			"could not calc rx priv buffer size for all TCs %d\n",
2261 			ret);
2262 		goto out;
2263 	}
2264 
2265 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2266 	if (ret) {
2267 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2268 			ret);
2269 		goto out;
2270 	}
2271 
2272 	if (hnae3_dev_dcb_supported(hdev)) {
2273 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2274 		if (ret) {
2275 			dev_err(&hdev->pdev->dev,
2276 				"could not configure rx private waterline %d\n",
2277 				ret);
2278 			goto out;
2279 		}
2280 
2281 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2282 		if (ret) {
2283 			dev_err(&hdev->pdev->dev,
2284 				"could not configure common threshold %d\n",
2285 				ret);
2286 			goto out;
2287 		}
2288 	}
2289 
2290 	ret = hclge_common_wl_config(hdev, pkt_buf);
2291 	if (ret)
2292 		dev_err(&hdev->pdev->dev,
2293 			"could not configure common waterline %d\n", ret);
2294 
2295 out:
2296 	kfree(pkt_buf);
2297 	return ret;
2298 }
2299 
2300 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2301 {
2302 	struct hnae3_handle *roce = &vport->roce;
2303 	struct hnae3_handle *nic = &vport->nic;
2304 
2305 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2306 
2307 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2308 	    vport->back->num_msi_left == 0)
2309 		return -EINVAL;
2310 
2311 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2312 
2313 	roce->rinfo.netdev = nic->kinfo.netdev;
2314 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2315 
2316 	roce->pdev = nic->pdev;
2317 	roce->ae_algo = nic->ae_algo;
2318 	roce->numa_node_mask = nic->numa_node_mask;
2319 
2320 	return 0;
2321 }
2322 
2323 static int hclge_init_msi(struct hclge_dev *hdev)
2324 {
2325 	struct pci_dev *pdev = hdev->pdev;
2326 	int vectors;
2327 	int i;
2328 
2329 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2330 					hdev->num_msi,
2331 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2332 	if (vectors < 0) {
2333 		dev_err(&pdev->dev,
2334 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2335 			vectors);
2336 		return vectors;
2337 	}
2338 	if (vectors < hdev->num_msi)
2339 		dev_warn(&hdev->pdev->dev,
2340 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2341 			 hdev->num_msi, vectors);
2342 
2343 	hdev->num_msi = vectors;
2344 	hdev->num_msi_left = vectors;
2345 
2346 	hdev->base_msi_vector = pdev->irq;
2347 	hdev->roce_base_vector = hdev->base_msi_vector +
2348 				hdev->roce_base_msix_offset;
2349 
2350 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2351 					   sizeof(u16), GFP_KERNEL);
2352 	if (!hdev->vector_status) {
2353 		pci_free_irq_vectors(pdev);
2354 		return -ENOMEM;
2355 	}
2356 
2357 	for (i = 0; i < hdev->num_msi; i++)
2358 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2359 
2360 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 					sizeof(int), GFP_KERNEL);
2362 	if (!hdev->vector_irq) {
2363 		pci_free_irq_vectors(pdev);
2364 		return -ENOMEM;
2365 	}
2366 
2367 	return 0;
2368 }
2369 
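/* Only 10M and 100M support half duplex; force full duplex for all
 * other speeds.
 */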
2370 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2371 {
2372 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2373 		duplex = HCLGE_MAC_FULL;
2374 
2375 	return duplex;
2376 }
2377 
2378 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2379 				      u8 duplex)
2380 {
2381 	struct hclge_config_mac_speed_dup_cmd *req;
2382 	struct hclge_desc desc;
2383 	int ret;
2384 
2385 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2386 
2387 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2388 
2389 	if (duplex)
2390 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2391 
2392 	switch (speed) {
2393 	case HCLGE_MAC_SPEED_10M:
2394 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2395 				HCLGE_CFG_SPEED_S, 6);
2396 		break;
2397 	case HCLGE_MAC_SPEED_100M:
2398 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 				HCLGE_CFG_SPEED_S, 7);
2400 		break;
2401 	case HCLGE_MAC_SPEED_1G:
2402 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 				HCLGE_CFG_SPEED_S, 0);
2404 		break;
2405 	case HCLGE_MAC_SPEED_10G:
2406 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 				HCLGE_CFG_SPEED_S, 1);
2408 		break;
2409 	case HCLGE_MAC_SPEED_25G:
2410 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 				HCLGE_CFG_SPEED_S, 2);
2412 		break;
2413 	case HCLGE_MAC_SPEED_40G:
2414 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 				HCLGE_CFG_SPEED_S, 3);
2416 		break;
2417 	case HCLGE_MAC_SPEED_50G:
2418 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 				HCLGE_CFG_SPEED_S, 4);
2420 		break;
2421 	case HCLGE_MAC_SPEED_100G:
2422 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 				HCLGE_CFG_SPEED_S, 5);
2424 		break;
2425 	default:
2426 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2427 		return -EINVAL;
2428 	}
2429 
2430 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2431 		      1);
2432 
2433 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2434 	if (ret) {
2435 		dev_err(&hdev->pdev->dev,
2436 			"mac speed/duplex config cmd failed %d.\n", ret);
2437 		return ret;
2438 	}
2439 
2440 	return 0;
2441 }
2442 
2443 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2444 {
2445 	struct hclge_mac *mac = &hdev->hw.mac;
2446 	int ret;
2447 
2448 	duplex = hclge_check_speed_dup(duplex, speed);
2449 	if (!mac->support_autoneg && mac->speed == speed &&
2450 	    mac->duplex == duplex)
2451 		return 0;
2452 
2453 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2454 	if (ret)
2455 		return ret;
2456 
2457 	hdev->hw.mac.speed = speed;
2458 	hdev->hw.mac.duplex = duplex;
2459 
2460 	return 0;
2461 }
2462 
2463 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2464 				     u8 duplex)
2465 {
2466 	struct hclge_vport *vport = hclge_get_vport(handle);
2467 	struct hclge_dev *hdev = vport->back;
2468 
2469 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2470 }
2471 
2472 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2473 {
2474 	struct hclge_config_auto_neg_cmd *req;
2475 	struct hclge_desc desc;
2476 	u32 flag = 0;
2477 	int ret;
2478 
2479 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2480 
2481 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2482 	if (enable)
2483 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2484 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2485 
2486 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2487 	if (ret)
2488 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2489 			ret);
2490 
2491 	return ret;
2492 }
2493 
2494 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2495 {
2496 	struct hclge_vport *vport = hclge_get_vport(handle);
2497 	struct hclge_dev *hdev = vport->back;
2498 
2499 	if (!hdev->hw.mac.support_autoneg) {
2500 		if (enable) {
2501 			dev_err(&hdev->pdev->dev,
2502 				"autoneg is not supported by current port\n");
2503 			return -EOPNOTSUPP;
2504 		} else {
2505 			return 0;
2506 		}
2507 	}
2508 
2509 	return hclge_set_autoneg_en(hdev, enable);
2510 }
2511 
2512 static int hclge_get_autoneg(struct hnae3_handle *handle)
2513 {
2514 	struct hclge_vport *vport = hclge_get_vport(handle);
2515 	struct hclge_dev *hdev = vport->back;
2516 	struct phy_device *phydev = hdev->hw.mac.phydev;
2517 
2518 	if (phydev)
2519 		return phydev->autoneg;
2520 
2521 	return hdev->hw.mac.autoneg;
2522 }
2523 
2524 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2525 {
2526 	struct hclge_vport *vport = hclge_get_vport(handle);
2527 	struct hclge_dev *hdev = vport->back;
2528 	int ret;
2529 
2530 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2531 
2532 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2533 	if (ret)
2534 		return ret;
2535 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2536 }
2537 
2538 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2539 {
2540 	struct hclge_vport *vport = hclge_get_vport(handle);
2541 	struct hclge_dev *hdev = vport->back;
2542 
2543 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2544 		return hclge_set_autoneg_en(hdev, !halt);
2545 
2546 	return 0;
2547 }
2548 
2549 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2550 {
2551 	struct hclge_config_fec_cmd *req;
2552 	struct hclge_desc desc;
2553 	int ret;
2554 
2555 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2556 
2557 	req = (struct hclge_config_fec_cmd *)desc.data;
2558 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2559 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2560 	if (fec_mode & BIT(HNAE3_FEC_RS))
2561 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2562 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2563 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2564 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2565 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2566 
2567 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2568 	if (ret)
2569 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2570 
2571 	return ret;
2572 }
2573 
2574 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2575 {
2576 	struct hclge_vport *vport = hclge_get_vport(handle);
2577 	struct hclge_dev *hdev = vport->back;
2578 	struct hclge_mac *mac = &hdev->hw.mac;
2579 	int ret;
2580 
2581 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2582 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2583 		return -EINVAL;
2584 	}
2585 
2586 	ret = hclge_set_fec_hw(hdev, fec_mode);
2587 	if (ret)
2588 		return ret;
2589 
2590 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2591 	return 0;
2592 }
2593 
2594 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2595 			  u8 *fec_mode)
2596 {
2597 	struct hclge_vport *vport = hclge_get_vport(handle);
2598 	struct hclge_dev *hdev = vport->back;
2599 	struct hclge_mac *mac = &hdev->hw.mac;
2600 
2601 	if (fec_ability)
2602 		*fec_ability = mac->fec_ability;
2603 	if (fec_mode)
2604 		*fec_mode = mac->fec_mode;
2605 }
2606 
2607 static int hclge_mac_init(struct hclge_dev *hdev)
2608 {
2609 	struct hclge_mac *mac = &hdev->hw.mac;
2610 	int ret;
2611 
2612 	hdev->support_sfp_query = true;
2613 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2614 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2615 					 hdev->hw.mac.duplex);
2616 	if (ret)
2617 		return ret;
2618 
2619 	if (hdev->hw.mac.support_autoneg) {
2620 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2621 		if (ret)
2622 			return ret;
2623 	}
2624 
2625 	mac->link = 0;
2626 
2627 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2628 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2629 		if (ret)
2630 			return ret;
2631 	}
2632 
2633 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2634 	if (ret) {
2635 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2636 		return ret;
2637 	}
2638 
2639 	ret = hclge_set_default_loopback(hdev);
2640 	if (ret)
2641 		return ret;
2642 
2643 	ret = hclge_buffer_alloc(hdev);
2644 	if (ret)
2645 		dev_err(&hdev->pdev->dev,
2646 			"allocate buffer fail, ret=%d\n", ret);
2647 
2648 	return ret;
2649 }
2650 
2651 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2652 {
2653 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2654 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2655 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2656 				    hclge_wq, &hdev->service_task, 0);
2657 }
2658 
2659 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2660 {
2661 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2662 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2663 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2664 				    hclge_wq, &hdev->service_task, 0);
2665 }
2666 
2667 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2668 {
2669 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2670 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2671 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2672 				    hclge_wq, &hdev->service_task,
2673 				    delay_time);
2674 }
2675 
2676 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2677 {
2678 	struct hclge_link_status_cmd *req;
2679 	struct hclge_desc desc;
2680 	int ret;
2681 
2682 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2683 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2684 	if (ret) {
2685 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2686 			ret);
2687 		return ret;
2688 	}
2689 
2690 	req = (struct hclge_link_status_cmd *)desc.data;
2691 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2692 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2693 
2694 	return 0;
2695 }
2696 
2697 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2698 {
2699 	struct phy_device *phydev = hdev->hw.mac.phydev;
2700 
2701 	*link_status = HCLGE_LINK_STATUS_DOWN;
2702 
2703 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2704 		return 0;
2705 
2706 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2707 		return 0;
2708 
2709 	return hclge_get_mac_link_status(hdev, link_status);
2710 }
2711 
2712 static void hclge_update_link_status(struct hclge_dev *hdev)
2713 {
2714 	struct hnae3_client *rclient = hdev->roce_client;
2715 	struct hnae3_client *client = hdev->nic_client;
2716 	struct hnae3_handle *rhandle;
2717 	struct hnae3_handle *handle;
2718 	int state;
2719 	int ret;
2720 	int i;
2721 
2722 	if (!client)
2723 		return;
2724 
2725 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2726 		return;
2727 
2728 	ret = hclge_get_mac_phy_link(hdev, &state);
2729 	if (ret) {
2730 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2731 		return;
2732 	}
2733 
2734 	if (state != hdev->hw.mac.link) {
2735 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2736 			handle = &hdev->vport[i].nic;
2737 			client->ops->link_status_change(handle, state);
2738 			hclge_config_mac_tnl_int(hdev, state);
2739 			rhandle = &hdev->vport[i].roce;
2740 			if (rclient && rclient->ops->link_status_change)
2741 				rclient->ops->link_status_change(rhandle,
2742 								 state);
2743 		}
2744 		hdev->hw.mac.link = state;
2745 	}
2746 
2747 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2748 }
2749 
2750 static void hclge_update_port_capability(struct hclge_mac *mac)
2751 {
2752 	/* update fec ability by speed */
2753 	hclge_convert_setting_fec(mac);
2754 
2755 	/* firmware can not identify the backplane type, the media type
2756 	 * read from the configuration can help to deal with it
2757 	 */
2758 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2759 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2760 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2761 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2762 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2763 
2764 	if (mac->support_autoneg) {
2765 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2766 		linkmode_copy(mac->advertising, mac->supported);
2767 	} else {
2768 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2769 				   mac->supported);
2770 		linkmode_zero(mac->advertising);
2771 	}
2772 }
2773 
2774 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2775 {
2776 	struct hclge_sfp_info_cmd *resp;
2777 	struct hclge_desc desc;
2778 	int ret;
2779 
2780 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2781 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2782 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2783 	if (ret == -EOPNOTSUPP) {
2784 		dev_warn(&hdev->pdev->dev,
2785 			 "IMP do not support get SFP speed %d\n", ret);
2786 		return ret;
2787 	} else if (ret) {
2788 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2789 		return ret;
2790 	}
2791 
2792 	*speed = le32_to_cpu(resp->speed);
2793 
2794 	return 0;
2795 }
2796 
2797 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2798 {
2799 	struct hclge_sfp_info_cmd *resp;
2800 	struct hclge_desc desc;
2801 	int ret;
2802 
2803 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2804 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2805 
2806 	resp->query_type = QUERY_ACTIVE_SPEED;
2807 
2808 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2809 	if (ret == -EOPNOTSUPP) {
2810 		dev_warn(&hdev->pdev->dev,
2811 			 "IMP does not support get SFP info %d\n", ret);
2812 		return ret;
2813 	} else if (ret) {
2814 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2815 		return ret;
2816 	}
2817 
2818 	/* In some cases, the MAC speed got from IMP may be 0, it shouldn't
2819 	 * be set to mac->speed.
2820 	 */
2821 	if (!le32_to_cpu(resp->speed))
2822 		return 0;
2823 
2824 	mac->speed = le32_to_cpu(resp->speed);
2825 	/* if resp->speed_ability is 0, it means it's an old version
2826 	 * firmware, do not update these params
2827 	 */
2828 	if (resp->speed_ability) {
2829 		mac->module_type = le32_to_cpu(resp->module_type);
2830 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2831 		mac->autoneg = resp->autoneg;
2832 		mac->support_autoneg = resp->autoneg_ability;
2833 		mac->speed_type = QUERY_ACTIVE_SPEED;
2834 		if (!resp->active_fec)
2835 			mac->fec_mode = 0;
2836 		else
2837 			mac->fec_mode = BIT(resp->active_fec);
2838 	} else {
2839 		mac->speed_type = QUERY_SFP_SPEED;
2840 	}
2841 
2842 	return 0;
2843 }
2844 
2845 static int hclge_update_port_info(struct hclge_dev *hdev)
2846 {
2847 	struct hclge_mac *mac = &hdev->hw.mac;
2848 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2849 	int ret;
2850 
2851 	/* get the port info from SFP cmd if not copper port */
2852 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2853 		return 0;
2854 
2855 	/* if IMP does not support get SFP/qSFP info, return directly */
2856 	if (!hdev->support_sfp_query)
2857 		return 0;
2858 
2859 	if (hdev->pdev->revision >= 0x21)
2860 		ret = hclge_get_sfp_info(hdev, mac);
2861 	else
2862 		ret = hclge_get_sfp_speed(hdev, &speed);
2863 
2864 	if (ret == -EOPNOTSUPP) {
2865 		hdev->support_sfp_query = false;
2866 		return ret;
2867 	} else if (ret) {
2868 		return ret;
2869 	}
2870 
2871 	if (hdev->pdev->revision >= 0x21) {
2872 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2873 			hclge_update_port_capability(mac);
2874 			return 0;
2875 		}
2876 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2877 					       HCLGE_MAC_FULL);
2878 	} else {
2879 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2880 			return 0; /* do nothing if no SFP */
2881 
2882 		/* must config full duplex for SFP */
2883 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2884 	}
2885 }
2886 
2887 static int hclge_get_status(struct hnae3_handle *handle)
2888 {
2889 	struct hclge_vport *vport = hclge_get_vport(handle);
2890 	struct hclge_dev *hdev = vport->back;
2891 
2892 	hclge_update_link_status(hdev);
2893 
2894 	return hdev->hw.mac.link;
2895 }
2896 
2897 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2898 {
2899 	if (!pci_num_vf(hdev->pdev)) {
2900 		dev_err(&hdev->pdev->dev,
2901 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
2902 		return NULL;
2903 	}
2904 
2905 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2906 		dev_err(&hdev->pdev->dev,
2907 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
2908 			vf, pci_num_vf(hdev->pdev));
2909 		return NULL;
2910 	}
2911 
2912 	/* VF vports start from index 1 (index 0 is the PF) */
2913 	vf += HCLGE_VF_VPORT_START_NUM;
2914 	return &hdev->vport[vf];
2915 }
2916 
2917 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2918 			       struct ifla_vf_info *ivf)
2919 {
2920 	struct hclge_vport *vport = hclge_get_vport(handle);
2921 	struct hclge_dev *hdev = vport->back;
2922 
2923 	vport = hclge_get_vf_vport(hdev, vf);
2924 	if (!vport)
2925 		return -EINVAL;
2926 
2927 	ivf->vf = vf;
2928 	ivf->linkstate = vport->vf_info.link_state;
2929 	ivf->spoofchk = vport->vf_info.spoofchk;
2930 	ivf->trusted = vport->vf_info.trusted;
2931 	ivf->min_tx_rate = 0;
2932 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2933 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2934 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2935 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2936 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
2937 
2938 	return 0;
2939 }
2940 
2941 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2942 				   int link_state)
2943 {
2944 	struct hclge_vport *vport = hclge_get_vport(handle);
2945 	struct hclge_dev *hdev = vport->back;
2946 
2947 	vport = hclge_get_vf_vport(hdev, vf);
2948 	if (!vport)
2949 		return -EINVAL;
2950 
2951 	vport->vf_info.link_state = link_state;
2952 
2953 	return 0;
2954 }
2955 
2956 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2957 {
2958 	u32 cmdq_src_reg, msix_src_reg;
2959 
2960 	/* fetch the events from their corresponding regs */
2961 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2962 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2963 
2964 	/* Assumption: If by any chance reset and mailbox events are reported
2965 	 * together then we will only process the reset event in this go and
2966 	 * will defer the processing of the mailbox events. Since we would not
2967 	 * have cleared the RX CMDQ event this time, we would receive another
2968 	 * interrupt from H/W just for the mailbox.
2969 	 *
2970 	 * check for vector0 reset event sources
2971 	 */
2972 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
2973 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2974 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2975 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2976 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2977 		hdev->rst_stats.imp_rst_cnt++;
2978 		return HCLGE_VECTOR0_EVENT_RST;
2979 	}
2980 
2981 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
2982 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2983 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2984 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2985 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2986 		hdev->rst_stats.global_rst_cnt++;
2987 		return HCLGE_VECTOR0_EVENT_RST;
2988 	}
2989 
2990 	/* check for vector0 msix event source */
2991 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2992 		*clearval = msix_src_reg;
2993 		return HCLGE_VECTOR0_EVENT_ERR;
2994 	}
2995 
2996 	/* check for vector0 mailbox(=CMDQ RX) event source */
2997 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2998 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2999 		*clearval = cmdq_src_reg;
3000 		return HCLGE_VECTOR0_EVENT_MBX;
3001 	}
3002 
3003 	/* print other vector0 event source */
3004 	dev_info(&hdev->pdev->dev,
3005 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3006 		 cmdq_src_reg, msix_src_reg);
3007 	*clearval = msix_src_reg;
3008 
3009 	return HCLGE_VECTOR0_EVENT_OTHER;
3010 }
3011 
3012 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3013 				    u32 regclr)
3014 {
3015 	switch (event_type) {
3016 	case HCLGE_VECTOR0_EVENT_RST:
3017 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3018 		break;
3019 	case HCLGE_VECTOR0_EVENT_MBX:
3020 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3021 		break;
3022 	default:
3023 		break;
3024 	}
3025 }
3026 
3027 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3028 {
3029 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3030 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3031 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3032 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3033 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3034 }
3035 
3036 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3037 {
3038 	writel(enable ? 1 : 0, vector->addr);
3039 }
3040 
3041 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3042 {
3043 	struct hclge_dev *hdev = data;
3044 	u32 clearval = 0;
3045 	u32 event_cause;
3046 
3047 	hclge_enable_vector(&hdev->misc_vector, false);
3048 	event_cause = hclge_check_event_cause(hdev, &clearval);
3049 
3050 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3051 	switch (event_cause) {
3052 	case HCLGE_VECTOR0_EVENT_ERR:
3053 		/* we do not know what type of reset is required now. This could
3054 		 * only be decided after we fetch the type of errors which
3055 		 * caused this event. Therefore, we will do below for now:
3056 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3057 		 *    have a deferred type of reset to be used.
3058 		 * 2. Schedule the reset service task.
3059 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3060 		 *    will fetch the correct type of reset. This would be done
3061 		 *    by first decoding the types of errors.
3062 		 */
3063 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3064 		/* fall through */
3065 	case HCLGE_VECTOR0_EVENT_RST:
3066 		hclge_reset_task_schedule(hdev);
3067 		break;
3068 	case HCLGE_VECTOR0_EVENT_MBX:
3069 		/* If we are here then,
3070 		 * 1. Either we are not handling any mbx task and we are not
3071 		 *    scheduled as well
3072 		 *                        OR
3073 		 * 2. We could be handling a mbx task but nothing more is
3074 		 *    scheduled.
3075 		 * In both cases, we should schedule mbx task as there are more
3076 		 * mbx messages reported by this interrupt.
3077 		 */
3078 		hclge_mbx_task_schedule(hdev);
3079 		break;
3080 	default:
3081 		dev_warn(&hdev->pdev->dev,
3082 			 "received unknown or unhandled event of vector0\n");
3083 		break;
3084 	}
3085 
3086 	hclge_clear_event_cause(hdev, event_cause, clearval);
3087 
3088 	/* Enable the interrupt if it is not caused by reset. And when
3089 	 * clearval equals 0, it means the interrupt status may have been
3090 	 * cleared by hardware before the driver reads the status register.
3091 	 * For this case, the vector0 interrupt also should be enabled.
3092 	 */
3093 	if (!clearval ||
3094 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3095 		hclge_enable_vector(&hdev->misc_vector, true);
3096 	}
3097 
3098 	return IRQ_HANDLED;
3099 }
3100 
3101 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3102 {
3103 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3104 		dev_warn(&hdev->pdev->dev,
3105 			 "vector(vector_id %d) has been freed.\n", vector_id);
3106 		return;
3107 	}
3108 
3109 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3110 	hdev->num_msi_left += 1;
3111 	hdev->num_msi_used -= 1;
3112 }
3113 
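/* Reserve MSI-X vector 0 as the misc vector (reset and mailbox events) */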
3114 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3115 {
3116 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3117 
3118 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3119 
3120 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3121 	hdev->vector_status[0] = 0;
3122 
3123 	hdev->num_msi_left -= 1;
3124 	hdev->num_msi_used += 1;
3125 }
3126 
3127 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3128 				      const cpumask_t *mask)
3129 {
3130 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3131 					      affinity_notify);
3132 
3133 	cpumask_copy(&hdev->affinity_mask, mask);
3134 }
3135 
3136 static void hclge_irq_affinity_release(struct kref *ref)
3137 {
3138 }
3139 
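/* Bind the misc vector to the configured affinity mask and register a
 * notifier so the driver can follow later affinity changes.
 */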
3140 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3141 {
3142 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3143 			      &hdev->affinity_mask);
3144 
3145 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3146 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3147 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3148 				  &hdev->affinity_notify);
3149 }
3150 
3151 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3152 {
3153 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3154 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3155 }
3156 
3157 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3158 {
3159 	int ret;
3160 
3161 	hclge_get_misc_vector(hdev);
3162 
3163 	/* this irq is freed explicitly in hclge_misc_irq_uninit() */
3164 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3165 		 HCLGE_NAME, pci_name(hdev->pdev));
3166 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3167 			  0, hdev->misc_vector.name, hdev);
3168 	if (ret) {
3169 		hclge_free_vector(hdev, 0);
3170 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3171 			hdev->misc_vector.vector_irq);
3172 	}
3173 
3174 	return ret;
3175 }
3176 
3177 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3178 {
3179 	free_irq(hdev->misc_vector.vector_irq, hdev);
3180 	hclge_free_vector(hdev, 0);
3181 }
3182 
3183 int hclge_notify_client(struct hclge_dev *hdev,
3184 			enum hnae3_reset_notify_type type)
3185 {
3186 	struct hnae3_client *client = hdev->nic_client;
3187 	u16 i;
3188 
3189 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3190 		return 0;
3191 
3192 	if (!client->ops->reset_notify)
3193 		return -EOPNOTSUPP;
3194 
3195 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3196 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3197 		int ret;
3198 
3199 		ret = client->ops->reset_notify(handle, type);
3200 		if (ret) {
3201 			dev_err(&hdev->pdev->dev,
3202 				"notify nic client failed %d(%d)\n", type, ret);
3203 			return ret;
3204 		}
3205 	}
3206 
3207 	return 0;
3208 }
3209 
3210 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3211 				    enum hnae3_reset_notify_type type)
3212 {
3213 	struct hnae3_client *client = hdev->roce_client;
3214 	int ret = 0;
3215 	u16 i;
3216 
3217 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3218 		return 0;
3219 
3220 	if (!client->ops->reset_notify)
3221 		return -EOPNOTSUPP;
3222 
3223 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3224 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3225 
3226 		ret = client->ops->reset_notify(handle, type);
3227 		if (ret) {
3228 			dev_err(&hdev->pdev->dev,
3229 				"notify roce client failed %d(%d)",
3230 				type, ret);
3231 			return ret;
3232 		}
3233 	}
3234 
3235 	return ret;
3236 }
3237 
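/* Poll the reset status register for the current reset type until
 * hardware clears the reset bit, or time out after
 * HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WATI_MS milliseconds.
 */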
3238 static int hclge_reset_wait(struct hclge_dev *hdev)
3239 {
3240 #define HCLGE_RESET_WATI_MS	100
3241 #define HCLGE_RESET_WAIT_CNT	350
3242 
3243 	u32 val, reg, reg_bit;
3244 	u32 cnt = 0;
3245 
3246 	switch (hdev->reset_type) {
3247 	case HNAE3_IMP_RESET:
3248 		reg = HCLGE_GLOBAL_RESET_REG;
3249 		reg_bit = HCLGE_IMP_RESET_BIT;
3250 		break;
3251 	case HNAE3_GLOBAL_RESET:
3252 		reg = HCLGE_GLOBAL_RESET_REG;
3253 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3254 		break;
3255 	case HNAE3_FUNC_RESET:
3256 		reg = HCLGE_FUN_RST_ING;
3257 		reg_bit = HCLGE_FUN_RST_ING_B;
3258 		break;
3259 	default:
3260 		dev_err(&hdev->pdev->dev,
3261 			"Wait for unsupported reset type: %d\n",
3262 			hdev->reset_type);
3263 		return -EINVAL;
3264 	}
3265 
3266 	val = hclge_read_dev(&hdev->hw, reg);
3267 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3268 		msleep(HCLGE_RESET_WATI_MS);
3269 		val = hclge_read_dev(&hdev->hw, reg);
3270 		cnt++;
3271 	}
3272 
3273 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3274 		dev_warn(&hdev->pdev->dev,
3275 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3276 		return -EBUSY;
3277 	}
3278 
3279 	return 0;
3280 }
3281 
3282 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3283 {
3284 	struct hclge_vf_rst_cmd *req;
3285 	struct hclge_desc desc;
3286 
3287 	req = (struct hclge_vf_rst_cmd *)desc.data;
3288 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3289 	req->dest_vfid = func_id;
3290 
3291 	if (reset)
3292 		req->vf_rst = 0x1;
3293 
3294 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3295 }
3296 
3297 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3298 {
3299 	int i;
3300 
3301 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3302 		struct hclge_vport *vport = &hdev->vport[i];
3303 		int ret;
3304 
3305 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3306 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3307 		if (ret) {
3308 			dev_err(&hdev->pdev->dev,
3309 				"set vf(%u) rst failed %d!\n",
3310 				vport->vport_id, ret);
3311 			return ret;
3312 		}
3313 
3314 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3315 			continue;
3316 
3317 		/* Inform VF to process the reset.
3318 		 * hclge_inform_reset_assert_to_vf may fail if VF
3319 		 * driver is not loaded.
3320 		 */
3321 		ret = hclge_inform_reset_assert_to_vf(vport);
3322 		if (ret)
3323 			dev_warn(&hdev->pdev->dev,
3324 				 "inform reset to vf(%u) failed %d!\n",
3325 				 vport->vport_id, ret);
3326 	}
3327 
3328 	return 0;
3329 }
3330 
3331 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3332 {
3333 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3334 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3335 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3336 		return;
3337 
3338 	hclge_mbx_handler(hdev);
3339 
3340 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3341 }
3342 
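/* Before the PF/FLR reset proceeds, wait until all VFs report that they
 * have stopped IO, servicing mailbox requests in the meantime. Old
 * firmware without the query command just gets a fixed delay.
 */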
3343 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3344 {
3345 	struct hclge_pf_rst_sync_cmd *req;
3346 	struct hclge_desc desc;
3347 	int cnt = 0;
3348 	int ret;
3349 
3350 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3351 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3352 
3353 	do {
3354 		/* VF needs to down its netdev by mbx during PF or FLR reset */
3355 		hclge_mailbox_service_task(hdev);
3356 
3357 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3358 		/* for compatibility with old firmware, wait
3359 		 * 100 ms for the VF to stop IO
3360 		 */
3361 		if (ret == -EOPNOTSUPP) {
3362 			msleep(HCLGE_RESET_SYNC_TIME);
3363 			return;
3364 		} else if (ret) {
3365 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3366 				 ret);
3367 			return;
3368 		} else if (req->all_vf_ready) {
3369 			return;
3370 		}
3371 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3372 		hclge_cmd_reuse_desc(&desc, true);
3373 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3374 
3375 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3376 }
3377 
3378 void hclge_report_hw_error(struct hclge_dev *hdev,
3379 			   enum hnae3_hw_error_type type)
3380 {
3381 	struct hnae3_client *client = hdev->nic_client;
3382 	u16 i;
3383 
3384 	if (!client || !client->ops->process_hw_error ||
3385 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3386 		return;
3387 
3388 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3389 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3390 }
3391 
3392 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3393 {
3394 	u32 reg_val;
3395 
3396 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3397 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3398 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3399 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3400 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3401 	}
3402 
3403 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3404 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3405 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3406 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3407 	}
3408 }
3409 
3410 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3411 {
3412 	struct hclge_desc desc;
3413 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3414 	int ret;
3415 
3416 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3417 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3418 	req->fun_reset_vfid = func_id;
3419 
3420 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3421 	if (ret)
3422 		dev_err(&hdev->pdev->dev,
3423 			"send function reset cmd fail, status =%d\n", ret);
3424 
3425 	return ret;
3426 }
3427 
3428 static void hclge_do_reset(struct hclge_dev *hdev)
3429 {
3430 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3431 	struct pci_dev *pdev = hdev->pdev;
3432 	u32 val;
3433 
3434 	if (hclge_get_hw_reset_stat(handle)) {
3435 		dev_info(&pdev->dev, "hardware reset not finish\n");
3436 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3437 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3438 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3439 		return;
3440 	}
3441 
3442 	switch (hdev->reset_type) {
3443 	case HNAE3_GLOBAL_RESET:
3444 		dev_info(&pdev->dev, "global reset requested\n");
3445 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3446 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3447 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3448 		break;
3449 	case HNAE3_FUNC_RESET:
3450 		dev_info(&pdev->dev, "PF reset requested\n");
3451 		/* schedule again to check later */
3452 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3453 		hclge_reset_task_schedule(hdev);
3454 		break;
3455 	default:
3456 		dev_warn(&pdev->dev,
3457 			 "unsupported reset type: %d\n", hdev->reset_type);
3458 		break;
3459 	}
3460 }
3461 
3462 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3463 						   unsigned long *addr)
3464 {
3465 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3466 	struct hclge_dev *hdev = ae_dev->priv;
3467 
3468 	/* first, resolve any unknown reset type to the known type(s) */
3469 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3470 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3471 					HCLGE_MISC_VECTOR_INT_STS);
3472 		/* we will intentionally ignore any errors from this function
3473 		 * as we will end up in *some* reset request in any case
3474 		 */
3475 		if (hclge_handle_hw_msix_error(hdev, addr))
3476 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3477 				 msix_sts_reg);
3478 
3479 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3480 		/* We deferred the clearing of the error event which caused the
3481 		 * interrupt since it was not possible to do that in
3482 		 * interrupt context (and this is the reason we introduced the
3483 		 * new UNKNOWN reset type). Now that the errors have been
3484 		 * handled and cleared in hardware, we can safely enable
3485 		 * interrupts. This is an exception to the norm.
3486 		 */
3487 		hclge_enable_vector(&hdev->misc_vector, true);
3488 	}
3489 
3490 	/* return the highest priority reset level amongst all */
3491 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3492 		rst_level = HNAE3_IMP_RESET;
3493 		clear_bit(HNAE3_IMP_RESET, addr);
3494 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3495 		clear_bit(HNAE3_FUNC_RESET, addr);
3496 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3497 		rst_level = HNAE3_GLOBAL_RESET;
3498 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3499 		clear_bit(HNAE3_FUNC_RESET, addr);
3500 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3501 		rst_level = HNAE3_FUNC_RESET;
3502 		clear_bit(HNAE3_FUNC_RESET, addr);
3503 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3504 		rst_level = HNAE3_FLR_RESET;
3505 		clear_bit(HNAE3_FLR_RESET, addr);
3506 	}
3507 
3508 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3509 	    rst_level < hdev->reset_type)
3510 		return HNAE3_NONE_RESET;
3511 
3512 	return rst_level;
3513 }
3514 
3515 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3516 {
3517 	u32 clearval = 0;
3518 
3519 	switch (hdev->reset_type) {
3520 	case HNAE3_IMP_RESET:
3521 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3522 		break;
3523 	case HNAE3_GLOBAL_RESET:
3524 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3525 		break;
3526 	default:
3527 		break;
3528 	}
3529 
3530 	if (!clearval)
3531 		return;
3532 
3533 	/* For revision 0x20, the reset interrupt source
3534 	 * can only be cleared after the hardware reset is done
3535 	 */
3536 	if (hdev->pdev->revision == 0x20)
3537 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3538 				clearval);
3539 
3540 	hclge_enable_vector(&hdev->misc_vector, true);
3541 }
3542 
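/* Tell the hardware whether the driver is ready for the reset by
 * setting or clearing the software reset ready bit.
 */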
3543 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3544 {
3545 	u32 reg_val;
3546 
3547 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3548 	if (enable)
3549 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3550 	else
3551 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3552 
3553 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3554 }
3555 
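/* Tell all VFs that a function level reset is about to happen, then
 * synchronize with them before the PF asserts the reset.
 */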
3556 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3557 {
3558 	int ret;
3559 
3560 	ret = hclge_set_all_vf_rst(hdev, true);
3561 	if (ret)
3562 		return ret;
3563 
3564 	hclge_func_reset_sync_vf(hdev);
3565 
3566 	return 0;
3567 }
3568 
3569 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3570 {
3571 	u32 reg_val;
3572 	int ret = 0;
3573 
3574 	switch (hdev->reset_type) {
3575 	case HNAE3_FUNC_RESET:
3576 		ret = hclge_func_reset_notify_vf(hdev);
3577 		if (ret)
3578 			return ret;
3579 
3580 		ret = hclge_func_reset_cmd(hdev, 0);
3581 		if (ret) {
3582 			dev_err(&hdev->pdev->dev,
3583 				"asserting function reset fail %d!\n", ret);
3584 			return ret;
3585 		}
3586 
		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
3592 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3593 		hdev->rst_stats.pf_rst_cnt++;
3594 		break;
3595 	case HNAE3_FLR_RESET:
3596 		ret = hclge_func_reset_notify_vf(hdev);
3597 		if (ret)
3598 			return ret;
3599 		break;
3600 	case HNAE3_IMP_RESET:
3601 		hclge_handle_imp_error(hdev);
3602 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3603 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3604 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3605 		break;
3606 	default:
3607 		break;
3608 	}
3609 
3610 	/* inform hardware that preparatory work is done */
3611 	msleep(HCLGE_RESET_SYNC_TIME);
3612 	hclge_reset_handshake(hdev, true);
3613 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3614 
3615 	return ret;
3616 }
3617 
3618 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3619 {
3620 #define MAX_RESET_FAIL_CNT 5
3621 
3622 	if (hdev->reset_pending) {
3623 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3624 			 hdev->reset_pending);
3625 		return true;
3626 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3627 		   HCLGE_RESET_INT_M) {
3628 		dev_info(&hdev->pdev->dev,
3629 			 "reset failed because new reset interrupt\n");
3630 		hclge_clear_reset_cause(hdev);
3631 		return false;
3632 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3633 		hdev->rst_stats.reset_fail_cnt++;
3634 		set_bit(hdev->reset_type, &hdev->reset_pending);
3635 		dev_info(&hdev->pdev->dev,
3636 			 "re-schedule reset task(%u)\n",
3637 			 hdev->rst_stats.reset_fail_cnt);
3638 		return true;
3639 	}
3640 
3641 	hclge_clear_reset_cause(hdev);
3642 
	/* recover the handshake status when reset fails */
3644 	hclge_reset_handshake(hdev, true);
3645 
3646 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3647 
3648 	hclge_dbg_dump_rst_info(hdev);
3649 
3650 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3651 
3652 	return false;
3653 }
3654 
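/* Report to the firmware that the PF driver has finished its reset handling
 * (used for global and IMP resets).
 */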
3655 static int hclge_set_rst_done(struct hclge_dev *hdev)
3656 {
3657 	struct hclge_pf_rst_done_cmd *req;
3658 	struct hclge_desc desc;
3659 	int ret;
3660 
3661 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3662 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3663 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3664 
3665 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3666 	/* To be compatible with the old firmware, which does not support
3667 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3668 	 * return success
3669 	 */
3670 	if (ret == -EOPNOTSUPP) {
3671 		dev_warn(&hdev->pdev->dev,
3672 			 "current firmware does not support command(0x%x)!\n",
3673 			 HCLGE_OPC_PF_RST_DONE);
3674 		return 0;
3675 	} else if (ret) {
3676 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3677 			ret);
3678 	}
3679 
3680 	return ret;
3681 }
3682 
3683 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3684 {
3685 	int ret = 0;
3686 
3687 	switch (hdev->reset_type) {
3688 	case HNAE3_FUNC_RESET:
3689 		/* fall through */
3690 	case HNAE3_FLR_RESET:
3691 		ret = hclge_set_all_vf_rst(hdev, false);
3692 		break;
3693 	case HNAE3_GLOBAL_RESET:
3694 		/* fall through */
3695 	case HNAE3_IMP_RESET:
3696 		ret = hclge_set_rst_done(hdev);
3697 		break;
3698 	default:
3699 		break;
3700 	}
3701 
	/* clear up the handshake status after re-initialization is done */
3703 	hclge_reset_handshake(hdev, false);
3704 
3705 	return ret;
3706 }
3707 
3708 static int hclge_reset_stack(struct hclge_dev *hdev)
3709 {
3710 	int ret;
3711 
3712 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3713 	if (ret)
3714 		return ret;
3715 
3716 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3717 	if (ret)
3718 		return ret;
3719 
3720 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3721 }
3722 
3723 static int hclge_reset_prepare(struct hclge_dev *hdev)
3724 {
3725 	int ret;
3726 
3727 	hdev->rst_stats.reset_cnt++;
3728 	/* perform reset of the stack & ae device for a client */
3729 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3730 	if (ret)
3731 		return ret;
3732 
3733 	rtnl_lock();
3734 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3735 	rtnl_unlock();
3736 	if (ret)
3737 		return ret;
3738 
3739 	return hclge_reset_prepare_wait(hdev);
3740 }
3741 
3742 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3743 {
3744 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3745 	enum hnae3_reset_type reset_level;
3746 	int ret;
3747 
3748 	hdev->rst_stats.hw_reset_done_cnt++;
3749 
3750 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3751 	if (ret)
3752 		return ret;
3753 
3754 	rtnl_lock();
3755 	ret = hclge_reset_stack(hdev);
3756 	rtnl_unlock();
3757 	if (ret)
3758 		return ret;
3759 
3760 	hclge_clear_reset_cause(hdev);
3761 
3762 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3763 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3764 	 * times
3765 	 */
3766 	if (ret &&
3767 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3768 		return ret;
3769 
3770 	ret = hclge_reset_prepare_up(hdev);
3771 	if (ret)
3772 		return ret;
3773 
3774 	rtnl_lock();
3775 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3776 	rtnl_unlock();
3777 	if (ret)
3778 		return ret;
3779 
3780 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3781 	if (ret)
3782 		return ret;
3783 
3784 	hdev->last_reset_time = jiffies;
3785 	hdev->rst_stats.reset_fail_cnt = 0;
3786 	hdev->rst_stats.reset_done_cnt++;
3787 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3788 
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
3793 	reset_level = hclge_get_reset_level(ae_dev,
3794 					    &hdev->default_reset_request);
3795 	if (reset_level != HNAE3_NONE_RESET)
3796 		set_bit(reset_level, &hdev->reset_request);
3797 
3798 	return 0;
3799 }
3800 
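/* Full reset flow: notify clients and prepare the hardware, wait for the
 * hardware reset to complete, then rebuild the stack and restore the
 * configuration. On failure, hclge_reset_err_handle() decides whether the
 * reset task should be rescheduled for another attempt.
 */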
3801 static void hclge_reset(struct hclge_dev *hdev)
3802 {
3803 	if (hclge_reset_prepare(hdev))
3804 		goto err_reset;
3805 
3806 	if (hclge_reset_wait(hdev))
3807 		goto err_reset;
3808 
3809 	if (hclge_reset_rebuild(hdev))
3810 		goto err_reset;
3811 
3812 	return;
3813 
3814 err_reset:
3815 	if (hclge_reset_err_handle(hdev))
3816 		hclge_reset_task_schedule(hdev);
3817 }
3818 
3819 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3820 {
3821 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3822 	struct hclge_dev *hdev = ae_dev->priv;
3823 
	/* We might end up getting called broadly because of the 2 cases below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one then we want to make sure we
	 * throttle the reset request. Therefore, we will not allow it again
	 * before 3*HZ has elapsed.
	 */
3839 	if (!handle)
3840 		handle = &hdev->vport[0].nic;
3841 
3842 	if (time_before(jiffies, (hdev->last_reset_time +
3843 				  HCLGE_RESET_INTERVAL))) {
3844 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3845 		return;
3846 	} else if (hdev->default_reset_request) {
3847 		hdev->reset_level =
3848 			hclge_get_reset_level(ae_dev,
3849 					      &hdev->default_reset_request);
3850 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3851 		hdev->reset_level = HNAE3_FUNC_RESET;
3852 	}
3853 
3854 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3855 		 hdev->reset_level);
3856 
3857 	/* request reset & schedule reset task */
3858 	set_bit(hdev->reset_level, &hdev->reset_request);
3859 	hclge_reset_task_schedule(hdev);
3860 
3861 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3862 		hdev->reset_level++;
3863 }
3864 
3865 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3866 					enum hnae3_reset_type rst_type)
3867 {
3868 	struct hclge_dev *hdev = ae_dev->priv;
3869 
3870 	set_bit(rst_type, &hdev->default_reset_request);
3871 }
3872 
3873 static void hclge_reset_timer(struct timer_list *t)
3874 {
3875 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3876 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
3880 	if (!hdev->default_reset_request)
3881 		return;
3882 
3883 	dev_info(&hdev->pdev->dev,
3884 		 "triggering reset in reset timer\n");
3885 	hclge_reset_event(hdev->pdev, NULL);
3886 }
3887 
3888 static void hclge_reset_subtask(struct hclge_dev *hdev)
3889 {
3890 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3891 
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, then we need to wait
	 * for the hardware to complete the reset.
	 *    a. If we are able to figure out in a reasonable time that the
	 *       hardware has fully reset, then we can proceed with the driver
	 *       and client reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
3901 	hdev->last_reset_time = jiffies;
3902 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3903 	if (hdev->reset_type != HNAE3_NONE_RESET)
3904 		hclge_reset(hdev);
3905 
3906 	/* check if we got any *new* reset requests to be honored */
3907 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3908 	if (hdev->reset_type != HNAE3_NONE_RESET)
3909 		hclge_do_reset(hdev);
3910 
3911 	hdev->reset_type = HNAE3_NONE_RESET;
3912 }
3913 
3914 static void hclge_reset_service_task(struct hclge_dev *hdev)
3915 {
3916 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3917 		return;
3918 
3919 	down(&hdev->reset_sem);
3920 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3921 
3922 	hclge_reset_subtask(hdev);
3923 
3924 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3925 	up(&hdev->reset_sem);
3926 }
3927 
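/* A VF vport is treated as not alive if it has not refreshed its
 * last_active_jiffies timestamp within 8 seconds; vports which are not
 * alive have their MPS restored to the default frame size.
 */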
3928 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3929 {
3930 	int i;
3931 
	/* start from vport 1, because the PF (vport 0) is always alive */
3933 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3934 		struct hclge_vport *vport = &hdev->vport[i];
3935 
3936 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3937 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3938 
3939 		/* If vf is not alive, set to default value */
3940 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3941 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3942 	}
3943 }
3944 
3945 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3946 {
3947 	unsigned long delta = round_jiffies_relative(HZ);
3948 
3949 	/* Always handle the link updating to make sure link state is
3950 	 * updated when it is triggered by mbx.
3951 	 */
3952 	hclge_update_link_status(hdev);
3953 	hclge_sync_mac_table(hdev);
3954 	hclge_sync_promisc_mode(hdev);
3955 
3956 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3957 		delta = jiffies - hdev->last_serv_processed;
3958 
3959 		if (delta < round_jiffies_relative(HZ)) {
3960 			delta = round_jiffies_relative(HZ) - delta;
3961 			goto out;
3962 		}
3963 	}
3964 
3965 	hdev->serv_processed_cnt++;
3966 	hclge_update_vport_alive(hdev);
3967 
3968 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3969 		hdev->last_serv_processed = jiffies;
3970 		goto out;
3971 	}
3972 
3973 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3974 		hclge_update_stats_for_all(hdev);
3975 
3976 	hclge_update_port_info(hdev);
3977 	hclge_sync_vlan_filter(hdev);
3978 
3979 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3980 		hclge_rfs_filter_expire(hdev);
3981 
3982 	hdev->last_serv_processed = jiffies;
3983 
3984 out:
3985 	hclge_task_schedule(hdev, delta);
3986 }
3987 
3988 static void hclge_service_task(struct work_struct *work)
3989 {
3990 	struct hclge_dev *hdev =
3991 		container_of(work, struct hclge_dev, service_task.work);
3992 
3993 	hclge_reset_service_task(hdev);
3994 	hclge_mailbox_service_task(hdev);
3995 	hclge_periodic_service_task(hdev);
3996 
3997 	/* Handle reset and mbx again in case periodical task delays the
3998 	 * handling by calling hclge_task_schedule() in
3999 	 * hclge_periodic_service_task().
4000 	 */
4001 	hclge_reset_service_task(hdev);
4002 	hclge_mailbox_service_task(hdev);
4003 }
4004 
4005 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4006 {
4007 	/* VF handle has no client */
4008 	if (!handle->client)
4009 		return container_of(handle, struct hclge_vport, nic);
4010 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4011 		return container_of(handle, struct hclge_vport, roce);
4012 	else
4013 		return container_of(handle, struct hclge_vport, nic);
4014 }
4015 
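/* Allocate up to vector_num interrupt vectors for the requesting vport by
 * scanning the MSI-X vectors from index 1 (vector 0 is used as the misc
 * vector) and handing out entries not yet bound to any vport. Returns the
 * number of vectors actually allocated.
 */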
4016 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4017 			    struct hnae3_vector_info *vector_info)
4018 {
4019 	struct hclge_vport *vport = hclge_get_vport(handle);
4020 	struct hnae3_vector_info *vector = vector_info;
4021 	struct hclge_dev *hdev = vport->back;
4022 	int alloc = 0;
4023 	int i, j;
4024 
4025 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4026 	vector_num = min(hdev->num_msi_left, vector_num);
4027 
4028 	for (j = 0; j < vector_num; j++) {
4029 		for (i = 1; i < hdev->num_msi; i++) {
4030 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4031 				vector->vector = pci_irq_vector(hdev->pdev, i);
4032 				vector->io_addr = hdev->hw.io_base +
4033 					HCLGE_VECTOR_REG_BASE +
4034 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4035 					vport->vport_id *
4036 					HCLGE_VECTOR_VF_OFFSET;
4037 				hdev->vector_status[i] = vport->vport_id;
4038 				hdev->vector_irq[i] = vector->vector;
4039 
4040 				vector++;
4041 				alloc++;
4042 
4043 				break;
4044 			}
4045 		}
4046 	}
4047 	hdev->num_msi_left -= alloc;
4048 	hdev->num_msi_used += alloc;
4049 
4050 	return alloc;
4051 }
4052 
4053 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4054 {
4055 	int i;
4056 
4057 	for (i = 0; i < hdev->num_msi; i++)
4058 		if (vector == hdev->vector_irq[i])
4059 			return i;
4060 
4061 	return -EINVAL;
4062 }
4063 
4064 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4065 {
4066 	struct hclge_vport *vport = hclge_get_vport(handle);
4067 	struct hclge_dev *hdev = vport->back;
4068 	int vector_id;
4069 
4070 	vector_id = hclge_get_vector_index(hdev, vector);
4071 	if (vector_id < 0) {
4072 		dev_err(&hdev->pdev->dev,
4073 			"Get vector index fail. vector = %d\n", vector);
4074 		return vector_id;
4075 	}
4076 
4077 	hclge_free_vector(hdev, vector_id);
4078 
4079 	return 0;
4080 }
4081 
4082 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4083 {
4084 	return HCLGE_RSS_KEY_SIZE;
4085 }
4086 
4087 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4088 {
4089 	return HCLGE_RSS_IND_TBL_SIZE;
4090 }
4091 
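/* Program the RSS hash algorithm and hash key. The key is longer than a
 * single descriptor can carry, so it is written in chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes, with the chunk index encoded in the
 * hash_config field.
 */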
4092 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4093 				  const u8 hfunc, const u8 *key)
4094 {
4095 	struct hclge_rss_config_cmd *req;
4096 	unsigned int key_offset = 0;
4097 	struct hclge_desc desc;
4098 	int key_counts;
4099 	int key_size;
4100 	int ret;
4101 
4102 	key_counts = HCLGE_RSS_KEY_SIZE;
4103 	req = (struct hclge_rss_config_cmd *)desc.data;
4104 
4105 	while (key_counts) {
4106 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4107 					   false);
4108 
4109 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4110 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4111 
4112 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4113 		memcpy(req->hash_key,
4114 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4115 
4116 		key_counts -= key_size;
4117 		key_offset++;
4118 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4119 		if (ret) {
4120 			dev_err(&hdev->pdev->dev,
4121 				"Configure RSS config fail, status = %d\n",
4122 				ret);
4123 			return ret;
4124 		}
4125 	}
4126 	return 0;
4127 }
4128 
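/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor.
 */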
4129 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4130 {
4131 	struct hclge_rss_indirection_table_cmd *req;
4132 	struct hclge_desc desc;
4133 	int i, j;
4134 	int ret;
4135 
4136 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4137 
4138 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4139 		hclge_cmd_setup_basic_desc
4140 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4141 
4142 		req->start_table_index =
4143 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4144 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4145 
4146 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4147 			req->rss_result[j] =
4148 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4149 
4150 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4151 		if (ret) {
4152 			dev_err(&hdev->pdev->dev,
4153 				"Configure rss indir table fail,status = %d\n",
4154 				ret);
4155 			return ret;
4156 		}
4157 	}
4158 	return 0;
4159 }
4160 
4161 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4162 				 u16 *tc_size, u16 *tc_offset)
4163 {
4164 	struct hclge_rss_tc_mode_cmd *req;
4165 	struct hclge_desc desc;
4166 	int ret;
4167 	int i;
4168 
4169 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4170 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4171 
4172 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4173 		u16 mode = 0;
4174 
4175 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4176 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4177 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4178 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4179 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4180 
4181 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4182 	}
4183 
4184 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4185 	if (ret)
4186 		dev_err(&hdev->pdev->dev,
4187 			"Configure rss tc mode fail, status = %d\n", ret);
4188 
4189 	return ret;
4190 }
4191 
4192 static void hclge_get_rss_type(struct hclge_vport *vport)
4193 {
4194 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4195 	    vport->rss_tuple_sets.ipv4_udp_en ||
4196 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4197 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4198 	    vport->rss_tuple_sets.ipv6_udp_en ||
4199 	    vport->rss_tuple_sets.ipv6_sctp_en)
4200 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4201 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4202 		 vport->rss_tuple_sets.ipv6_fragment_en)
4203 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4204 	else
4205 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4206 }
4207 
4208 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4209 {
4210 	struct hclge_rss_input_tuple_cmd *req;
4211 	struct hclge_desc desc;
4212 	int ret;
4213 
4214 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4215 
4216 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4217 
4218 	/* Get the tuple cfg from pf */
4219 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4220 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4221 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4222 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4223 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4224 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4225 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4226 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4227 	hclge_get_rss_type(&hdev->vport[0]);
4228 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4229 	if (ret)
4230 		dev_err(&hdev->pdev->dev,
4231 			"Configure rss input fail, status = %d\n", ret);
4232 	return ret;
4233 }
4234 
4235 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4236 			 u8 *key, u8 *hfunc)
4237 {
4238 	struct hclge_vport *vport = hclge_get_vport(handle);
4239 	int i;
4240 
4241 	/* Get hash algorithm */
4242 	if (hfunc) {
4243 		switch (vport->rss_algo) {
4244 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4245 			*hfunc = ETH_RSS_HASH_TOP;
4246 			break;
4247 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4248 			*hfunc = ETH_RSS_HASH_XOR;
4249 			break;
4250 		default:
4251 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4252 			break;
4253 		}
4254 	}
4255 
4256 	/* Get the RSS Key required by the user */
4257 	if (key)
4258 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4259 
4260 	/* Get indirect table */
4261 	if (indir)
4262 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4263 			indir[i] =  vport->rss_indirection_tbl[i];
4264 
4265 	return 0;
4266 }
4267 
4268 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4269 			 const  u8 *key, const  u8 hfunc)
4270 {
4271 	struct hclge_vport *vport = hclge_get_vport(handle);
4272 	struct hclge_dev *hdev = vport->back;
4273 	u8 hash_algo;
4274 	int ret, i;
4275 
	/* Set the RSS Hash Key if specified by the user */
4277 	if (key) {
4278 		switch (hfunc) {
4279 		case ETH_RSS_HASH_TOP:
4280 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4281 			break;
4282 		case ETH_RSS_HASH_XOR:
4283 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4284 			break;
4285 		case ETH_RSS_HASH_NO_CHANGE:
4286 			hash_algo = vport->rss_algo;
4287 			break;
4288 		default:
4289 			return -EINVAL;
4290 		}
4291 
4292 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4293 		if (ret)
4294 			return ret;
4295 
		/* Update the shadow RSS key with the user specified key */
4297 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4298 		vport->rss_algo = hash_algo;
4299 	}
4300 
4301 	/* Update the shadow RSS table with user specified qids */
4302 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4303 		vport->rss_indirection_tbl[i] = indir[i];
4304 
4305 	/* Update the hardware */
4306 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4307 }
4308 
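/* Translate the ethtool RXH_* flags in nfc->data into the driver's RSS
 * tuple bits (source/destination IP and L4 port, plus the vlan tag bit for
 * SCTP flows).
 */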
4309 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4310 {
4311 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4312 
4313 	if (nfc->data & RXH_L4_B_2_3)
4314 		hash_sets |= HCLGE_D_PORT_BIT;
4315 	else
4316 		hash_sets &= ~HCLGE_D_PORT_BIT;
4317 
4318 	if (nfc->data & RXH_IP_SRC)
4319 		hash_sets |= HCLGE_S_IP_BIT;
4320 	else
4321 		hash_sets &= ~HCLGE_S_IP_BIT;
4322 
4323 	if (nfc->data & RXH_IP_DST)
4324 		hash_sets |= HCLGE_D_IP_BIT;
4325 	else
4326 		hash_sets &= ~HCLGE_D_IP_BIT;
4327 
4328 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4329 		hash_sets |= HCLGE_V_TAG_BIT;
4330 
4331 	return hash_sets;
4332 }
4333 
4334 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4335 			       struct ethtool_rxnfc *nfc)
4336 {
4337 	struct hclge_vport *vport = hclge_get_vport(handle);
4338 	struct hclge_dev *hdev = vport->back;
4339 	struct hclge_rss_input_tuple_cmd *req;
4340 	struct hclge_desc desc;
4341 	u8 tuple_sets;
4342 	int ret;
4343 
4344 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4345 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4346 		return -EINVAL;
4347 
4348 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4349 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4350 
4351 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4352 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4353 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4354 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4355 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4356 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4357 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4358 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4359 
4360 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4361 	switch (nfc->flow_type) {
4362 	case TCP_V4_FLOW:
4363 		req->ipv4_tcp_en = tuple_sets;
4364 		break;
4365 	case TCP_V6_FLOW:
4366 		req->ipv6_tcp_en = tuple_sets;
4367 		break;
4368 	case UDP_V4_FLOW:
4369 		req->ipv4_udp_en = tuple_sets;
4370 		break;
4371 	case UDP_V6_FLOW:
4372 		req->ipv6_udp_en = tuple_sets;
4373 		break;
4374 	case SCTP_V4_FLOW:
4375 		req->ipv4_sctp_en = tuple_sets;
4376 		break;
4377 	case SCTP_V6_FLOW:
4378 		if ((nfc->data & RXH_L4_B_0_1) ||
4379 		    (nfc->data & RXH_L4_B_2_3))
4380 			return -EINVAL;
4381 
4382 		req->ipv6_sctp_en = tuple_sets;
4383 		break;
4384 	case IPV4_FLOW:
4385 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4386 		break;
4387 	case IPV6_FLOW:
4388 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4389 		break;
4390 	default:
4391 		return -EINVAL;
4392 	}
4393 
4394 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4395 	if (ret) {
4396 		dev_err(&hdev->pdev->dev,
4397 			"Set rss tuple fail, status = %d\n", ret);
4398 		return ret;
4399 	}
4400 
4401 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4402 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4403 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4404 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4405 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4406 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4407 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4408 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4409 	hclge_get_rss_type(vport);
4410 	return 0;
4411 }
4412 
4413 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4414 			       struct ethtool_rxnfc *nfc)
4415 {
4416 	struct hclge_vport *vport = hclge_get_vport(handle);
4417 	u8 tuple_sets;
4418 
4419 	nfc->data = 0;
4420 
4421 	switch (nfc->flow_type) {
4422 	case TCP_V4_FLOW:
4423 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4424 		break;
4425 	case UDP_V4_FLOW:
4426 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4427 		break;
4428 	case TCP_V6_FLOW:
4429 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4430 		break;
4431 	case UDP_V6_FLOW:
4432 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4433 		break;
4434 	case SCTP_V4_FLOW:
4435 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4436 		break;
4437 	case SCTP_V6_FLOW:
4438 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4439 		break;
4440 	case IPV4_FLOW:
4441 	case IPV6_FLOW:
4442 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4443 		break;
4444 	default:
4445 		return -EINVAL;
4446 	}
4447 
4448 	if (!tuple_sets)
4449 		return 0;
4450 
4451 	if (tuple_sets & HCLGE_D_PORT_BIT)
4452 		nfc->data |= RXH_L4_B_2_3;
4453 	if (tuple_sets & HCLGE_S_PORT_BIT)
4454 		nfc->data |= RXH_L4_B_0_1;
4455 	if (tuple_sets & HCLGE_D_IP_BIT)
4456 		nfc->data |= RXH_IP_DST;
4457 	if (tuple_sets & HCLGE_S_IP_BIT)
4458 		nfc->data |= RXH_IP_SRC;
4459 
4460 	return 0;
4461 }
4462 
4463 static int hclge_get_tc_size(struct hnae3_handle *handle)
4464 {
4465 	struct hclge_vport *vport = hclge_get_vport(handle);
4466 	struct hclge_dev *hdev = vport->back;
4467 
4468 	return hdev->rss_size_max;
4469 }
4470 
4471 int hclge_rss_init_hw(struct hclge_dev *hdev)
4472 {
4473 	struct hclge_vport *vport = hdev->vport;
4474 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4475 	u16 rss_size = vport[0].alloc_rss_size;
4476 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4477 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4478 	u8 *key = vport[0].rss_hash_key;
4479 	u8 hfunc = vport[0].rss_algo;
4480 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4481 	u16 roundup_size;
4482 	unsigned int i;
4483 	int ret;
4484 
4485 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4486 	if (ret)
4487 		return ret;
4488 
4489 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4490 	if (ret)
4491 		return ret;
4492 
4493 	ret = hclge_set_rss_input_tuple(hdev);
4494 	if (ret)
4495 		return ret;
4496 
	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
4501 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4502 		dev_err(&hdev->pdev->dev,
4503 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4504 			rss_size);
4505 		return -EINVAL;
4506 	}
4507 
4508 	roundup_size = roundup_pow_of_two(rss_size);
4509 	roundup_size = ilog2(roundup_size);
4510 
4511 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4512 		tc_valid[i] = 0;
4513 
4514 		if (!(hdev->hw_tc_map & BIT(i)))
4515 			continue;
4516 
4517 		tc_valid[i] = 1;
4518 		tc_size[i] = roundup_size;
4519 		tc_offset[i] = rss_size * i;
4520 	}
4521 
4522 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4523 }
4524 
4525 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4526 {
4527 	struct hclge_vport *vport = hdev->vport;
4528 	int i, j;
4529 
4530 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4531 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4532 			vport[j].rss_indirection_tbl[i] =
4533 				i % vport[j].alloc_rss_size;
4534 	}
4535 }
4536 
4537 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4538 {
4539 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4540 	struct hclge_vport *vport = hdev->vport;
4541 
4542 	if (hdev->pdev->revision >= 0x21)
4543 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4544 
4545 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4546 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4547 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4548 		vport[i].rss_tuple_sets.ipv4_udp_en =
4549 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4550 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4551 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4552 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4553 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4554 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4555 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4556 		vport[i].rss_tuple_sets.ipv6_udp_en =
4557 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4558 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4559 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4560 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4561 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4562 
4563 		vport[i].rss_algo = rss_algo;
4564 
4565 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4566 		       HCLGE_RSS_KEY_SIZE);
4567 	}
4568 
4569 	hclge_rss_indir_init_cfg(hdev);
4570 }
4571 
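/* Map (en == true) or unmap (en == false) a chain of TQP rings to the given
 * interrupt vector. Each command descriptor holds at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a long chain is sent to
 * the firmware in several commands.
 */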
4572 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4573 				int vector_id, bool en,
4574 				struct hnae3_ring_chain_node *ring_chain)
4575 {
4576 	struct hclge_dev *hdev = vport->back;
4577 	struct hnae3_ring_chain_node *node;
4578 	struct hclge_desc desc;
4579 	struct hclge_ctrl_vector_chain_cmd *req =
4580 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4581 	enum hclge_cmd_status status;
4582 	enum hclge_opcode_type op;
4583 	u16 tqp_type_and_id;
4584 	int i;
4585 
4586 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4587 	hclge_cmd_setup_basic_desc(&desc, op, false);
4588 	req->int_vector_id = vector_id;
4589 
4590 	i = 0;
4591 	for (node = ring_chain; node; node = node->next) {
4592 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4593 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4594 				HCLGE_INT_TYPE_S,
4595 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4596 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4597 				HCLGE_TQP_ID_S, node->tqp_index);
4598 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4599 				HCLGE_INT_GL_IDX_S,
4600 				hnae3_get_field(node->int_gl_idx,
4601 						HNAE3_RING_GL_IDX_M,
4602 						HNAE3_RING_GL_IDX_S));
4603 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4604 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4605 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4606 			req->vfid = vport->vport_id;
4607 
4608 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4609 			if (status) {
4610 				dev_err(&hdev->pdev->dev,
4611 					"Map TQP fail, status is %d.\n",
4612 					status);
4613 				return -EIO;
4614 			}
4615 			i = 0;
4616 
4617 			hclge_cmd_setup_basic_desc(&desc,
4618 						   op,
4619 						   false);
4620 			req->int_vector_id = vector_id;
4621 		}
4622 	}
4623 
4624 	if (i > 0) {
4625 		req->int_cause_num = i;
4626 		req->vfid = vport->vport_id;
4627 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4628 		if (status) {
4629 			dev_err(&hdev->pdev->dev,
4630 				"Map TQP fail, status is %d.\n", status);
4631 			return -EIO;
4632 		}
4633 	}
4634 
4635 	return 0;
4636 }
4637 
4638 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4639 				    struct hnae3_ring_chain_node *ring_chain)
4640 {
4641 	struct hclge_vport *vport = hclge_get_vport(handle);
4642 	struct hclge_dev *hdev = vport->back;
4643 	int vector_id;
4644 
4645 	vector_id = hclge_get_vector_index(hdev, vector);
4646 	if (vector_id < 0) {
4647 		dev_err(&hdev->pdev->dev,
4648 			"failed to get vector index. vector=%d\n", vector);
4649 		return vector_id;
4650 	}
4651 
4652 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4653 }
4654 
4655 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4656 				       struct hnae3_ring_chain_node *ring_chain)
4657 {
4658 	struct hclge_vport *vport = hclge_get_vport(handle);
4659 	struct hclge_dev *hdev = vport->back;
4660 	int vector_id, ret;
4661 
4662 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4663 		return 0;
4664 
4665 	vector_id = hclge_get_vector_index(hdev, vector);
4666 	if (vector_id < 0) {
4667 		dev_err(&handle->pdev->dev,
4668 			"Get vector index fail. ret =%d\n", vector_id);
4669 		return vector_id;
4670 	}
4671 
4672 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4673 	if (ret)
4674 		dev_err(&handle->pdev->dev,
4675 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4676 			vector_id, ret);
4677 
4678 	return ret;
4679 }
4680 
4681 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4682 				      struct hclge_promisc_param *param)
4683 {
4684 	struct hclge_promisc_cfg_cmd *req;
4685 	struct hclge_desc desc;
4686 	int ret;
4687 
4688 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4689 
4690 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4691 	req->vf_id = param->vf_id;
4692 
	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision(0x20); newer revisions support them. Setting these
	 * two fields does not return an error when the driver sends the
	 * command to the firmware on revision(0x20).
	 */
4698 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4699 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4700 
4701 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4702 	if (ret)
4703 		dev_err(&hdev->pdev->dev,
4704 			"failed to set vport %d promisc mode, ret = %d.\n",
4705 			param->vf_id, ret);
4706 
4707 	return ret;
4708 }
4709 
4710 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4711 				     bool en_uc, bool en_mc, bool en_bc,
4712 				     int vport_id)
4713 {
4714 	if (!param)
4715 		return;
4716 
4717 	memset(param, 0, sizeof(struct hclge_promisc_param));
4718 	if (en_uc)
4719 		param->enable = HCLGE_PROMISC_EN_UC;
4720 	if (en_mc)
4721 		param->enable |= HCLGE_PROMISC_EN_MC;
4722 	if (en_bc)
4723 		param->enable |= HCLGE_PROMISC_EN_BC;
4724 	param->vf_id = vport_id;
4725 }
4726 
4727 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4728 				 bool en_mc_pmc, bool en_bc_pmc)
4729 {
4730 	struct hclge_dev *hdev = vport->back;
4731 	struct hclge_promisc_param param;
4732 
4733 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4734 				 vport->vport_id);
4735 	return hclge_cmd_set_promisc_mode(hdev, &param);
4736 }
4737 
4738 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4739 				  bool en_mc_pmc)
4740 {
4741 	struct hclge_vport *vport = hclge_get_vport(handle);
4742 	bool en_bc_pmc = true;
4743 
	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode
	 */
4748 	if (handle->pdev->revision == 0x20)
4749 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4750 
4751 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4752 					    en_bc_pmc);
4753 }
4754 
4755 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4756 {
4757 	struct hclge_vport *vport = hclge_get_vport(handle);
4758 	struct hclge_dev *hdev = vport->back;
4759 
4760 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4761 }
4762 
4763 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4764 {
4765 	struct hclge_get_fd_mode_cmd *req;
4766 	struct hclge_desc desc;
4767 	int ret;
4768 
4769 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4770 
4771 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4772 
4773 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4774 	if (ret) {
4775 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4776 		return ret;
4777 	}
4778 
4779 	*fd_mode = req->mode;
4780 
4781 	return ret;
4782 }
4783 
4784 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4785 				   u32 *stage1_entry_num,
4786 				   u32 *stage2_entry_num,
4787 				   u16 *stage1_counter_num,
4788 				   u16 *stage2_counter_num)
4789 {
4790 	struct hclge_get_fd_allocation_cmd *req;
4791 	struct hclge_desc desc;
4792 	int ret;
4793 
4794 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4795 
4796 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4797 
4798 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4799 	if (ret) {
4800 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4801 			ret);
4802 		return ret;
4803 	}
4804 
4805 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4806 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4807 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4808 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4809 
4810 	return ret;
4811 }
4812 
4813 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4814 				   enum HCLGE_FD_STAGE stage_num)
4815 {
4816 	struct hclge_set_fd_key_config_cmd *req;
4817 	struct hclge_fd_key_cfg *stage;
4818 	struct hclge_desc desc;
4819 	int ret;
4820 
4821 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4822 
4823 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4824 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4825 	req->stage = stage_num;
4826 	req->key_select = stage->key_sel;
4827 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4828 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4829 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4830 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4831 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4832 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4833 
4834 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4835 	if (ret)
4836 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4837 
4838 	return ret;
4839 }
4840 
4841 static int hclge_init_fd_config(struct hclge_dev *hdev)
4842 {
4843 #define LOW_2_WORDS		0x03
4844 	struct hclge_fd_key_cfg *key_cfg;
4845 	int ret;
4846 
4847 	if (!hnae3_dev_fd_supported(hdev))
4848 		return 0;
4849 
4850 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4851 	if (ret)
4852 		return ret;
4853 
4854 	switch (hdev->fd_cfg.fd_mode) {
4855 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4856 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4857 		break;
4858 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4859 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4860 		break;
4861 	default:
4862 		dev_err(&hdev->pdev->dev,
4863 			"Unsupported flow director mode %u\n",
4864 			hdev->fd_cfg.fd_mode);
4865 		return -EOPNOTSUPP;
4866 	}
4867 
4868 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4870 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4871 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4872 	key_cfg->outer_sipv6_word_en = 0;
4873 	key_cfg->outer_dipv6_word_en = 0;
4874 
4875 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4876 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4877 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4878 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4879 
	/* If using the max 400bit key, we can support tuples for ether type */
4881 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4882 		key_cfg->tuple_active |=
4883 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4884 
4885 	/* roce_type is used to filter roce frames
4886 	 * dst_vport is used to specify the rule
4887 	 */
4888 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4889 
4890 	ret = hclge_get_fd_allocation(hdev,
4891 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4892 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4893 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4894 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4895 	if (ret)
4896 		return ret;
4897 
4898 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4899 }
4900 
4901 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4902 				int loc, u8 *key, bool is_add)
4903 {
4904 	struct hclge_fd_tcam_config_1_cmd *req1;
4905 	struct hclge_fd_tcam_config_2_cmd *req2;
4906 	struct hclge_fd_tcam_config_3_cmd *req3;
4907 	struct hclge_desc desc[3];
4908 	int ret;
4909 
4910 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4911 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4912 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4913 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4914 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4915 
4916 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4917 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4918 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4919 
4920 	req1->stage = stage;
4921 	req1->xy_sel = sel_x ? 1 : 0;
4922 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4923 	req1->index = cpu_to_le32(loc);
4924 	req1->entry_vld = sel_x ? is_add : 0;
4925 
4926 	if (key) {
4927 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4928 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4929 		       sizeof(req2->tcam_data));
4930 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4931 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4932 	}
4933 
4934 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4935 	if (ret)
4936 		dev_err(&hdev->pdev->dev,
4937 			"config tcam key fail, ret=%d\n",
4938 			ret);
4939 
4940 	return ret;
4941 }
4942 
4943 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4944 			      struct hclge_fd_ad_data *action)
4945 {
4946 	struct hclge_fd_ad_config_cmd *req;
4947 	struct hclge_desc desc;
4948 	u64 ad_data = 0;
4949 	int ret;
4950 
4951 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4952 
4953 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4954 	req->index = cpu_to_le32(loc);
4955 	req->stage = stage;
4956 
4957 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4958 		      action->write_rule_id_to_bd);
4959 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4960 			action->rule_id);
4961 	ad_data <<= 32;
4962 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4963 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4964 		      action->forward_to_direct_queue);
4965 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4966 			action->queue_id);
4967 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4968 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4969 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4970 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4971 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4972 			action->counter_id);
4973 
4974 	req->ad_data = cpu_to_le64(ad_data);
4975 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4976 	if (ret)
4977 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4978 
4979 	return ret;
4980 }
4981 
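/* Convert one tuple of a flow director rule into its TCAM key_x/key_y form.
 * Returns true when the tuple occupies space in the key (the caller leaves
 * the bytes zeroed when the rule does not use the tuple), and false for
 * tuple bits that are not handled here.
 */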
4982 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4983 				   struct hclge_fd_rule *rule)
4984 {
4985 	u16 tmp_x_s, tmp_y_s;
4986 	u32 tmp_x_l, tmp_y_l;
4987 	int i;
4988 
4989 	if (rule->unused_tuple & tuple_bit)
4990 		return true;
4991 
4992 	switch (tuple_bit) {
4993 	case BIT(INNER_DST_MAC):
4994 		for (i = 0; i < ETH_ALEN; i++) {
4995 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4996 			       rule->tuples_mask.dst_mac[i]);
4997 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4998 			       rule->tuples_mask.dst_mac[i]);
4999 		}
5000 
5001 		return true;
5002 	case BIT(INNER_SRC_MAC):
5003 		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
5008 		}
5009 
5010 		return true;
5011 	case BIT(INNER_VLAN_TAG_FST):
5012 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5013 		       rule->tuples_mask.vlan_tag1);
5014 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5015 		       rule->tuples_mask.vlan_tag1);
5016 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5017 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5018 
5019 		return true;
5020 	case BIT(INNER_ETH_TYPE):
5021 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5022 		       rule->tuples_mask.ether_proto);
5023 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5024 		       rule->tuples_mask.ether_proto);
5025 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5026 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5027 
5028 		return true;
5029 	case BIT(INNER_IP_TOS):
5030 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5031 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5032 
5033 		return true;
5034 	case BIT(INNER_IP_PROTO):
5035 		calc_x(*key_x, rule->tuples.ip_proto,
5036 		       rule->tuples_mask.ip_proto);
5037 		calc_y(*key_y, rule->tuples.ip_proto,
5038 		       rule->tuples_mask.ip_proto);
5039 
5040 		return true;
5041 	case BIT(INNER_SRC_IP):
5042 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5043 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5044 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5045 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5046 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5047 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5048 
5049 		return true;
5050 	case BIT(INNER_DST_IP):
5051 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5052 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5053 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5054 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5055 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5056 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5057 
5058 		return true;
5059 	case BIT(INNER_SRC_PORT):
5060 		calc_x(tmp_x_s, rule->tuples.src_port,
5061 		       rule->tuples_mask.src_port);
5062 		calc_y(tmp_y_s, rule->tuples.src_port,
5063 		       rule->tuples_mask.src_port);
5064 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5065 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5066 
5067 		return true;
5068 	case BIT(INNER_DST_PORT):
5069 		calc_x(tmp_x_s, rule->tuples.dst_port,
5070 		       rule->tuples_mask.dst_port);
5071 		calc_y(tmp_y_s, rule->tuples.dst_port,
5072 		       rule->tuples_mask.dst_port);
5073 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5074 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5075 
5076 		return true;
5077 	default:
5078 		return false;
5079 	}
5080 }
5081 
5082 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5083 				 u8 vf_id, u8 network_port_id)
5084 {
5085 	u32 port_number = 0;
5086 
5087 	if (port_type == HOST_PORT) {
5088 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5089 				pf_id);
5090 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5091 				vf_id);
5092 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5093 	} else {
5094 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5095 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5096 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5097 	}
5098 
5099 	return port_number;
5100 }
5101 
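/* Build the meta data part of the TCAM key from the active meta data
 * tuples (packet type and destination vport number). The assembled bits
 * are shifted to the most significant end of the 32-bit meta data word
 * before being stored.
 */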
5102 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5103 				       __le32 *key_x, __le32 *key_y,
5104 				       struct hclge_fd_rule *rule)
5105 {
5106 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5107 	u8 cur_pos = 0, tuple_size, shift_bits;
5108 	unsigned int i;
5109 
5110 	for (i = 0; i < MAX_META_DATA; i++) {
5111 		tuple_size = meta_data_key_info[i].key_length;
5112 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5113 
5114 		switch (tuple_bit) {
5115 		case BIT(ROCE_TYPE):
5116 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5117 			cur_pos += tuple_size;
5118 			break;
5119 		case BIT(DST_VPORT):
5120 			port_number = hclge_get_port_number(HOST_PORT, 0,
5121 							    rule->vf_id, 0);
5122 			hnae3_set_field(meta_data,
5123 					GENMASK(cur_pos + tuple_size, cur_pos),
5124 					cur_pos, port_number);
5125 			cur_pos += tuple_size;
5126 			break;
5127 		default:
5128 			break;
5129 		}
5130 	}
5131 
5132 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5133 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5134 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5135 
5136 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5137 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5138 }
5139 
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region, the tuple key is stored in
 * the LSB region, and unused bits are filled with 0.
 */
5144 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5145 			    struct hclge_fd_rule *rule)
5146 {
5147 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5148 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5149 	u8 *cur_key_x, *cur_key_y;
5150 	u8 meta_data_region;
5151 	u8 tuple_size;
5152 	int ret;
5153 	u32 i;
5154 
5155 	memset(key_x, 0, sizeof(key_x));
5156 	memset(key_y, 0, sizeof(key_y));
5157 	cur_key_x = key_x;
5158 	cur_key_y = key_y;
5159 
	for (i = 0; i < MAX_TUPLE; i++) {
5161 		bool tuple_valid;
5162 		u32 check_tuple;
5163 
5164 		tuple_size = tuple_key_info[i].key_length / 8;
5165 		check_tuple = key_cfg->tuple_active & BIT(i);
5166 
5167 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5168 						     cur_key_y, rule);
5169 		if (tuple_valid) {
5170 			cur_key_x += tuple_size;
5171 			cur_key_y += tuple_size;
5172 		}
5173 	}
5174 
5175 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5176 			MAX_META_DATA_LENGTH / 8;
5177 
5178 	hclge_fd_convert_meta_data(key_cfg,
5179 				   (__le32 *)(key_x + meta_data_region),
5180 				   (__le32 *)(key_y + meta_data_region),
5181 				   rule);
5182 
5183 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5184 				   true);
5185 	if (ret) {
5186 		dev_err(&hdev->pdev->dev,
5187 			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5189 		return ret;
5190 	}
5191 
5192 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5193 				   true);
5194 	if (ret)
5195 		dev_err(&hdev->pdev->dev,
5196 			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5198 	return ret;
5199 }
5200 
5201 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5202 			       struct hclge_fd_rule *rule)
5203 {
5204 	struct hclge_fd_ad_data ad_data;
5205 
5206 	ad_data.ad_id = rule->location;
5207 
5208 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5209 		ad_data.drop_packet = true;
5210 		ad_data.forward_to_direct_queue = false;
5211 		ad_data.queue_id = 0;
5212 	} else {
5213 		ad_data.drop_packet = false;
5214 		ad_data.forward_to_direct_queue = true;
5215 		ad_data.queue_id = rule->queue_id;
5216 	}
5217 
5218 	ad_data.use_counter = false;
5219 	ad_data.counter_id = 0;
5220 
5221 	ad_data.use_next_stage = false;
5222 	ad_data.next_input_key = 0;
5223 
5224 	ad_data.write_rule_id_to_bd = true;
5225 	ad_data.rule_id = rule->location;
5226 
5227 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5228 }
5229 
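/* The hclge_fd_check_*_tuple() helpers below validate one ethtool flow
 * spec and record, in *unused_tuple, the tuples that the rule does not
 * match on. Fields that the hardware cannot match on cause the helpers to
 * return -EOPNOTSUPP.
 */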
5230 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5231 				       u32 *unused_tuple)
5232 {
5233 	if (!spec || !unused_tuple)
5234 		return -EINVAL;
5235 
5236 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5237 
5238 	if (!spec->ip4src)
5239 		*unused_tuple |= BIT(INNER_SRC_IP);
5240 
5241 	if (!spec->ip4dst)
5242 		*unused_tuple |= BIT(INNER_DST_IP);
5243 
5244 	if (!spec->psrc)
5245 		*unused_tuple |= BIT(INNER_SRC_PORT);
5246 
5247 	if (!spec->pdst)
5248 		*unused_tuple |= BIT(INNER_DST_PORT);
5249 
5250 	if (!spec->tos)
5251 		*unused_tuple |= BIT(INNER_IP_TOS);
5252 
5253 	return 0;
5254 }
5255 
5256 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5257 				    u32 *unused_tuple)
5258 {
5259 	if (!spec || !unused_tuple)
5260 		return -EINVAL;
5261 
5262 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5263 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5264 
5265 	if (!spec->ip4src)
5266 		*unused_tuple |= BIT(INNER_SRC_IP);
5267 
5268 	if (!spec->ip4dst)
5269 		*unused_tuple |= BIT(INNER_DST_IP);
5270 
5271 	if (!spec->tos)
5272 		*unused_tuple |= BIT(INNER_IP_TOS);
5273 
5274 	if (!spec->proto)
5275 		*unused_tuple |= BIT(INNER_IP_PROTO);
5276 
5277 	if (spec->l4_4_bytes)
5278 		return -EOPNOTSUPP;
5279 
5280 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5281 		return -EOPNOTSUPP;
5282 
5283 	return 0;
5284 }
5285 
5286 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5287 				       u32 *unused_tuple)
5288 {
5289 	if (!spec || !unused_tuple)
5290 		return -EINVAL;
5291 
5292 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5293 		BIT(INNER_IP_TOS);
5294 
	/* check whether the src/dst ip address is used */
5296 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5297 	    !spec->ip6src[2] && !spec->ip6src[3])
5298 		*unused_tuple |= BIT(INNER_SRC_IP);
5299 
5300 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5301 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5302 		*unused_tuple |= BIT(INNER_DST_IP);
5303 
5304 	if (!spec->psrc)
5305 		*unused_tuple |= BIT(INNER_SRC_PORT);
5306 
5307 	if (!spec->pdst)
5308 		*unused_tuple |= BIT(INNER_DST_PORT);
5309 
5310 	if (spec->tclass)
5311 		return -EOPNOTSUPP;
5312 
5313 	return 0;
5314 }
5315 
5316 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5317 				    u32 *unused_tuple)
5318 {
5319 	if (!spec || !unused_tuple)
5320 		return -EINVAL;
5321 
5322 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5323 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5324 
	/* check whether the src/dst ip address is used */
5326 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5327 	    !spec->ip6src[2] && !spec->ip6src[3])
5328 		*unused_tuple |= BIT(INNER_SRC_IP);
5329 
5330 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5331 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5332 		*unused_tuple |= BIT(INNER_DST_IP);
5333 
5334 	if (!spec->l4_proto)
5335 		*unused_tuple |= BIT(INNER_IP_PROTO);
5336 
5337 	if (spec->tclass)
5338 		return -EOPNOTSUPP;
5339 
5340 	if (spec->l4_4_bytes)
5341 		return -EOPNOTSUPP;
5342 
5343 	return 0;
5344 }
5345 
5346 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5347 {
5348 	if (!spec || !unused_tuple)
5349 		return -EINVAL;
5350 
5351 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5352 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5353 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5354 
5355 	if (is_zero_ether_addr(spec->h_source))
5356 		*unused_tuple |= BIT(INNER_SRC_MAC);
5357 
5358 	if (is_zero_ether_addr(spec->h_dest))
5359 		*unused_tuple |= BIT(INNER_DST_MAC);
5360 
5361 	if (!spec->h_proto)
5362 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5363 
5364 	return 0;
5365 }
5366 
5367 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5368 				    struct ethtool_rx_flow_spec *fs,
5369 				    u32 *unused_tuple)
5370 {
5371 	if (fs->flow_type & FLOW_EXT) {
5372 		if (fs->h_ext.vlan_etype) {
5373 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5374 			return -EOPNOTSUPP;
5375 		}
5376 
5377 		if (!fs->h_ext.vlan_tci)
5378 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5379 
5380 		if (fs->m_ext.vlan_tci &&
5381 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5382 			dev_err(&hdev->pdev->dev,
5383 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5384 				be16_to_cpu(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5385 			return -EINVAL;
5386 		}
5387 	} else {
5388 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5389 	}
5390 
5391 	if (fs->flow_type & FLOW_MAC_EXT) {
5392 		if (hdev->fd_cfg.fd_mode !=
5393 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5394 			dev_err(&hdev->pdev->dev,
5395 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5396 			return -EOPNOTSUPP;
5397 		}
5398 
5399 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5400 			*unused_tuple |= BIT(INNER_DST_MAC);
5401 		else
5402 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5403 	}
5404 
5405 	return 0;
5406 }
5407 
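/* Validate the whole ethtool_rx_flow_spec before it is turned into an fd
 * rule: the rule location must be below the stage 1 rule number, user-def
 * bytes are rejected, the per-flow-type tuple checker above is selected by
 * the flow type, and finally the FLOW_EXT/FLOW_MAC_EXT fields are checked.
 */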
5408 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5409 			       struct ethtool_rx_flow_spec *fs,
5410 			       u32 *unused_tuple)
5411 {
5412 	u32 flow_type;
5413 	int ret;
5414 
5415 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5416 		dev_err(&hdev->pdev->dev,
5417 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5418 			fs->location,
5419 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5420 		return -EINVAL;
5421 	}
5422 
5423 	if ((fs->flow_type & FLOW_EXT) &&
5424 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5425 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5426 		return -EOPNOTSUPP;
5427 	}
5428 
5429 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5430 	switch (flow_type) {
5431 	case SCTP_V4_FLOW:
5432 	case TCP_V4_FLOW:
5433 	case UDP_V4_FLOW:
5434 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5435 						  unused_tuple);
5436 		break;
5437 	case IP_USER_FLOW:
5438 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5439 					       unused_tuple);
5440 		break;
5441 	case SCTP_V6_FLOW:
5442 	case TCP_V6_FLOW:
5443 	case UDP_V6_FLOW:
5444 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5445 						  unused_tuple);
5446 		break;
5447 	case IPV6_USER_FLOW:
5448 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5449 					       unused_tuple);
5450 		break;
5451 	case ETHER_FLOW:
5452 		if (hdev->fd_cfg.fd_mode !=
5453 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5454 			dev_err(&hdev->pdev->dev,
5455 				"ETHER_FLOW is not supported in current fd mode!\n");
5456 			return -EOPNOTSUPP;
5457 		}
5458 
5459 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5460 						 unused_tuple);
5461 		break;
5462 	default:
5463 		dev_err(&hdev->pdev->dev,
5464 			"unsupported protocol type, protocol type = %#x\n",
5465 			flow_type);
5466 		return -EOPNOTSUPP;
5467 	}
5468 
5469 	if (ret) {
5470 		dev_err(&hdev->pdev->dev,
5471 			"failed to check flow union tuple, ret = %d\n",
5472 			ret);
5473 		return ret;
5474 	}
5475 
5476 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5477 }
5478 
5479 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5480 {
5481 	struct hclge_fd_rule *rule = NULL;
5482 	struct hlist_node *node2;
5483 
5484 	spin_lock_bh(&hdev->fd_rule_lock);
5485 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5486 		if (rule->location >= location)
5487 			break;
5488 	}
5489 
5490 	spin_unlock_bh(&hdev->fd_rule_lock);
5491 
5492 	return rule && rule->location == location;
5493 }
5494 
5495 /* make sure to be called with fd_rule_lock held */
5496 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5497 				     struct hclge_fd_rule *new_rule,
5498 				     u16 location,
5499 				     bool is_add)
5500 {
5501 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5502 	struct hlist_node *node2;
5503 
5504 	if (is_add && !new_rule)
5505 		return -EINVAL;
5506 
5507 	hlist_for_each_entry_safe(rule, node2,
5508 				  &hdev->fd_rule_list, rule_node) {
5509 		if (rule->location >= location)
5510 			break;
5511 		parent = rule;
5512 	}
5513 
5514 	if (rule && rule->location == location) {
5515 		hlist_del(&rule->rule_node);
5516 		kfree(rule);
5517 		hdev->hclge_fd_rule_num--;
5518 
5519 		if (!is_add) {
5520 			if (!hdev->hclge_fd_rule_num)
5521 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5522 			clear_bit(location, hdev->fd_bmap);
5523 
5524 			return 0;
5525 		}
5526 	} else if (!is_add) {
5527 		dev_err(&hdev->pdev->dev,
5528 			"delete failed, rule %u does not exist\n",
5529 			location);
5530 		return -EINVAL;
5531 	}
5532 
5533 	INIT_HLIST_NODE(&new_rule->rule_node);
5534 
5535 	if (parent)
5536 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5537 	else
5538 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5539 
5540 	set_bit(location, hdev->fd_bmap);
5541 	hdev->hclge_fd_rule_num++;
5542 	hdev->fd_active_type = new_rule->rule_type;
5543 
5544 	return 0;
5545 }
5546 
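/* Convert the (big-endian) ethtool flow spec into the host-order tuple
 * layout used by struct hclge_fd_rule. The second switch below also derives
 * the L4 protocol (SCTP/TCP/UDP) from the flow type, since the ethtool spec
 * does not carry it explicitly for those flow types.
 */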
5547 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5548 			      struct ethtool_rx_flow_spec *fs,
5549 			      struct hclge_fd_rule *rule)
5550 {
5551 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5552 
5553 	switch (flow_type) {
5554 	case SCTP_V4_FLOW:
5555 	case TCP_V4_FLOW:
5556 	case UDP_V4_FLOW:
5557 		rule->tuples.src_ip[IPV4_INDEX] =
5558 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5559 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5560 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5561 
5562 		rule->tuples.dst_ip[IPV4_INDEX] =
5563 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5564 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5565 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5566 
5567 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5568 		rule->tuples_mask.src_port =
5569 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5570 
5571 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5572 		rule->tuples_mask.dst_port =
5573 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5574 
5575 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5576 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5577 
5578 		rule->tuples.ether_proto = ETH_P_IP;
5579 		rule->tuples_mask.ether_proto = 0xFFFF;
5580 
5581 		break;
5582 	case IP_USER_FLOW:
5583 		rule->tuples.src_ip[IPV4_INDEX] =
5584 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5585 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5586 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5587 
5588 		rule->tuples.dst_ip[IPV4_INDEX] =
5589 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5590 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5591 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5592 
5593 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5594 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5595 
5596 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5597 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5598 
5599 		rule->tuples.ether_proto = ETH_P_IP;
5600 		rule->tuples_mask.ether_proto = 0xFFFF;
5601 
5602 		break;
5603 	case SCTP_V6_FLOW:
5604 	case TCP_V6_FLOW:
5605 	case UDP_V6_FLOW:
5606 		be32_to_cpu_array(rule->tuples.src_ip,
5607 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5608 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5609 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5610 
5611 		be32_to_cpu_array(rule->tuples.dst_ip,
5612 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5613 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5614 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5615 
5616 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5617 		rule->tuples_mask.src_port =
5618 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5619 
5620 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5621 		rule->tuples_mask.dst_port =
5622 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5623 
5624 		rule->tuples.ether_proto = ETH_P_IPV6;
5625 		rule->tuples_mask.ether_proto = 0xFFFF;
5626 
5627 		break;
5628 	case IPV6_USER_FLOW:
5629 		be32_to_cpu_array(rule->tuples.src_ip,
5630 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5631 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5632 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5633 
5634 		be32_to_cpu_array(rule->tuples.dst_ip,
5635 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5636 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5637 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5638 
5639 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5640 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5641 
5642 		rule->tuples.ether_proto = ETH_P_IPV6;
5643 		rule->tuples_mask.ether_proto = 0xFFFF;
5644 
5645 		break;
5646 	case ETHER_FLOW:
5647 		ether_addr_copy(rule->tuples.src_mac,
5648 				fs->h_u.ether_spec.h_source);
5649 		ether_addr_copy(rule->tuples_mask.src_mac,
5650 				fs->m_u.ether_spec.h_source);
5651 
5652 		ether_addr_copy(rule->tuples.dst_mac,
5653 				fs->h_u.ether_spec.h_dest);
5654 		ether_addr_copy(rule->tuples_mask.dst_mac,
5655 				fs->m_u.ether_spec.h_dest);
5656 
5657 		rule->tuples.ether_proto =
5658 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5659 		rule->tuples_mask.ether_proto =
5660 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5661 
5662 		break;
5663 	default:
5664 		return -EOPNOTSUPP;
5665 	}
5666 
5667 	switch (flow_type) {
5668 	case SCTP_V4_FLOW:
5669 	case SCTP_V6_FLOW:
5670 		rule->tuples.ip_proto = IPPROTO_SCTP;
5671 		rule->tuples_mask.ip_proto = 0xFF;
5672 		break;
5673 	case TCP_V4_FLOW:
5674 	case TCP_V6_FLOW:
5675 		rule->tuples.ip_proto = IPPROTO_TCP;
5676 		rule->tuples_mask.ip_proto = 0xFF;
5677 		break;
5678 	case UDP_V4_FLOW:
5679 	case UDP_V6_FLOW:
5680 		rule->tuples.ip_proto = IPPROTO_UDP;
5681 		rule->tuples_mask.ip_proto = 0xFF;
5682 		break;
5683 	default:
5684 		break;
5685 	}
5686 
5687 	if (fs->flow_type & FLOW_EXT) {
5688 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5689 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5690 	}
5691 
5692 	if (fs->flow_type & FLOW_MAC_EXT) {
5693 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5694 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5695 	}
5696 
5697 	return 0;
5698 }
5699 
5700 /* make sure to be called with fd_rule_lock held */
5701 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5702 				struct hclge_fd_rule *rule)
5703 {
5704 	int ret;
5705 
5706 	if (!rule) {
5707 		dev_err(&hdev->pdev->dev,
5708 			"The flow director rule is NULL\n");
5709 		return -EINVAL;
5710 	}
5711 
5712 	/* it will never fail here, so there is no need to check the return value */
5713 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5714 
5715 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5716 	if (ret)
5717 		goto clear_rule;
5718 
5719 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5720 	if (ret)
5721 		goto clear_rule;
5722 
5723 	return 0;
5724 
5725 clear_rule:
5726 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5727 	return ret;
5728 }
5729 
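/* ethtool entry point for adding a flow director rule. As a rough sketch
 * (interface name and values below are only examples), a user space command
 * such as:
 *
 *   ethtool -U eth0 flow-type tcp4 dst-port 80 action 2 loc 1
 *
 * ends up here with cmd->fs describing the match fields, the target
 * queue/vf encoded in ring_cookie and the rule index in fs->location.
 */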
5730 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5731 			      struct ethtool_rxnfc *cmd)
5732 {
5733 	struct hclge_vport *vport = hclge_get_vport(handle);
5734 	struct hclge_dev *hdev = vport->back;
5735 	u16 dst_vport_id = 0, q_index = 0;
5736 	struct ethtool_rx_flow_spec *fs;
5737 	struct hclge_fd_rule *rule;
5738 	u32 unused = 0;
5739 	u8 action;
5740 	int ret;
5741 
5742 	if (!hnae3_dev_fd_supported(hdev)) {
5743 		dev_err(&hdev->pdev->dev,
5744 			"flow director is not supported\n");
5745 		return -EOPNOTSUPP;
5746 	}
5747 
5748 	if (!hdev->fd_en) {
5749 		dev_err(&hdev->pdev->dev,
5750 			"please enable flow director first\n");
5751 		return -EOPNOTSUPP;
5752 	}
5753 
5754 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5755 
5756 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5757 	if (ret)
5758 		return ret;
5759 
5760 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5761 		action = HCLGE_FD_ACTION_DROP_PACKET;
5762 	} else {
5763 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5764 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5765 		u16 tqps;
5766 
5767 		if (vf > hdev->num_req_vfs) {
5768 			dev_err(&hdev->pdev->dev,
5769 				"Error: vf id (%u) > max vf num (%u)\n",
5770 				vf, hdev->num_req_vfs);
5771 			return -EINVAL;
5772 		}
5773 
5774 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5775 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5776 
5777 		if (ring >= tqps) {
5778 			dev_err(&hdev->pdev->dev,
5779 				"Error: queue id (%u) > max queue id (%u)\n",
5780 				ring, tqps - 1);
5781 			return -EINVAL;
5782 		}
5783 
5784 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5785 		q_index = ring;
5786 	}
5787 
5788 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5789 	if (!rule)
5790 		return -ENOMEM;
5791 
5792 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5793 	if (ret) {
5794 		kfree(rule);
5795 		return ret;
5796 	}
5797 
5798 	rule->flow_type = fs->flow_type;
5799 	rule->location = fs->location;
5800 	rule->unused_tuple = unused;
5801 	rule->vf_id = dst_vport_id;
5802 	rule->queue_id = q_index;
5803 	rule->action = action;
5804 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5805 
5806 	/* To avoid rule conflicts, clear all arfs rules when the user
5807 	 * configures a rule via ethtool.
5808 	 */
5809 	spin_lock_bh(&hdev->fd_rule_lock);
5810 	hclge_clear_arfs_rules(handle);
5811 
5812 	ret = hclge_fd_config_rule(hdev, rule);
5813 
5814 	spin_unlock_bh(&hdev->fd_rule_lock);
5815 
5816 	return ret;
5817 }
5818 
5819 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5820 			      struct ethtool_rxnfc *cmd)
5821 {
5822 	struct hclge_vport *vport = hclge_get_vport(handle);
5823 	struct hclge_dev *hdev = vport->back;
5824 	struct ethtool_rx_flow_spec *fs;
5825 	int ret;
5826 
5827 	if (!hnae3_dev_fd_supported(hdev))
5828 		return -EOPNOTSUPP;
5829 
5830 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5831 
5832 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5833 		return -EINVAL;
5834 
5835 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5836 		dev_err(&hdev->pdev->dev,
5837 			"Delete failed, rule %u does not exist\n", fs->location);
5838 		return -ENOENT;
5839 	}
5840 
5841 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5842 				   NULL, false);
5843 	if (ret)
5844 		return ret;
5845 
5846 	spin_lock_bh(&hdev->fd_rule_lock);
5847 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5848 
5849 	spin_unlock_bh(&hdev->fd_rule_lock);
5850 
5851 	return ret;
5852 }
5853 
5854 /* make sure to be called with fd_rule_lock held */
5855 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5856 				     bool clear_list)
5857 {
5858 	struct hclge_vport *vport = hclge_get_vport(handle);
5859 	struct hclge_dev *hdev = vport->back;
5860 	struct hclge_fd_rule *rule;
5861 	struct hlist_node *node;
5862 	u16 location;
5863 
5864 	if (!hnae3_dev_fd_supported(hdev))
5865 		return;
5866 
5867 	for_each_set_bit(location, hdev->fd_bmap,
5868 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5869 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5870 				     NULL, false);
5871 
5872 	if (clear_list) {
5873 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5874 					  rule_node) {
5875 			hlist_del(&rule->rule_node);
5876 			kfree(rule);
5877 		}
5878 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5879 		hdev->hclge_fd_rule_num = 0;
5880 		bitmap_zero(hdev->fd_bmap,
5881 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5882 	}
5883 }
5884 
5885 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5886 {
5887 	struct hclge_vport *vport = hclge_get_vport(handle);
5888 	struct hclge_dev *hdev = vport->back;
5889 	struct hclge_fd_rule *rule;
5890 	struct hlist_node *node;
5891 	int ret;
5892 
5893 	/* Return ok here, because reset error handling will check this
5894 	 * return value. If error is returned here, the reset process will
5895 	 * fail.
5896 	 */
5897 	if (!hnae3_dev_fd_supported(hdev))
5898 		return 0;
5899 
5900 	/* if fd is disabled, the rules should not be restored during reset */
5901 	if (!hdev->fd_en)
5902 		return 0;
5903 
5904 	spin_lock_bh(&hdev->fd_rule_lock);
5905 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5906 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5907 		if (!ret)
5908 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5909 
5910 		if (ret) {
5911 			dev_warn(&hdev->pdev->dev,
5912 				 "Restore rule %u failed, remove it\n",
5913 				 rule->location);
5914 			clear_bit(rule->location, hdev->fd_bmap);
5915 			hlist_del(&rule->rule_node);
5916 			kfree(rule);
5917 			hdev->hclge_fd_rule_num--;
5918 		}
5919 	}
5920 
5921 	if (hdev->hclge_fd_rule_num)
5922 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5923 
5924 	spin_unlock_bh(&hdev->fd_rule_lock);
5925 
5926 	return 0;
5927 }
5928 
5929 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5930 				 struct ethtool_rxnfc *cmd)
5931 {
5932 	struct hclge_vport *vport = hclge_get_vport(handle);
5933 	struct hclge_dev *hdev = vport->back;
5934 
5935 	if (!hnae3_dev_fd_supported(hdev))
5936 		return -EOPNOTSUPP;
5937 
5938 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5939 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5940 
5941 	return 0;
5942 }
5943 
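/* The hclge_fd_get_*_info() helpers below do the reverse of the tuple
 * parsing: they fill an ethtool spec from a stored rule, reporting a zero
 * mask for every tuple that was recorded as unused when the rule was added.
 */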
5944 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5945 				     struct ethtool_tcpip4_spec *spec,
5946 				     struct ethtool_tcpip4_spec *spec_mask)
5947 {
5948 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5949 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5950 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5951 
5952 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5953 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5954 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5955 
5956 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
5957 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5958 			0 : cpu_to_be16(rule->tuples_mask.src_port);
5959 
5960 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5961 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5962 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
5963 
5964 	spec->tos = rule->tuples.ip_tos;
5965 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5966 			0 : rule->tuples_mask.ip_tos;
5967 }
5968 
5969 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5970 				  struct ethtool_usrip4_spec *spec,
5971 				  struct ethtool_usrip4_spec *spec_mask)
5972 {
5973 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5974 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5975 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5976 
5977 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5978 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5979 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5980 
5981 	spec->tos = rule->tuples.ip_tos;
5982 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5983 			0 : rule->tuples_mask.ip_tos;
5984 
5985 	spec->proto = rule->tuples.ip_proto;
5986 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5987 			0 : rule->tuples_mask.ip_proto;
5988 
5989 	spec->ip_ver = ETH_RX_NFC_IP4;
5990 }
5991 
5992 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
5993 				     struct ethtool_tcpip6_spec *spec,
5994 				     struct ethtool_tcpip6_spec *spec_mask)
5995 {
5996 	cpu_to_be32_array(spec->ip6src,
5997 			  rule->tuples.src_ip, IPV6_SIZE);
5998 	cpu_to_be32_array(spec->ip6dst,
5999 			  rule->tuples.dst_ip, IPV6_SIZE);
6000 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6001 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6002 	else
6003 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6004 				  IPV6_SIZE);
6005 
6006 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6007 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6008 	else
6009 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6010 				  IPV6_SIZE);
6011 
6012 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6013 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6014 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6015 
6016 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6017 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6018 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6019 }
6020 
6021 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6022 				  struct ethtool_usrip6_spec *spec,
6023 				  struct ethtool_usrip6_spec *spec_mask)
6024 {
6025 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6026 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6027 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6028 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6029 	else
6030 		cpu_to_be32_array(spec_mask->ip6src,
6031 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6032 
6033 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6034 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6035 	else
6036 		cpu_to_be32_array(spec_mask->ip6dst,
6037 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6038 
6039 	spec->l4_proto = rule->tuples.ip_proto;
6040 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6041 			0 : rule->tuples_mask.ip_proto;
6042 }
6043 
6044 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6045 				    struct ethhdr *spec,
6046 				    struct ethhdr *spec_mask)
6047 {
6048 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6049 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6050 
6051 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6052 		eth_zero_addr(spec_mask->h_source);
6053 	else
6054 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6055 
6056 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6057 		eth_zero_addr(spec_mask->h_dest);
6058 	else
6059 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6060 
6061 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6062 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6063 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6064 }
6065 
6066 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6067 				  struct hclge_fd_rule *rule)
6068 {
6069 	if (fs->flow_type & FLOW_EXT) {
6070 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6071 		fs->m_ext.vlan_tci =
6072 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6073 				cpu_to_be16(VLAN_VID_MASK) :
6074 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6075 	}
6076 
6077 	if (fs->flow_type & FLOW_MAC_EXT) {
6078 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6079 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6080 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6081 		else
6082 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6083 					rule->tuples_mask.dst_mac);
6084 	}
6085 }
6086 
6087 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6088 				  struct ethtool_rxnfc *cmd)
6089 {
6090 	struct hclge_vport *vport = hclge_get_vport(handle);
6091 	struct hclge_fd_rule *rule = NULL;
6092 	struct hclge_dev *hdev = vport->back;
6093 	struct ethtool_rx_flow_spec *fs;
6094 	struct hlist_node *node2;
6095 
6096 	if (!hnae3_dev_fd_supported(hdev))
6097 		return -EOPNOTSUPP;
6098 
6099 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6100 
6101 	spin_lock_bh(&hdev->fd_rule_lock);
6102 
6103 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6104 		if (rule->location >= fs->location)
6105 			break;
6106 	}
6107 
6108 	if (!rule || fs->location != rule->location) {
6109 		spin_unlock_bh(&hdev->fd_rule_lock);
6110 
6111 		return -ENOENT;
6112 	}
6113 
6114 	fs->flow_type = rule->flow_type;
6115 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6116 	case SCTP_V4_FLOW:
6117 	case TCP_V4_FLOW:
6118 	case UDP_V4_FLOW:
6119 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6120 					 &fs->m_u.tcp_ip4_spec);
6121 		break;
6122 	case IP_USER_FLOW:
6123 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6124 				      &fs->m_u.usr_ip4_spec);
6125 		break;
6126 	case SCTP_V6_FLOW:
6127 	case TCP_V6_FLOW:
6128 	case UDP_V6_FLOW:
6129 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6130 					 &fs->m_u.tcp_ip6_spec);
6131 		break;
6132 	case IPV6_USER_FLOW:
6133 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6134 				      &fs->m_u.usr_ip6_spec);
6135 		break;
6136 	/* The flow type of the fd rule has been checked before it was added to
6137 	 * the rule list. As all other flow types have been handled above, the
6138 	 * default case must be ETHER_FLOW.
6139 	 */
6140 	default:
6141 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6142 					&fs->m_u.ether_spec);
6143 		break;
6144 	}
6145 
6146 	hclge_fd_get_ext_info(fs, rule);
6147 
6148 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6149 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6150 	} else {
6151 		u64 vf_id;
6152 
6153 		fs->ring_cookie = rule->queue_id;
6154 		vf_id = rule->vf_id;
6155 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6156 		fs->ring_cookie |= vf_id;
6157 	}
6158 
6159 	spin_unlock_bh(&hdev->fd_rule_lock);
6160 
6161 	return 0;
6162 }
6163 
6164 static int hclge_get_all_rules(struct hnae3_handle *handle,
6165 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6166 {
6167 	struct hclge_vport *vport = hclge_get_vport(handle);
6168 	struct hclge_dev *hdev = vport->back;
6169 	struct hclge_fd_rule *rule;
6170 	struct hlist_node *node2;
6171 	int cnt = 0;
6172 
6173 	if (!hnae3_dev_fd_supported(hdev))
6174 		return -EOPNOTSUPP;
6175 
6176 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6177 
6178 	spin_lock_bh(&hdev->fd_rule_lock);
6179 	hlist_for_each_entry_safe(rule, node2,
6180 				  &hdev->fd_rule_list, rule_node) {
6181 		if (cnt == cmd->rule_cnt) {
6182 			spin_unlock_bh(&hdev->fd_rule_lock);
6183 			return -EMSGSIZE;
6184 		}
6185 
6186 		rule_locs[cnt] = rule->location;
6187 		cnt++;
6188 	}
6189 
6190 	spin_unlock_bh(&hdev->fd_rule_lock);
6191 
6192 	cmd->rule_cnt = cnt;
6193 
6194 	return 0;
6195 }
6196 
6197 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6198 				     struct hclge_fd_rule_tuples *tuples)
6199 {
6200 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6201 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6202 
6203 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6204 	tuples->ip_proto = fkeys->basic.ip_proto;
6205 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6206 
6207 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6208 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6209 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6210 	} else {
6211 		int i;
6212 
6213 		for (i = 0; i < IPV6_SIZE; i++) {
6214 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6215 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6216 		}
6217 	}
6218 }
6219 
6220 /* traverse all rules, check whether an existing rule has the same tuples */
6221 static struct hclge_fd_rule *
6222 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6223 			  const struct hclge_fd_rule_tuples *tuples)
6224 {
6225 	struct hclge_fd_rule *rule = NULL;
6226 	struct hlist_node *node;
6227 
6228 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6229 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6230 			return rule;
6231 	}
6232 
6233 	return NULL;
6234 }
6235 
6236 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6237 				     struct hclge_fd_rule *rule)
6238 {
6239 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6240 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6241 			     BIT(INNER_SRC_PORT);
6242 	rule->action = 0;
6243 	rule->vf_id = 0;
6244 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6245 	if (tuples->ether_proto == ETH_P_IP) {
6246 		if (tuples->ip_proto == IPPROTO_TCP)
6247 			rule->flow_type = TCP_V4_FLOW;
6248 		else
6249 			rule->flow_type = UDP_V4_FLOW;
6250 	} else {
6251 		if (tuples->ip_proto == IPPROTO_TCP)
6252 			rule->flow_type = TCP_V6_FLOW;
6253 		else
6254 			rule->flow_type = UDP_V6_FLOW;
6255 	}
6256 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6257 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6258 }
6259 
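/* aRFS entry point (typically reached from the stack's rx_flow_steer
 * callback). On success it returns the rule location, which the stack later
 * passes back as the filter id checked via rps_may_expire_flow() in
 * hclge_rfs_filter_expire(). aRFS rules are only accepted while no ethtool
 * (EP) rule is active; conversely, adding an ethtool rule clears all aRFS
 * rules.
 */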
6260 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6261 				      u16 flow_id, struct flow_keys *fkeys)
6262 {
6263 	struct hclge_vport *vport = hclge_get_vport(handle);
6264 	struct hclge_fd_rule_tuples new_tuples = {};
6265 	struct hclge_dev *hdev = vport->back;
6266 	struct hclge_fd_rule *rule;
6267 	u16 tmp_queue_id;
6268 	u16 bit_id;
6269 	int ret;
6270 
6271 	if (!hnae3_dev_fd_supported(hdev))
6272 		return -EOPNOTSUPP;
6273 
6274 	/* when there is already an fd rule added by the user,
6275 	 * arfs should not work
6276 	 */
6277 	spin_lock_bh(&hdev->fd_rule_lock);
6278 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6279 		spin_unlock_bh(&hdev->fd_rule_lock);
6280 		return -EOPNOTSUPP;
6281 	}
6282 
6283 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6284 
6285 	/* check whether a flow director filter already exists for this flow:
6286 	 * if not, create a new filter for it;
6287 	 * if a filter exists with a different queue id, modify the filter;
6288 	 * if a filter exists with the same queue id, do nothing
6289 	 */
6290 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6291 	if (!rule) {
6292 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6293 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6294 			spin_unlock_bh(&hdev->fd_rule_lock);
6295 			return -ENOSPC;
6296 		}
6297 
6298 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6299 		if (!rule) {
6300 			spin_unlock_bh(&hdev->fd_rule_lock);
6301 			return -ENOMEM;
6302 		}
6303 
6304 		set_bit(bit_id, hdev->fd_bmap);
6305 		rule->location = bit_id;
6306 		rule->flow_id = flow_id;
6307 		rule->queue_id = queue_id;
6308 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6309 		ret = hclge_fd_config_rule(hdev, rule);
6310 
6311 		spin_unlock_bh(&hdev->fd_rule_lock);
6312 
6313 		if (ret)
6314 			return ret;
6315 
6316 		return rule->location;
6317 	}
6318 
6319 	spin_unlock_bh(&hdev->fd_rule_lock);
6320 
6321 	if (rule->queue_id == queue_id)
6322 		return rule->location;
6323 
6324 	tmp_queue_id = rule->queue_id;
6325 	rule->queue_id = queue_id;
6326 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6327 	if (ret) {
6328 		rule->queue_id = tmp_queue_id;
6329 		return ret;
6330 	}
6331 
6332 	return rule->location;
6333 }
6334 
6335 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6336 {
6337 #ifdef CONFIG_RFS_ACCEL
6338 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6339 	struct hclge_fd_rule *rule;
6340 	struct hlist_node *node;
6341 	HLIST_HEAD(del_list);
6342 
6343 	spin_lock_bh(&hdev->fd_rule_lock);
6344 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6345 		spin_unlock_bh(&hdev->fd_rule_lock);
6346 		return;
6347 	}
6348 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6349 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6350 					rule->flow_id, rule->location)) {
6351 			hlist_del_init(&rule->rule_node);
6352 			hlist_add_head(&rule->rule_node, &del_list);
6353 			hdev->hclge_fd_rule_num--;
6354 			clear_bit(rule->location, hdev->fd_bmap);
6355 		}
6356 	}
6357 	spin_unlock_bh(&hdev->fd_rule_lock);
6358 
6359 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6360 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6361 				     rule->location, NULL, false);
6362 		kfree(rule);
6363 	}
6364 #endif
6365 }
6366 
6367 /* make sure to be called with fd_rule_lock held */
6368 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6369 {
6370 #ifdef CONFIG_RFS_ACCEL
6371 	struct hclge_vport *vport = hclge_get_vport(handle);
6372 	struct hclge_dev *hdev = vport->back;
6373 
6374 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6375 		hclge_del_all_fd_entries(handle, true);
6376 #endif
6377 }
6378 
6379 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6380 {
6381 	struct hclge_vport *vport = hclge_get_vport(handle);
6382 	struct hclge_dev *hdev = vport->back;
6383 
6384 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6385 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6386 }
6387 
6388 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6389 {
6390 	struct hclge_vport *vport = hclge_get_vport(handle);
6391 	struct hclge_dev *hdev = vport->back;
6392 
6393 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6394 }
6395 
6396 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6397 {
6398 	struct hclge_vport *vport = hclge_get_vport(handle);
6399 	struct hclge_dev *hdev = vport->back;
6400 
6401 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6402 }
6403 
6404 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6405 {
6406 	struct hclge_vport *vport = hclge_get_vport(handle);
6407 	struct hclge_dev *hdev = vport->back;
6408 
6409 	return hdev->rst_stats.hw_reset_done_cnt;
6410 }
6411 
6412 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6413 {
6414 	struct hclge_vport *vport = hclge_get_vport(handle);
6415 	struct hclge_dev *hdev = vport->back;
6416 	bool clear;
6417 
6418 	hdev->fd_en = enable;
6419 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6420 
6421 	if (!enable) {
6422 		spin_lock_bh(&hdev->fd_rule_lock);
6423 		hclge_del_all_fd_entries(handle, clear);
6424 		spin_unlock_bh(&hdev->fd_rule_lock);
6425 	} else {
6426 		hclge_restore_fd_entries(handle);
6427 	}
6428 }
6429 
6430 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6431 {
6432 	struct hclge_desc desc;
6433 	struct hclge_config_mac_mode_cmd *req =
6434 		(struct hclge_config_mac_mode_cmd *)desc.data;
6435 	u32 loop_en = 0;
6436 	int ret;
6437 
6438 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6439 
6440 	if (enable) {
6441 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6442 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6443 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6444 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6445 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6446 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6447 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6448 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6449 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6450 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6451 	}
6452 
6453 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6454 
6455 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6456 	if (ret)
6457 		dev_err(&hdev->pdev->dev,
6458 			"mac enable fail, ret =%d.\n", ret);
6459 }
6460 
6461 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6462 				     u8 switch_param, u8 param_mask)
6463 {
6464 	struct hclge_mac_vlan_switch_cmd *req;
6465 	struct hclge_desc desc;
6466 	u32 func_id;
6467 	int ret;
6468 
6469 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6470 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6471 
6472 	/* read current config parameter */
6473 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6474 				   true);
6475 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6476 	req->func_id = cpu_to_le32(func_id);
6477 
6478 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6479 	if (ret) {
6480 		dev_err(&hdev->pdev->dev,
6481 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6482 		return ret;
6483 	}
6484 
6485 	/* modify and write new config parameter */
6486 	hclge_cmd_reuse_desc(&desc, false);
6487 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6488 	req->param_mask = param_mask;
6489 
6490 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6491 	if (ret)
6492 		dev_err(&hdev->pdev->dev,
6493 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6494 	return ret;
6495 }
6496 
6497 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6498 				       int link_ret)
6499 {
6500 #define HCLGE_PHY_LINK_STATUS_NUM  200
6501 
6502 	struct phy_device *phydev = hdev->hw.mac.phydev;
6503 	int i = 0;
6504 	int ret;
6505 
6506 	do {
6507 		ret = phy_read_status(phydev);
6508 		if (ret) {
6509 			dev_err(&hdev->pdev->dev,
6510 				"phy update link status fail, ret = %d\n", ret);
6511 			return;
6512 		}
6513 
6514 		if (phydev->link == link_ret)
6515 			break;
6516 
6517 		msleep(HCLGE_LINK_STATUS_MS);
6518 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6519 }
6520 
6521 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6522 {
6523 #define HCLGE_MAC_LINK_STATUS_NUM  100
6524 
6525 	int link_status;
6526 	int i = 0;
6527 	int ret;
6528 
6529 	do {
6530 		ret = hclge_get_mac_link_status(hdev, &link_status);
6531 		if (ret)
6532 			return ret;
6533 		if (link_status == link_ret)
6534 			return 0;
6535 
6536 		msleep(HCLGE_LINK_STATUS_MS);
6537 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6538 	return -EBUSY;
6539 }
6540 
6541 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6542 					  bool is_phy)
6543 {
6544 	int link_ret;
6545 
6546 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6547 
6548 	if (is_phy)
6549 		hclge_phy_link_status_wait(hdev, link_ret);
6550 
6551 	return hclge_mac_link_status_wait(hdev, link_ret);
6552 }
6553 
6554 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6555 {
6556 	struct hclge_config_mac_mode_cmd *req;
6557 	struct hclge_desc desc;
6558 	u32 loop_en;
6559 	int ret;
6560 
6561 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6562 	/* 1 Read out the MAC mode config at first */
6563 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6564 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6565 	if (ret) {
6566 		dev_err(&hdev->pdev->dev,
6567 			"mac loopback get fail, ret =%d.\n", ret);
6568 		return ret;
6569 	}
6570 
6571 	/* 2 Then setup the loopback flag */
6572 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6573 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6574 
6575 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6576 
6577 	/* 3 Config mac work mode with the loopback flag
6578 	 * and its original configuration parameters
6579 	 */
6580 	hclge_cmd_reuse_desc(&desc, false);
6581 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6582 	if (ret)
6583 		dev_err(&hdev->pdev->dev,
6584 			"mac loopback set fail, ret =%d.\n", ret);
6585 	return ret;
6586 }
6587 
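/* Enable/disable serdes (serial or parallel) loopback through the firmware
 * command queue, then poll the result by re-reading the same command up to
 * HCLGE_SERDES_RETRY_NUM times for the DONE bit and checking the SUCCESS
 * bit, so callers get a synchronous result.
 */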
6588 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6589 				     enum hnae3_loop loop_mode)
6590 {
6591 #define HCLGE_SERDES_RETRY_MS	10
6592 #define HCLGE_SERDES_RETRY_NUM	100
6593 
6594 	struct hclge_serdes_lb_cmd *req;
6595 	struct hclge_desc desc;
6596 	int ret, i = 0;
6597 	u8 loop_mode_b;
6598 
6599 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6600 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6601 
6602 	switch (loop_mode) {
6603 	case HNAE3_LOOP_SERIAL_SERDES:
6604 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6605 		break;
6606 	case HNAE3_LOOP_PARALLEL_SERDES:
6607 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6608 		break;
6609 	default:
6610 		dev_err(&hdev->pdev->dev,
6611 			"unsupported serdes loopback mode %d\n", loop_mode);
6612 		return -ENOTSUPP;
6613 	}
6614 
6615 	if (en) {
6616 		req->enable = loop_mode_b;
6617 		req->mask = loop_mode_b;
6618 	} else {
6619 		req->mask = loop_mode_b;
6620 	}
6621 
6622 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6623 	if (ret) {
6624 		dev_err(&hdev->pdev->dev,
6625 			"serdes loopback set fail, ret = %d\n", ret);
6626 		return ret;
6627 	}
6628 
6629 	do {
6630 		msleep(HCLGE_SERDES_RETRY_MS);
6631 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6632 					   true);
6633 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6634 		if (ret) {
6635 			dev_err(&hdev->pdev->dev,
6636 				"serdes loopback get fail, ret = %d\n", ret);
6637 			return ret;
6638 		}
6639 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6640 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6641 
6642 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6643 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6644 		return -EBUSY;
6645 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6646 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6647 		return -EIO;
6648 	}
6649 	return ret;
6650 }
6651 
6652 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6653 				     enum hnae3_loop loop_mode)
6654 {
6655 	int ret;
6656 
6657 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6658 	if (ret)
6659 		return ret;
6660 
6661 	hclge_cfg_mac_mode(hdev, en);
6662 
6663 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6664 	if (ret)
6665 		dev_err(&hdev->pdev->dev,
6666 			"serdes loopback config mac mode timeout\n");
6667 
6668 	return ret;
6669 }
6670 
6671 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6672 				     struct phy_device *phydev)
6673 {
6674 	int ret;
6675 
6676 	if (!phydev->suspended) {
6677 		ret = phy_suspend(phydev);
6678 		if (ret)
6679 			return ret;
6680 	}
6681 
6682 	ret = phy_resume(phydev);
6683 	if (ret)
6684 		return ret;
6685 
6686 	return phy_loopback(phydev, true);
6687 }
6688 
6689 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6690 				      struct phy_device *phydev)
6691 {
6692 	int ret;
6693 
6694 	ret = phy_loopback(phydev, false);
6695 	if (ret)
6696 		return ret;
6697 
6698 	return phy_suspend(phydev);
6699 }
6700 
6701 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6702 {
6703 	struct phy_device *phydev = hdev->hw.mac.phydev;
6704 	int ret;
6705 
6706 	if (!phydev)
6707 		return -ENOTSUPP;
6708 
6709 	if (en)
6710 		ret = hclge_enable_phy_loopback(hdev, phydev);
6711 	else
6712 		ret = hclge_disable_phy_loopback(hdev, phydev);
6713 	if (ret) {
6714 		dev_err(&hdev->pdev->dev,
6715 			"set phy loopback fail, ret = %d\n", ret);
6716 		return ret;
6717 	}
6718 
6719 	hclge_cfg_mac_mode(hdev, en);
6720 
6721 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6722 	if (ret)
6723 		dev_err(&hdev->pdev->dev,
6724 			"phy loopback config mac mode timeout\n");
6725 
6726 	return ret;
6727 }
6728 
6729 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6730 			    int stream_id, bool enable)
6731 {
6732 	struct hclge_desc desc;
6733 	struct hclge_cfg_com_tqp_queue_cmd *req =
6734 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6735 	int ret;
6736 
6737 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6738 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6739 	req->stream_id = cpu_to_le16(stream_id);
6740 	if (enable)
6741 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6742 
6743 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6744 	if (ret)
6745 		dev_err(&hdev->pdev->dev,
6746 			"Tqp enable fail, status =%d.\n", ret);
6747 	return ret;
6748 }
6749 
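/* Loopback selection, used e.g. by the ethtool self-test: besides switching
 * the requested loopback mode on or off (and updating the SSU switch
 * parameter on revision 0x21 and later), every TQP of the vport is enabled
 * or disabled accordingly so test packets can flow through the rings.
 */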
6750 static int hclge_set_loopback(struct hnae3_handle *handle,
6751 			      enum hnae3_loop loop_mode, bool en)
6752 {
6753 	struct hclge_vport *vport = hclge_get_vport(handle);
6754 	struct hnae3_knic_private_info *kinfo;
6755 	struct hclge_dev *hdev = vport->back;
6756 	int i, ret;
6757 
6758 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6759 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6760 	 * the same, the packets are looped back in the SSU. If SSU loopback
6761 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6762 	 */
6763 	if (hdev->pdev->revision >= 0x21) {
6764 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6765 
6766 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6767 						HCLGE_SWITCH_ALW_LPBK_MASK);
6768 		if (ret)
6769 			return ret;
6770 	}
6771 
6772 	switch (loop_mode) {
6773 	case HNAE3_LOOP_APP:
6774 		ret = hclge_set_app_loopback(hdev, en);
6775 		break;
6776 	case HNAE3_LOOP_SERIAL_SERDES:
6777 	case HNAE3_LOOP_PARALLEL_SERDES:
6778 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6779 		break;
6780 	case HNAE3_LOOP_PHY:
6781 		ret = hclge_set_phy_loopback(hdev, en);
6782 		break;
6783 	default:
6784 		ret = -ENOTSUPP;
6785 		dev_err(&hdev->pdev->dev,
6786 			"loop_mode %d is not supported\n", loop_mode);
6787 		break;
6788 	}
6789 
6790 	if (ret)
6791 		return ret;
6792 
6793 	kinfo = &vport->nic.kinfo;
6794 	for (i = 0; i < kinfo->num_tqps; i++) {
6795 		ret = hclge_tqp_enable(hdev, i, 0, en);
6796 		if (ret)
6797 			return ret;
6798 	}
6799 
6800 	return 0;
6801 }
6802 
6803 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6804 {
6805 	int ret;
6806 
6807 	ret = hclge_set_app_loopback(hdev, false);
6808 	if (ret)
6809 		return ret;
6810 
6811 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6812 	if (ret)
6813 		return ret;
6814 
6815 	return hclge_cfg_serdes_loopback(hdev, false,
6816 					 HNAE3_LOOP_PARALLEL_SERDES);
6817 }
6818 
6819 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6820 {
6821 	struct hclge_vport *vport = hclge_get_vport(handle);
6822 	struct hnae3_knic_private_info *kinfo;
6823 	struct hnae3_queue *queue;
6824 	struct hclge_tqp *tqp;
6825 	int i;
6826 
6827 	kinfo = &vport->nic.kinfo;
6828 	for (i = 0; i < kinfo->num_tqps; i++) {
6829 		queue = handle->kinfo.tqp[i];
6830 		tqp = container_of(queue, struct hclge_tqp, q);
6831 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6832 	}
6833 }
6834 
6835 static void hclge_flush_link_update(struct hclge_dev *hdev)
6836 {
6837 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6838 
6839 	unsigned long last = hdev->serv_processed_cnt;
6840 	int i = 0;
6841 
6842 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6843 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6844 	       last == hdev->serv_processed_cnt)
6845 		usleep_range(1, 1);
6846 }
6847 
6848 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6849 {
6850 	struct hclge_vport *vport = hclge_get_vport(handle);
6851 	struct hclge_dev *hdev = vport->back;
6852 
6853 	if (enable) {
6854 		hclge_task_schedule(hdev, 0);
6855 	} else {
6856 		/* Set the DOWN flag here to disable link updating */
6857 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6858 
6859 		/* flush memory to make sure DOWN is seen by service task */
6860 		smp_mb__before_atomic();
6861 		hclge_flush_link_update(hdev);
6862 	}
6863 }
6864 
6865 static int hclge_ae_start(struct hnae3_handle *handle)
6866 {
6867 	struct hclge_vport *vport = hclge_get_vport(handle);
6868 	struct hclge_dev *hdev = vport->back;
6869 
6870 	/* mac enable */
6871 	hclge_cfg_mac_mode(hdev, true);
6872 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6873 	hdev->hw.mac.link = 0;
6874 
6875 	/* reset tqp stats */
6876 	hclge_reset_tqp_stats(handle);
6877 
6878 	hclge_mac_start_phy(hdev);
6879 
6880 	return 0;
6881 }
6882 
6883 static void hclge_ae_stop(struct hnae3_handle *handle)
6884 {
6885 	struct hclge_vport *vport = hclge_get_vport(handle);
6886 	struct hclge_dev *hdev = vport->back;
6887 	int i;
6888 
6889 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6890 	spin_lock_bh(&hdev->fd_rule_lock);
6891 	hclge_clear_arfs_rules(handle);
6892 	spin_unlock_bh(&hdev->fd_rule_lock);
6893 
6894 	/* If it is not a PF reset, the firmware will disable the MAC,
6895 	 * so we only need to stop the phy here.
6896 	 */
6897 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6898 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6899 		hclge_mac_stop_phy(hdev);
6900 		hclge_update_link_status(hdev);
6901 		return;
6902 	}
6903 
6904 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6905 		hclge_reset_tqp(handle, i);
6906 
6907 	hclge_config_mac_tnl_int(hdev, false);
6908 
6909 	/* Mac disable */
6910 	hclge_cfg_mac_mode(hdev, false);
6911 
6912 	hclge_mac_stop_phy(hdev);
6913 
6914 	/* reset tqp stats */
6915 	hclge_reset_tqp_stats(handle);
6916 	hclge_update_link_status(hdev);
6917 }
6918 
6919 int hclge_vport_start(struct hclge_vport *vport)
6920 {
6921 	struct hclge_dev *hdev = vport->back;
6922 
6923 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6924 	vport->last_active_jiffies = jiffies;
6925 
6926 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6927 		if (vport->vport_id) {
6928 			hclge_restore_mac_table_common(vport);
6929 			hclge_restore_vport_vlan_table(vport);
6930 		} else {
6931 			hclge_restore_hw_table(hdev);
6932 		}
6933 	}
6934 
6935 	clear_bit(vport->vport_id, hdev->vport_config_block);
6936 
6937 	return 0;
6938 }
6939 
6940 void hclge_vport_stop(struct hclge_vport *vport)
6941 {
6942 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6943 }
6944 
6945 static int hclge_client_start(struct hnae3_handle *handle)
6946 {
6947 	struct hclge_vport *vport = hclge_get_vport(handle);
6948 
6949 	return hclge_vport_start(vport);
6950 }
6951 
6952 static void hclge_client_stop(struct hnae3_handle *handle)
6953 {
6954 	struct hclge_vport *vport = hclge_get_vport(handle);
6955 
6956 	hclge_vport_stop(vport);
6957 }
6958 
6959 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6960 					 u16 cmdq_resp, u8  resp_code,
6961 					 enum hclge_mac_vlan_tbl_opcode op)
6962 {
6963 	struct hclge_dev *hdev = vport->back;
6964 
6965 	if (cmdq_resp) {
6966 		dev_err(&hdev->pdev->dev,
6967 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6968 			cmdq_resp);
6969 		return -EIO;
6970 	}
6971 
6972 	if (op == HCLGE_MAC_VLAN_ADD) {
6973 		if (!resp_code || resp_code == 1)
6974 			return 0;
6975 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6976 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
6977 			return -ENOSPC;
6978 
6979 		dev_err(&hdev->pdev->dev,
6980 			"add mac addr failed for undefined reason, code=%u.\n",
6981 			resp_code);
6982 		return -EIO;
6983 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6984 		if (!resp_code) {
6985 			return 0;
6986 		} else if (resp_code == 1) {
6987 			dev_dbg(&hdev->pdev->dev,
6988 				"remove mac addr failed for miss.\n");
6989 			return -ENOENT;
6990 		}
6991 
6992 		dev_err(&hdev->pdev->dev,
6993 			"remove mac addr failed for undefined reason, code=%u.\n",
6994 			resp_code);
6995 		return -EIO;
6996 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6997 		if (!resp_code) {
6998 			return 0;
6999 		} else if (resp_code == 1) {
7000 			dev_dbg(&hdev->pdev->dev,
7001 				"lookup mac addr failed for miss.\n");
7002 			return -ENOENT;
7003 		}
7004 
7005 		dev_err(&hdev->pdev->dev,
7006 			"lookup mac addr failed for undefined reason, code=%u.\n",
7007 			resp_code);
7008 		return -EIO;
7009 	}
7010 
7011 	dev_err(&hdev->pdev->dev,
7012 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7013 
7014 	return -EINVAL;
7015 }
7016 
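/* Set or clear the bit for one function id in a MAC/VLAN table entry that
 * spans three descriptors: the first 192 function ids live in desc[1] and
 * the remaining ones in desc[2], 32 ids per 32-bit data word.
 */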
7017 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7018 {
7019 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7020 
7021 	unsigned int word_num;
7022 	unsigned int bit_num;
7023 
7024 	if (vfid > 255 || vfid < 0)
7025 		return -EIO;
7026 
7027 	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7028 		word_num = vfid / 32;
7029 		bit_num  = vfid % 32;
7030 		if (clr)
7031 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7032 		else
7033 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7034 	} else {
7035 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7036 		bit_num  = vfid % 32;
7037 		if (clr)
7038 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7039 		else
7040 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7041 	}
7042 
7043 	return 0;
7044 }
7045 
7046 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7047 {
7048 #define HCLGE_DESC_NUMBER 3
7049 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7050 	int i, j;
7051 
7052 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7053 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7054 			if (desc[i].data[j])
7055 				return false;
7056 
7057 	return true;
7058 }
7059 
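/* Pack a MAC address into the hi32/lo16 fields of a MAC/VLAN table entry
 * (bytes 0-3 into mac_addr_hi32, bytes 4-5 into mac_addr_lo16) and set the
 * BIT0 enable flag; multicast entries additionally get the entry_type and
 * mc_mac_en bits set.
 */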
7060 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7061 				   const u8 *addr, bool is_mc)
7062 {
7063 	const unsigned char *mac_addr = addr;
7064 	u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
7065 		       (mac_addr[2] << 16) | (mac_addr[3] << 24);
7066 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7067 
7068 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7069 	if (is_mc) {
7070 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7071 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7072 	}
7073 
7074 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7075 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7076 }
7077 
7078 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7079 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7080 {
7081 	struct hclge_dev *hdev = vport->back;
7082 	struct hclge_desc desc;
7083 	u8 resp_code;
7084 	u16 retval;
7085 	int ret;
7086 
7087 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7088 
7089 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7090 
7091 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7092 	if (ret) {
7093 		dev_err(&hdev->pdev->dev,
7094 			"del mac addr failed for cmd_send, ret =%d.\n",
7095 			ret);
7096 		return ret;
7097 	}
7098 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7099 	retval = le16_to_cpu(desc.retval);
7100 
7101 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7102 					     HCLGE_MAC_VLAN_REMOVE);
7103 }
7104 
7105 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7106 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7107 				     struct hclge_desc *desc,
7108 				     bool is_mc)
7109 {
7110 	struct hclge_dev *hdev = vport->back;
7111 	u8 resp_code;
7112 	u16 retval;
7113 	int ret;
7114 
7115 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7116 	if (is_mc) {
7117 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7118 		memcpy(desc[0].data,
7119 		       req,
7120 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7121 		hclge_cmd_setup_basic_desc(&desc[1],
7122 					   HCLGE_OPC_MAC_VLAN_ADD,
7123 					   true);
7124 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7125 		hclge_cmd_setup_basic_desc(&desc[2],
7126 					   HCLGE_OPC_MAC_VLAN_ADD,
7127 					   true);
7128 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7129 	} else {
7130 		memcpy(desc[0].data,
7131 		       req,
7132 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7133 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7134 	}
7135 	if (ret) {
7136 		dev_err(&hdev->pdev->dev,
7137 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7138 			ret);
7139 		return ret;
7140 	}
7141 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7142 	retval = le16_to_cpu(desc[0].retval);
7143 
7144 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7145 					     HCLGE_MAC_VLAN_LKUP);
7146 }
7147 
7148 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7149 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7150 				  struct hclge_desc *mc_desc)
7151 {
7152 	struct hclge_dev *hdev = vport->back;
7153 	int cfg_status;
7154 	u8 resp_code;
7155 	u16 retval;
7156 	int ret;
7157 
7158 	if (!mc_desc) {
7159 		struct hclge_desc desc;
7160 
7161 		hclge_cmd_setup_basic_desc(&desc,
7162 					   HCLGE_OPC_MAC_VLAN_ADD,
7163 					   false);
7164 		memcpy(desc.data, req,
7165 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7166 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7167 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7168 		retval = le16_to_cpu(desc.retval);
7169 
7170 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7171 							   resp_code,
7172 							   HCLGE_MAC_VLAN_ADD);
7173 	} else {
7174 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7175 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7176 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7177 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7178 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7179 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7180 		memcpy(mc_desc[0].data, req,
7181 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7182 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7183 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7184 		retval = le16_to_cpu(mc_desc[0].retval);
7185 
7186 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7187 							   resp_code,
7188 							   HCLGE_MAC_VLAN_ADD);
7189 	}
7190 
7191 	if (ret) {
7192 		dev_err(&hdev->pdev->dev,
7193 			"add mac addr failed for cmd_send, ret =%d.\n",
7194 			ret);
7195 		return ret;
7196 	}
7197 
7198 	return cfg_status;
7199 }
7200 
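/* Ask the firmware to allocate @space_size entries of umv space and report
 * the size actually granted through @allocated_size.
 */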
7201 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7202 			       u16 *allocated_size)
7203 {
7204 	struct hclge_umv_spc_alc_cmd *req;
7205 	struct hclge_desc desc;
7206 	int ret;
7207 
7208 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7209 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7210 
7211 	req->space_size = cpu_to_le32(space_size);
7212 
7213 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7214 	if (ret) {
7215 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7216 			ret);
7217 		return ret;
7218 	}
7219 
7220 	*allocated_size = le32_to_cpu(desc.data[1]);
7221 
7222 	return 0;
7223 }
7224 
7225 static int hclge_init_umv_space(struct hclge_dev *hdev)
7226 {
7227 	u16 allocated_size = 0;
7228 	int ret;
7229 
7230 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7231 	if (ret)
7232 		return ret;
7233 
7234 	if (allocated_size < hdev->wanted_umv_size)
7235 		dev_warn(&hdev->pdev->dev,
7236 			 "failed to alloc umv space, want %u, get %u\n",
7237 			 hdev->wanted_umv_size, allocated_size);
7238 
7239 	hdev->max_umv_size = allocated_size;
7240 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7241 	hdev->share_umv_size = hdev->priv_umv_size +
7242 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7243 
7244 	return 0;
7245 }
7246 
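/* Reset the per-vport used umv counters and recompute the shared umv space. */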
7247 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7248 {
7249 	struct hclge_vport *vport;
7250 	int i;
7251 
7252 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7253 		vport = &hdev->vport[i];
7254 		vport->used_umv_num = 0;
7255 	}
7256 
7257 	mutex_lock(&hdev->vport_lock);
7258 	hdev->share_umv_size = hdev->priv_umv_size +
7259 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7260 	mutex_unlock(&hdev->vport_lock);
7261 }
7262 
7263 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7264 {
7265 	struct hclge_dev *hdev = vport->back;
7266 	bool is_full;
7267 
7268 	if (need_lock)
7269 		mutex_lock(&hdev->vport_lock);
7270 
7271 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7272 		   hdev->share_umv_size == 0);
7273 
7274 	if (need_lock)
7275 		mutex_unlock(&hdev->vport_lock);
7276 
7277 	return is_full;
7278 }
7279 
7280 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7281 {
7282 	struct hclge_dev *hdev = vport->back;
7283 
7284 	if (is_free) {
7285 		if (vport->used_umv_num > hdev->priv_umv_size)
7286 			hdev->share_umv_size++;
7287 
7288 		if (vport->used_umv_num > 0)
7289 			vport->used_umv_num--;
7290 	} else {
7291 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7292 		    hdev->share_umv_size > 0)
7293 			hdev->share_umv_size--;
7294 		vport->used_umv_num++;
7295 	}
7296 }
7297 
7298 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7299 						  const u8 *mac_addr)
7300 {
7301 	struct hclge_mac_node *mac_node, *tmp;
7302 
7303 	list_for_each_entry_safe(mac_node, tmp, list, node)
7304 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7305 			return mac_node;
7306 
7307 	return NULL;
7308 }
7309 
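/* Merge a newly requested @state into an existing mac node: TO_ADD cancels a
 * pending TO_DEL (the node becomes ACTIVE), TO_DEL frees a node that is still
 * only TO_ADD and otherwise marks it TO_DEL, and ACTIVE promotes a TO_ADD
 * node to ACTIVE.
 */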
7310 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7311 				  enum HCLGE_MAC_NODE_STATE state)
7312 {
7313 	switch (state) {
7314 	/* from set_rx_mode or tmp_add_list */
7315 	case HCLGE_MAC_TO_ADD:
7316 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7317 			mac_node->state = HCLGE_MAC_ACTIVE;
7318 		break;
7319 	/* only from set_rx_mode */
7320 	case HCLGE_MAC_TO_DEL:
7321 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7322 			list_del(&mac_node->node);
7323 			kfree(mac_node);
7324 		} else {
7325 			mac_node->state = HCLGE_MAC_TO_DEL;
7326 		}
7327 		break;
7328 	/* only from tmp_add_list; in this case the mac_node->state
7329 	 * won't be ACTIVE.
7330 	 */
7331 	case HCLGE_MAC_ACTIVE:
7332 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7333 			mac_node->state = HCLGE_MAC_ACTIVE;
7334 
7335 		break;
7336 	}
7337 }
7338 
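/* Record a @state (TO_ADD/TO_DEL) request for @addr in the vport's unicast or
 * multicast mac list; the hardware table itself is updated later when
 * hclge_sync_mac_table() processes the list.
 */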
7339 int hclge_update_mac_list(struct hclge_vport *vport,
7340 			  enum HCLGE_MAC_NODE_STATE state,
7341 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7342 			  const unsigned char *addr)
7343 {
7344 	struct hclge_dev *hdev = vport->back;
7345 	struct hclge_mac_node *mac_node;
7346 	struct list_head *list;
7347 
7348 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7349 		&vport->uc_mac_list : &vport->mc_mac_list;
7350 
7351 	spin_lock_bh(&vport->mac_list_lock);
7352 
7353 	/* if the mac addr is already in the mac list, there is no need to add
7354 	 * a new one; just check the mac addr state and convert it to a new
7355 	 * state, remove it, or do nothing.
7356 	 */
7357 	mac_node = hclge_find_mac_node(list, addr);
7358 	if (mac_node) {
7359 		hclge_update_mac_node(mac_node, state);
7360 		spin_unlock_bh(&vport->mac_list_lock);
7361 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7362 		return 0;
7363 	}
7364 
7365 	/* if this address has never been added, there is nothing to delete */
7366 	if (state == HCLGE_MAC_TO_DEL) {
7367 		spin_unlock_bh(&vport->mac_list_lock);
7368 		dev_err(&hdev->pdev->dev,
7369 			"failed to delete address %pM from mac list\n",
7370 			addr);
7371 		return -ENOENT;
7372 	}
7373 
7374 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7375 	if (!mac_node) {
7376 		spin_unlock_bh(&vport->mac_list_lock);
7377 		return -ENOMEM;
7378 	}
7379 
7380 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7381 
7382 	mac_node->state = state;
7383 	ether_addr_copy(mac_node->mac_addr, addr);
7384 	list_add_tail(&mac_node->node, list);
7385 
7386 	spin_unlock_bh(&vport->mac_list_lock);
7387 
7388 	return 0;
7389 }
7390 
7391 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7392 			     const unsigned char *addr)
7393 {
7394 	struct hclge_vport *vport = hclge_get_vport(handle);
7395 
7396 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7397 				     addr);
7398 }
7399 
7400 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7401 			     const unsigned char *addr)
7402 {
7403 	struct hclge_dev *hdev = vport->back;
7404 	struct hclge_mac_vlan_tbl_entry_cmd req;
7405 	struct hclge_desc desc;
7406 	u16 egress_port = 0;
7407 	int ret;
7408 
7409 	/* mac addr check */
7410 	if (is_zero_ether_addr(addr) ||
7411 	    is_broadcast_ether_addr(addr) ||
7412 	    is_multicast_ether_addr(addr)) {
7413 		dev_err(&hdev->pdev->dev,
7414 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7415 			 addr, is_zero_ether_addr(addr),
7416 			 is_broadcast_ether_addr(addr),
7417 			 is_multicast_ether_addr(addr));
7418 		return -EINVAL;
7419 	}
7420 
7421 	memset(&req, 0, sizeof(req));
7422 
7423 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7424 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7425 
7426 	req.egress_port = cpu_to_le16(egress_port);
7427 
7428 	hclge_prepare_mac_addr(&req, addr, false);
7429 
7430 	/* Look up the mac address in the mac_vlan table, and add
7431 	 * it if the entry does not exist. Duplicate unicast entries
7432 	 * are not allowed in the mac vlan table.
7433 	 */
7434 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7435 	if (ret == -ENOENT) {
7436 		mutex_lock(&hdev->vport_lock);
7437 		if (!hclge_is_umv_space_full(vport, false)) {
7438 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7439 			if (!ret)
7440 				hclge_update_umv_space(vport, false);
7441 			mutex_unlock(&hdev->vport_lock);
7442 			return ret;
7443 		}
7444 		mutex_unlock(&hdev->vport_lock);
7445 
7446 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7447 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7448 				hdev->priv_umv_size);
7449 
7450 		return -ENOSPC;
7451 	}
7452 
7453 	/* check if we just hit a duplicate entry */
7454 	if (!ret) {
7455 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7456 			 vport->vport_id, addr);
7457 		return 0;
7458 	}
7459 
7460 	dev_err(&hdev->pdev->dev,
7461 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7462 		addr);
7463 
7464 	return ret;
7465 }
7466 
7467 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7468 			    const unsigned char *addr)
7469 {
7470 	struct hclge_vport *vport = hclge_get_vport(handle);
7471 
7472 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7473 				     addr);
7474 }
7475 
7476 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7477 			    const unsigned char *addr)
7478 {
7479 	struct hclge_dev *hdev = vport->back;
7480 	struct hclge_mac_vlan_tbl_entry_cmd req;
7481 	int ret;
7482 
7483 	/* mac addr check */
7484 	if (is_zero_ether_addr(addr) ||
7485 	    is_broadcast_ether_addr(addr) ||
7486 	    is_multicast_ether_addr(addr)) {
7487 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7488 			addr);
7489 		return -EINVAL;
7490 	}
7491 
7492 	memset(&req, 0, sizeof(req));
7493 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7494 	hclge_prepare_mac_addr(&req, addr, false);
7495 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7496 	if (!ret) {
7497 		mutex_lock(&hdev->vport_lock);
7498 		hclge_update_umv_space(vport, true);
7499 		mutex_unlock(&hdev->vport_lock);
7500 	} else if (ret == -ENOENT) {
7501 		ret = 0;
7502 	}
7503 
7504 	return ret;
7505 }
7506 
7507 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7508 			     const unsigned char *addr)
7509 {
7510 	struct hclge_vport *vport = hclge_get_vport(handle);
7511 
7512 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7513 				     addr);
7514 }
7515 
7516 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7517 			     const unsigned char *addr)
7518 {
7519 	struct hclge_dev *hdev = vport->back;
7520 	struct hclge_mac_vlan_tbl_entry_cmd req;
7521 	struct hclge_desc desc[3];
7522 	int status;
7523 
7524 	/* mac addr check */
7525 	if (!is_multicast_ether_addr(addr)) {
7526 		dev_err(&hdev->pdev->dev,
7527 			"Add mc mac err! invalid mac:%pM.\n",
7528 			 addr);
7529 		return -EINVAL;
7530 	}
7531 	memset(&req, 0, sizeof(req));
7532 	hclge_prepare_mac_addr(&req, addr, true);
7533 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7534 	if (status) {
7535 		/* This mac addr does not exist, add a new entry for it */
7536 		memset(desc[0].data, 0, sizeof(desc[0].data));
7537 		memset(desc[1].data, 0, sizeof(desc[0].data));
7538 		memset(desc[2].data, 0, sizeof(desc[0].data));
7539 	}
7540 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7541 	if (status)
7542 		return status;
7543 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7544 
7545 	/* if already overflowed, do not print the error each time */
7546 	if (status == -ENOSPC &&
7547 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7548 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7549 
7550 	return status;
7551 }
7552 
7553 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7554 			    const unsigned char *addr)
7555 {
7556 	struct hclge_vport *vport = hclge_get_vport(handle);
7557 
7558 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7559 				     addr);
7560 }
7561 
7562 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7563 			    const unsigned char *addr)
7564 {
7565 	struct hclge_dev *hdev = vport->back;
7566 	struct hclge_mac_vlan_tbl_entry_cmd req;
7567 	enum hclge_cmd_status status;
7568 	struct hclge_desc desc[3];
7569 
7570 	/* mac addr check */
7571 	if (!is_multicast_ether_addr(addr)) {
7572 		dev_dbg(&hdev->pdev->dev,
7573 			"Remove mc mac err! invalid mac:%pM.\n",
7574 			 addr);
7575 		return -EINVAL;
7576 	}
7577 
7578 	memset(&req, 0, sizeof(req));
7579 	hclge_prepare_mac_addr(&req, addr, true);
7580 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7581 	if (!status) {
7582 		/* This mac addr exists, remove this handle's VFID from it */
7583 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7584 		if (status)
7585 			return status;
7586 
7587 		if (hclge_is_all_function_id_zero(desc))
7588 			/* All the vfids are zero, so delete this entry */
7589 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7590 		else
7591 			/* Not all the vfids are zero, just update the vfids */
7592 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7593 
7594 	} else if (status == -ENOENT) {
7595 		status = 0;
7596 	}
7597 
7598 	return status;
7599 }
7600 
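/* Write each address on @list to hardware via @sync. Nodes that succeed are
 * marked ACTIVE; on the first failure the MAC_TBL_CHANGE flag is set again so
 * the remaining nodes are retried by a later sync.
 */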
7601 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7602 				      struct list_head *list,
7603 				      int (*sync)(struct hclge_vport *,
7604 						  const unsigned char *))
7605 {
7606 	struct hclge_mac_node *mac_node, *tmp;
7607 	int ret;
7608 
7609 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7610 		ret = sync(vport, mac_node->mac_addr);
7611 		if (!ret) {
7612 			mac_node->state = HCLGE_MAC_ACTIVE;
7613 		} else {
7614 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7615 				&vport->state);
7616 			break;
7617 		}
7618 	}
7619 }
7620 
7621 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7622 					struct list_head *list,
7623 					int (*unsync)(struct hclge_vport *,
7624 						      const unsigned char *))
7625 {
7626 	struct hclge_mac_node *mac_node, *tmp;
7627 	int ret;
7628 
7629 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7630 		ret = unsync(vport, mac_node->mac_addr);
7631 		if (!ret || ret == -ENOENT) {
7632 			list_del(&mac_node->node);
7633 			kfree(mac_node);
7634 		} else {
7635 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7636 				&vport->state);
7637 			break;
7638 		}
7639 	}
7640 }
7641 
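/* Merge the temporary add list back into the vport mac list after the
 * hardware sync. Returns true only when every address was programmed into
 * hardware, i.e. no node was left in the TO_ADD state.
 */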
7642 static bool hclge_sync_from_add_list(struct list_head *add_list,
7643 				     struct list_head *mac_list)
7644 {
7645 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7646 	bool all_added = true;
7647 
7648 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7649 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7650 			all_added = false;
7651 
7652 		/* if the mac address from tmp_add_list is not in the
7653 		 * uc/mc_mac_list, it means a TO_DEL request was received
7654 		 * during the time window of adding the mac address into the
7655 		 * mac table. If the mac_node state is ACTIVE, change it to
7656 		 * TO_DEL so it gets removed next time. Otherwise it must be
7657 		 * TO_ADD, which means the address has not been added into
7658 		 * the mac table yet, so just free the mac node.
7659 		 */
7660 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7661 		if (new_node) {
7662 			hclge_update_mac_node(new_node, mac_node->state);
7663 			list_del(&mac_node->node);
7664 			kfree(mac_node);
7665 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7666 			mac_node->state = HCLGE_MAC_TO_DEL;
7667 			list_del(&mac_node->node);
7668 			list_add_tail(&mac_node->node, mac_list);
7669 		} else {
7670 			list_del(&mac_node->node);
7671 			kfree(mac_node);
7672 		}
7673 	}
7674 
7675 	return all_added;
7676 }
7677 
7678 static void hclge_sync_from_del_list(struct list_head *del_list,
7679 				     struct list_head *mac_list)
7680 {
7681 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7682 
7683 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7684 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7685 		if (new_node) {
7686 			/* If the mac addr exists in the mac list, it means
7687 			 * a new TO_ADD request was received during the time
7688 			 * window of configuring the mac address. Since the mac
7689 			 * node state is TO_ADD and the address is still in the
7690 			 * hardware (the delete failed), we just need to change
7691 			 * the mac node state to ACTIVE.
7692 			 */
7693 			new_node->state = HCLGE_MAC_ACTIVE;
7694 			list_del(&mac_node->node);
7695 			kfree(mac_node);
7696 		} else {
7697 			list_del(&mac_node->node);
7698 			list_add_tail(&mac_node->node, mac_list);
7699 		}
7700 	}
7701 }
7702 
7703 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7704 					enum HCLGE_MAC_ADDR_TYPE mac_type,
7705 					bool is_all_added)
7706 {
7707 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7708 		if (is_all_added)
7709 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7710 		else
7711 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7712 	} else {
7713 		if (is_all_added)
7714 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7715 		else
7716 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7717 	}
7718 }
7719 
7720 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7721 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
7722 {
7723 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7724 	struct list_head tmp_add_list, tmp_del_list;
7725 	struct list_head *list;
7726 	bool all_added;
7727 
7728 	INIT_LIST_HEAD(&tmp_add_list);
7729 	INIT_LIST_HEAD(&tmp_del_list);
7730 
7731 	/* move the mac addrs to tmp_add_list and tmp_del_list, so that
7732 	 * we can add/delete them outside the spin lock
7733 	 */
7734 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7735 		&vport->uc_mac_list : &vport->mc_mac_list;
7736 
7737 	spin_lock_bh(&vport->mac_list_lock);
7738 
7739 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7740 		switch (mac_node->state) {
7741 		case HCLGE_MAC_TO_DEL:
7742 			list_del(&mac_node->node);
7743 			list_add_tail(&mac_node->node, &tmp_del_list);
7744 			break;
7745 		case HCLGE_MAC_TO_ADD:
7746 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7747 			if (!new_node)
7748 				goto stop_traverse;
7749 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7750 			new_node->state = mac_node->state;
7751 			list_add_tail(&new_node->node, &tmp_add_list);
7752 			break;
7753 		default:
7754 			break;
7755 		}
7756 	}
7757 
7758 stop_traverse:
7759 	spin_unlock_bh(&vport->mac_list_lock);
7760 
7761 	/* delete first, in order to get max mac table space for adding */
7762 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7763 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7764 					    hclge_rm_uc_addr_common);
7765 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7766 					  hclge_add_uc_addr_common);
7767 	} else {
7768 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7769 					    hclge_rm_mc_addr_common);
7770 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7771 					  hclge_add_mc_addr_common);
7772 	}
7773 
7774 	/* if some mac addresses failed to be added/deleted, move them back
7775 	 * to the mac_list and retry next time.
7776 	 */
7777 	spin_lock_bh(&vport->mac_list_lock);
7778 
7779 	hclge_sync_from_del_list(&tmp_del_list, list);
7780 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7781 
7782 	spin_unlock_bh(&vport->mac_list_lock);
7783 
7784 	hclge_update_overflow_flags(vport, mac_type, all_added);
7785 }
7786 
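/* A vport needs a mac table sync when its MAC_TBL_CHANGE flag is set and its
 * configuration is not currently blocked via vport_config_block; the flag is
 * cleared here and set again if the sync does not fully succeed.
 */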
7787 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7788 {
7789 	struct hclge_dev *hdev = vport->back;
7790 
7791 	if (test_bit(vport->vport_id, hdev->vport_config_block))
7792 		return false;
7793 
7794 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7795 		return true;
7796 
7797 	return false;
7798 }
7799 
7800 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7801 {
7802 	int i;
7803 
7804 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7805 		struct hclge_vport *vport = &hdev->vport[i];
7806 
7807 		if (!hclge_need_sync_mac_table(vport))
7808 			continue;
7809 
7810 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7811 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7812 	}
7813 }
7814 
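/* Remove all unicast or multicast addresses of @vport from the hardware
 * table. When @is_del_list is true the addresses are dropped from the
 * software list as well; otherwise the active addresses are kept and
 * downgraded to TO_ADD so they can be restored later, and syncing is paused
 * via vport_config_block.
 */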
7815 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7816 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7817 {
7818 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7819 	struct hclge_mac_node *mac_cfg, *tmp;
7820 	struct hclge_dev *hdev = vport->back;
7821 	struct list_head tmp_del_list, *list;
7822 	int ret;
7823 
7824 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7825 		list = &vport->uc_mac_list;
7826 		unsync = hclge_rm_uc_addr_common;
7827 	} else {
7828 		list = &vport->mc_mac_list;
7829 		unsync = hclge_rm_mc_addr_common;
7830 	}
7831 
7832 	INIT_LIST_HEAD(&tmp_del_list);
7833 
7834 	if (!is_del_list)
7835 		set_bit(vport->vport_id, hdev->vport_config_block);
7836 
7837 	spin_lock_bh(&vport->mac_list_lock);
7838 
7839 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7840 		switch (mac_cfg->state) {
7841 		case HCLGE_MAC_TO_DEL:
7842 		case HCLGE_MAC_ACTIVE:
7843 			list_del(&mac_cfg->node);
7844 			list_add_tail(&mac_cfg->node, &tmp_del_list);
7845 			break;
7846 		case HCLGE_MAC_TO_ADD:
7847 			if (is_del_list) {
7848 				list_del(&mac_cfg->node);
7849 				kfree(mac_cfg);
7850 			}
7851 			break;
7852 		}
7853 	}
7854 
7855 	spin_unlock_bh(&vport->mac_list_lock);
7856 
7857 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7858 		ret = unsync(vport, mac_cfg->mac_addr);
7859 		if (!ret || ret == -ENOENT) {
7860 			/* clear all mac addrs from hardware, but keep them in
7861 			 * the mac list so they can be restored after the vf
7862 			 * reset finishes.
7863 			 */
7864 			if (!is_del_list &&
7865 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
7866 				mac_cfg->state = HCLGE_MAC_TO_ADD;
7867 			} else {
7868 				list_del(&mac_cfg->node);
7869 				kfree(mac_cfg);
7870 			}
7871 		} else if (is_del_list) {
7872 			mac_cfg->state = HCLGE_MAC_TO_DEL;
7873 		}
7874 	}
7875 
7876 	spin_lock_bh(&vport->mac_list_lock);
7877 
7878 	hclge_sync_from_del_list(&tmp_del_list, list);
7879 
7880 	spin_unlock_bh(&vport->mac_list_lock);
7881 }
7882 
7883 /* remove all mac addresses when uninitializing */
7884 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7885 					enum HCLGE_MAC_ADDR_TYPE mac_type)
7886 {
7887 	struct hclge_mac_node *mac_node, *tmp;
7888 	struct hclge_dev *hdev = vport->back;
7889 	struct list_head tmp_del_list, *list;
7890 
7891 	INIT_LIST_HEAD(&tmp_del_list);
7892 
7893 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7894 		&vport->uc_mac_list : &vport->mc_mac_list;
7895 
7896 	spin_lock_bh(&vport->mac_list_lock);
7897 
7898 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7899 		switch (mac_node->state) {
7900 		case HCLGE_MAC_TO_DEL:
7901 		case HCLGE_MAC_ACTIVE:
7902 			list_del(&mac_node->node);
7903 			list_add_tail(&mac_node->node, &tmp_del_list);
7904 			break;
7905 		case HCLGE_MAC_TO_ADD:
7906 			list_del(&mac_node->node);
7907 			kfree(mac_node);
7908 			break;
7909 		}
7910 	}
7911 
7912 	spin_unlock_bh(&vport->mac_list_lock);
7913 
7914 	if (mac_type == HCLGE_MAC_ADDR_UC)
7915 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7916 					    hclge_rm_uc_addr_common);
7917 	else
7918 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7919 					    hclge_rm_mc_addr_common);
7920 
7921 	if (!list_empty(&tmp_del_list))
7922 		dev_warn(&hdev->pdev->dev,
7923 			 "failed to completely uninit %s mac list for vport %u\n",
7924 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7925 			 vport->vport_id);
7926 
7927 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7928 		list_del(&mac_node->node);
7929 		kfree(mac_node);
7930 	}
7931 }
7932 
7933 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
7934 {
7935 	struct hclge_vport *vport;
7936 	int i;
7937 
7938 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7939 		vport = &hdev->vport[i];
7940 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7941 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
7942 	}
7943 }
7944 
7945 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7946 					      u16 cmdq_resp, u8 resp_code)
7947 {
7948 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7949 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7950 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7951 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7952 
7953 	int return_status;
7954 
7955 	if (cmdq_resp) {
7956 		dev_err(&hdev->pdev->dev,
7957 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7958 			cmdq_resp);
7959 		return -EIO;
7960 	}
7961 
7962 	switch (resp_code) {
7963 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7964 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7965 		return_status = 0;
7966 		break;
7967 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7968 		dev_err(&hdev->pdev->dev,
7969 			"add mac ethertype failed for manager table overflow.\n");
7970 		return_status = -EIO;
7971 		break;
7972 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7973 		dev_err(&hdev->pdev->dev,
7974 			"add mac ethertype failed for key conflict.\n");
7975 		return_status = -EIO;
7976 		break;
7977 	default:
7978 		dev_err(&hdev->pdev->dev,
7979 			"add mac ethertype failed for undefined, code=%u.\n",
7980 			resp_code);
7981 		return_status = -EIO;
7982 	}
7983 
7984 	return return_status;
7985 }
7986 
7987 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7988 				     u8 *mac_addr)
7989 {
7990 	struct hclge_mac_vlan_tbl_entry_cmd req;
7991 	struct hclge_dev *hdev = vport->back;
7992 	struct hclge_desc desc;
7993 	u16 egress_port = 0;
7994 	int i;
7995 
7996 	if (is_zero_ether_addr(mac_addr))
7997 		return false;
7998 
7999 	memset(&req, 0, sizeof(req));
8000 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8001 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8002 	req.egress_port = cpu_to_le16(egress_port);
8003 	hclge_prepare_mac_addr(&req, mac_addr, false);
8004 
8005 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8006 		return true;
8007 
8008 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8009 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8010 		if (i != vf_idx &&
8011 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8012 			return true;
8013 
8014 	return false;
8015 }
8016 
8017 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8018 			    u8 *mac_addr)
8019 {
8020 	struct hclge_vport *vport = hclge_get_vport(handle);
8021 	struct hclge_dev *hdev = vport->back;
8022 
8023 	vport = hclge_get_vf_vport(hdev, vf);
8024 	if (!vport)
8025 		return -EINVAL;
8026 
8027 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8028 		dev_info(&hdev->pdev->dev,
8029 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8030 			 mac_addr);
8031 		return 0;
8032 	}
8033 
8034 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8035 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8036 			mac_addr);
8037 		return -EEXIST;
8038 	}
8039 
8040 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8041 
8042 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8043 		dev_info(&hdev->pdev->dev,
8044 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8045 			 vf, mac_addr);
8046 		return hclge_inform_reset_assert_to_vf(vport);
8047 	}
8048 
8049 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8050 		 vf, mac_addr);
8051 	return 0;
8052 }
8053 
8054 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8055 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8056 {
8057 	struct hclge_desc desc;
8058 	u8 resp_code;
8059 	u16 retval;
8060 	int ret;
8061 
8062 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8063 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8064 
8065 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8066 	if (ret) {
8067 		dev_err(&hdev->pdev->dev,
8068 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8069 			ret);
8070 		return ret;
8071 	}
8072 
8073 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8074 	retval = le16_to_cpu(desc.retval);
8075 
8076 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8077 }
8078 
8079 static int init_mgr_tbl(struct hclge_dev *hdev)
8080 {
8081 	int ret;
8082 	int i;
8083 
8084 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8085 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8086 		if (ret) {
8087 			dev_err(&hdev->pdev->dev,
8088 				"add mac ethertype failed, ret =%d.\n",
8089 				ret);
8090 			return ret;
8091 		}
8092 	}
8093 
8094 	return 0;
8095 }
8096 
8097 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8098 {
8099 	struct hclge_vport *vport = hclge_get_vport(handle);
8100 	struct hclge_dev *hdev = vport->back;
8101 
8102 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8103 }
8104 
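/* Replace the device's own unicast address in the vport mac list: @new_addr
 * is queued as TO_ADD (or revived if it was pending deletion) and moved to
 * the list head, while @old_addr, when given and different, is marked for
 * deletion.
 */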
8105 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8106 				       const u8 *old_addr, const u8 *new_addr)
8107 {
8108 	struct list_head *list = &vport->uc_mac_list;
8109 	struct hclge_mac_node *old_node, *new_node;
8110 
8111 	new_node = hclge_find_mac_node(list, new_addr);
8112 	if (!new_node) {
8113 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8114 		if (!new_node)
8115 			return -ENOMEM;
8116 
8117 		new_node->state = HCLGE_MAC_TO_ADD;
8118 		ether_addr_copy(new_node->mac_addr, new_addr);
8119 		list_add(&new_node->node, list);
8120 	} else {
8121 		if (new_node->state == HCLGE_MAC_TO_DEL)
8122 			new_node->state = HCLGE_MAC_ACTIVE;
8123 
8124 		/* make sure the new addr is at the list head, otherwise the
8125 		 * dev addr may not be re-added into the mac table due to the
8126 		 * umv space limitation after a global/imp reset, which clears
8127 		 * the mac table in hardware.
8128 		 */
8129 		list_move(&new_node->node, list);
8130 	}
8131 
8132 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8133 		old_node = hclge_find_mac_node(list, old_addr);
8134 		if (old_node) {
8135 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8136 				list_del(&old_node->node);
8137 				kfree(old_node);
8138 			} else {
8139 				old_node->state = HCLGE_MAC_TO_DEL;
8140 			}
8141 		}
8142 	}
8143 
8144 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8145 
8146 	return 0;
8147 }
8148 
8149 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8150 			      bool is_first)
8151 {
8152 	const unsigned char *new_addr = (const unsigned char *)p;
8153 	struct hclge_vport *vport = hclge_get_vport(handle);
8154 	struct hclge_dev *hdev = vport->back;
8155 	unsigned char *old_addr = NULL;
8156 	int ret;
8157 
8158 	/* mac addr check */
8159 	if (is_zero_ether_addr(new_addr) ||
8160 	    is_broadcast_ether_addr(new_addr) ||
8161 	    is_multicast_ether_addr(new_addr)) {
8162 		dev_err(&hdev->pdev->dev,
8163 			"change uc mac err! invalid mac: %pM.\n",
8164 			 new_addr);
8165 		return -EINVAL;
8166 	}
8167 
8168 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8169 	if (ret) {
8170 		dev_err(&hdev->pdev->dev,
8171 			"failed to configure mac pause address, ret = %d\n",
8172 			ret);
8173 		return ret;
8174 	}
8175 
8176 	if (!is_first)
8177 		old_addr = hdev->hw.mac.mac_addr;
8178 
8179 	spin_lock_bh(&vport->mac_list_lock);
8180 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8181 	if (ret) {
8182 		dev_err(&hdev->pdev->dev,
8183 			"failed to change the mac addr:%pM, ret = %d\n",
8184 			new_addr, ret);
8185 		spin_unlock_bh(&vport->mac_list_lock);
8186 
8187 		if (!is_first)
8188 			hclge_pause_addr_cfg(hdev, old_addr);
8189 
8190 		return ret;
8191 	}
8192 	/* we must update the dev addr under the spin lock to prevent it
8193 	 * from being removed by the set_rx_mode path.
8194 	 */
8195 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8196 	spin_unlock_bh(&vport->mac_list_lock);
8197 
8198 	hclge_task_schedule(hdev, 0);
8199 
8200 	return 0;
8201 }
8202 
8203 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8204 			  int cmd)
8205 {
8206 	struct hclge_vport *vport = hclge_get_vport(handle);
8207 	struct hclge_dev *hdev = vport->back;
8208 
8209 	if (!hdev->hw.mac.phydev)
8210 		return -EOPNOTSUPP;
8211 
8212 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8213 }
8214 
8215 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8216 				      u8 fe_type, bool filter_en, u8 vf_id)
8217 {
8218 	struct hclge_vlan_filter_ctrl_cmd *req;
8219 	struct hclge_desc desc;
8220 	int ret;
8221 
8222 	/* read current vlan filter parameter */
8223 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8224 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8225 	req->vlan_type = vlan_type;
8226 	req->vf_id = vf_id;
8227 
8228 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8229 	if (ret) {
8230 		dev_err(&hdev->pdev->dev,
8231 			"failed to get vlan filter config, ret = %d.\n", ret);
8232 		return ret;
8233 	}
8234 
8235 	/* modify and write new config parameter */
8236 	hclge_cmd_reuse_desc(&desc, false);
8237 	req->vlan_fe = filter_en ?
8238 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8239 
8240 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8241 	if (ret)
8242 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8243 			ret);
8244 
8245 	return ret;
8246 }
8247 
8248 #define HCLGE_FILTER_TYPE_VF		0
8249 #define HCLGE_FILTER_TYPE_PORT		1
8250 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8251 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8252 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8253 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8254 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8255 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8256 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8257 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8258 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8259 
8260 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8261 {
8262 	struct hclge_vport *vport = hclge_get_vport(handle);
8263 	struct hclge_dev *hdev = vport->back;
8264 
8265 	if (hdev->pdev->revision >= 0x21) {
8266 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8267 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8268 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8269 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8270 	} else {
8271 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8272 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8273 					   0);
8274 	}
8275 	if (enable)
8276 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8277 	else
8278 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8279 }
8280 
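/* Add or remove (@is_kill) @vlan in the vlan filter table of function @vfid
 * using the two-descriptor VLAN_FILTER_VF_CFG command, and interpret the
 * resp_code of the first descriptor to handle the table-full and
 * entry-not-found cases.
 */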
8281 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8282 				    bool is_kill, u16 vlan,
8283 				    __be16 proto)
8284 {
8285 	struct hclge_vport *vport = &hdev->vport[vfid];
8286 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8287 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8288 	struct hclge_desc desc[2];
8289 	u8 vf_byte_val;
8290 	u8 vf_byte_off;
8291 	int ret;
8292 
8293 	/* if the vf vlan table is full, the firmware disables the vf vlan
8294 	 * filter, making it neither possible nor necessary to add a new
8295 	 * vlan id. But if spoof check is enabled and the vf vlan table is
8296 	 * full, adding must fail: tx packets with that vlan would be dropped.
8297 	 */
8298 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8299 		if (vport->vf_info.spoofchk && vlan) {
8300 			dev_err(&hdev->pdev->dev,
8301 				"Can't add vlan because spoof check is on and vf vlan table is full\n");
8302 			return -EPERM;
8303 		}
8304 		return 0;
8305 	}
8306 
8307 	hclge_cmd_setup_basic_desc(&desc[0],
8308 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8309 	hclge_cmd_setup_basic_desc(&desc[1],
8310 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8311 
8312 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8313 
8314 	vf_byte_off = vfid / 8;
8315 	vf_byte_val = 1 << (vfid % 8);
8316 
8317 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8318 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8319 
8320 	req0->vlan_id  = cpu_to_le16(vlan);
8321 	req0->vlan_cfg = is_kill;
8322 
8323 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8324 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8325 	else
8326 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8327 
8328 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8329 	if (ret) {
8330 		dev_err(&hdev->pdev->dev,
8331 			"Send vf vlan command fail, ret =%d.\n",
8332 			ret);
8333 		return ret;
8334 	}
8335 
8336 	if (!is_kill) {
8337 #define HCLGE_VF_VLAN_NO_ENTRY	2
8338 		if (!req0->resp_code || req0->resp_code == 1)
8339 			return 0;
8340 
8341 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8342 			set_bit(vfid, hdev->vf_vlan_full);
8343 			dev_warn(&hdev->pdev->dev,
8344 				 "vf vlan table is full, vf vlan filter is disabled\n");
8345 			return 0;
8346 		}
8347 
8348 		dev_err(&hdev->pdev->dev,
8349 			"Add vf vlan filter fail, ret =%u.\n",
8350 			req0->resp_code);
8351 	} else {
8352 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8353 		if (!req0->resp_code)
8354 			return 0;
8355 
8356 		/* the vf vlan filter is disabled when the vf vlan table is
8357 		 * full, so new vlan ids are not added into the vf vlan table.
8358 		 * Just return 0 without a warning, to avoid a flood of
8359 		 * verbose logs on unload.
8360 		 */
8361 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8362 			return 0;
8363 
8364 		dev_err(&hdev->pdev->dev,
8365 			"Kill vf vlan filter fail, ret =%u.\n",
8366 			req0->resp_code);
8367 	}
8368 
8369 	return -EIO;
8370 }
8371 
8372 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8373 				      u16 vlan_id, bool is_kill)
8374 {
8375 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8376 	struct hclge_desc desc;
8377 	u8 vlan_offset_byte_val;
8378 	u8 vlan_offset_byte;
8379 	u8 vlan_offset_160;
8380 	int ret;
8381 
8382 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8383 
8384 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8385 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8386 			   HCLGE_VLAN_BYTE_SIZE;
8387 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8388 
8389 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8390 	req->vlan_offset = vlan_offset_160;
8391 	req->vlan_cfg = is_kill;
8392 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8393 
8394 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8395 	if (ret)
8396 		dev_err(&hdev->pdev->dev,
8397 			"port vlan command, send fail, ret =%d.\n", ret);
8398 	return ret;
8399 }
8400 
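/* Apply a vlan filter change for @vport_id: the per-function vlan table is
 * always updated, while the port vlan table is only touched when the first
 * vport joins @vlan_id or the last one leaves it. Removing vlan 0 is a no-op.
 */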
8401 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8402 				    u16 vport_id, u16 vlan_id,
8403 				    bool is_kill)
8404 {
8405 	u16 vport_idx, vport_num = 0;
8406 	int ret;
8407 
8408 	if (is_kill && !vlan_id)
8409 		return 0;
8410 
8411 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8412 				       proto);
8413 	if (ret) {
8414 		dev_err(&hdev->pdev->dev,
8415 			"Set %u vport vlan filter config fail, ret =%d.\n",
8416 			vport_id, ret);
8417 		return ret;
8418 	}
8419 
8420 	/* vlan 0 may be added twice when 8021q module is enabled */
8421 	if (!is_kill && !vlan_id &&
8422 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8423 		return 0;
8424 
8425 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8426 		dev_err(&hdev->pdev->dev,
8427 			"Add port vlan failed, vport %u is already in vlan %u\n",
8428 			vport_id, vlan_id);
8429 		return -EINVAL;
8430 	}
8431 
8432 	if (is_kill &&
8433 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8434 		dev_err(&hdev->pdev->dev,
8435 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8436 			vport_id, vlan_id);
8437 		return -EINVAL;
8438 	}
8439 
8440 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8441 		vport_num++;
8442 
8443 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8444 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8445 						 is_kill);
8446 
8447 	return ret;
8448 }
8449 
8450 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8451 {
8452 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8453 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8454 	struct hclge_dev *hdev = vport->back;
8455 	struct hclge_desc desc;
8456 	u16 bmap_index;
8457 	int status;
8458 
8459 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8460 
8461 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8462 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8463 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8464 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8465 		      vcfg->accept_tag1 ? 1 : 0);
8466 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8467 		      vcfg->accept_untag1 ? 1 : 0);
8468 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8469 		      vcfg->accept_tag2 ? 1 : 0);
8470 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8471 		      vcfg->accept_untag2 ? 1 : 0);
8472 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8473 		      vcfg->insert_tag1_en ? 1 : 0);
8474 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8475 		      vcfg->insert_tag2_en ? 1 : 0);
8476 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8477 
8478 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8479 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8480 			HCLGE_VF_NUM_PER_BYTE;
8481 	req->vf_bitmap[bmap_index] =
8482 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8483 
8484 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8485 	if (status)
8486 		dev_err(&hdev->pdev->dev,
8487 			"Send port txvlan cfg command fail, ret =%d\n",
8488 			status);
8489 
8490 	return status;
8491 }
8492 
8493 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8494 {
8495 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8496 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8497 	struct hclge_dev *hdev = vport->back;
8498 	struct hclge_desc desc;
8499 	u16 bmap_index;
8500 	int status;
8501 
8502 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8503 
8504 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8505 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8506 		      vcfg->strip_tag1_en ? 1 : 0);
8507 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8508 		      vcfg->strip_tag2_en ? 1 : 0);
8509 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8510 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8511 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8512 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8513 
8514 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8515 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8516 			HCLGE_VF_NUM_PER_BYTE;
8517 	req->vf_bitmap[bmap_index] =
8518 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8519 
8520 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8521 	if (status)
8522 		dev_err(&hdev->pdev->dev,
8523 			"Send port rxvlan cfg command fail, ret =%d\n",
8524 			status);
8525 
8526 	return status;
8527 }
8528 
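/* Program tx/rx vlan tag offload according to the port based vlan state: with
 * a port based vlan enabled the default tag1 is inserted on tx and tag1
 * stripping on rx follows rx_vlan_offload_en; when disabled, tag1 passes
 * through on tx and only tag2 stripping follows rx_vlan_offload_en.
 */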
8529 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8530 				  u16 port_base_vlan_state,
8531 				  u16 vlan_tag)
8532 {
8533 	int ret;
8534 
8535 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8536 		vport->txvlan_cfg.accept_tag1 = true;
8537 		vport->txvlan_cfg.insert_tag1_en = false;
8538 		vport->txvlan_cfg.default_tag1 = 0;
8539 	} else {
8540 		vport->txvlan_cfg.accept_tag1 = false;
8541 		vport->txvlan_cfg.insert_tag1_en = true;
8542 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8543 	}
8544 
8545 	vport->txvlan_cfg.accept_untag1 = true;
8546 
8547 	/* accept_tag2 and accept_untag2 are not supported on
8548 	 * pdev revision 0x20; newer revisions support them, but
8549 	 * these two fields cannot be configured by the user.
8550 	 */
8551 	vport->txvlan_cfg.accept_tag2 = true;
8552 	vport->txvlan_cfg.accept_untag2 = true;
8553 	vport->txvlan_cfg.insert_tag2_en = false;
8554 	vport->txvlan_cfg.default_tag2 = 0;
8555 
8556 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8557 		vport->rxvlan_cfg.strip_tag1_en = false;
8558 		vport->rxvlan_cfg.strip_tag2_en =
8559 				vport->rxvlan_cfg.rx_vlan_offload_en;
8560 	} else {
8561 		vport->rxvlan_cfg.strip_tag1_en =
8562 				vport->rxvlan_cfg.rx_vlan_offload_en;
8563 		vport->rxvlan_cfg.strip_tag2_en = true;
8564 	}
8565 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8566 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8567 
8568 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8569 	if (ret)
8570 		return ret;
8571 
8572 	return hclge_set_vlan_rx_offload_cfg(vport);
8573 }
8574 
8575 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8576 {
8577 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8578 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8579 	struct hclge_desc desc;
8580 	int status;
8581 
8582 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8583 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8584 	rx_req->ot_fst_vlan_type =
8585 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8586 	rx_req->ot_sec_vlan_type =
8587 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8588 	rx_req->in_fst_vlan_type =
8589 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8590 	rx_req->in_sec_vlan_type =
8591 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8592 
8593 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8594 	if (status) {
8595 		dev_err(&hdev->pdev->dev,
8596 			"Send rxvlan protocol type command fail, ret =%d\n",
8597 			status);
8598 		return status;
8599 	}
8600 
8601 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8602 
8603 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8604 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8605 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8606 
8607 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8608 	if (status)
8609 		dev_err(&hdev->pdev->dev,
8610 			"Send txvlan protocol type command fail, ret =%d\n",
8611 			status);
8612 
8613 	return status;
8614 }
8615 
8616 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8617 {
8618 #define HCLGE_DEF_VLAN_TYPE		0x8100
8619 
8620 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8621 	struct hclge_vport *vport;
8622 	int ret;
8623 	int i;
8624 
8625 	if (hdev->pdev->revision >= 0x21) {
8626 		/* for revision 0x21, vf vlan filter is per function */
8627 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8628 			vport = &hdev->vport[i];
8629 			ret = hclge_set_vlan_filter_ctrl(hdev,
8630 							 HCLGE_FILTER_TYPE_VF,
8631 							 HCLGE_FILTER_FE_EGRESS,
8632 							 true,
8633 							 vport->vport_id);
8634 			if (ret)
8635 				return ret;
8636 		}
8637 
8638 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8639 						 HCLGE_FILTER_FE_INGRESS, true,
8640 						 0);
8641 		if (ret)
8642 			return ret;
8643 	} else {
8644 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8645 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8646 						 true, 0);
8647 		if (ret)
8648 			return ret;
8649 	}
8650 
8651 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8652 
8653 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8654 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8655 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8656 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8657 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8658 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8659 
8660 	ret = hclge_set_vlan_protocol_type(hdev);
8661 	if (ret)
8662 		return ret;
8663 
8664 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8665 		u16 vlan_tag;
8666 
8667 		vport = &hdev->vport[i];
8668 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8669 
8670 		ret = hclge_vlan_offload_cfg(vport,
8671 					     vport->port_base_vlan_cfg.state,
8672 					     vlan_tag);
8673 		if (ret)
8674 			return ret;
8675 	}
8676 
8677 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8678 }
8679 
8680 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8681 				       bool writen_to_tbl)
8682 {
8683 	struct hclge_vport_vlan_cfg *vlan;
8684 
8685 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8686 	if (!vlan)
8687 		return;
8688 
8689 	vlan->hd_tbl_status = writen_to_tbl;
8690 	vlan->vlan_id = vlan_id;
8691 
8692 	list_add_tail(&vlan->node, &vport->vlan_list);
8693 }
8694 
8695 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8696 {
8697 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8698 	struct hclge_dev *hdev = vport->back;
8699 	int ret;
8700 
8701 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8702 		if (!vlan->hd_tbl_status) {
8703 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8704 						       vport->vport_id,
8705 						       vlan->vlan_id, false);
8706 			if (ret) {
8707 				dev_err(&hdev->pdev->dev,
8708 					"restore vport vlan list failed, ret=%d\n",
8709 					ret);
8710 				return ret;
8711 			}
8712 		}
8713 		vlan->hd_tbl_status = true;
8714 	}
8715 
8716 	return 0;
8717 }
8718 
8719 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8720 				      bool is_write_tbl)
8721 {
8722 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8723 	struct hclge_dev *hdev = vport->back;
8724 
8725 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8726 		if (vlan->vlan_id == vlan_id) {
8727 			if (is_write_tbl && vlan->hd_tbl_status)
8728 				hclge_set_vlan_filter_hw(hdev,
8729 							 htons(ETH_P_8021Q),
8730 							 vport->vport_id,
8731 							 vlan_id,
8732 							 true);
8733 
8734 			list_del(&vlan->node);
8735 			kfree(vlan);
8736 			break;
8737 		}
8738 	}
8739 }
8740 
8741 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8742 {
8743 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8744 	struct hclge_dev *hdev = vport->back;
8745 
8746 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8747 		if (vlan->hd_tbl_status)
8748 			hclge_set_vlan_filter_hw(hdev,
8749 						 htons(ETH_P_8021Q),
8750 						 vport->vport_id,
8751 						 vlan->vlan_id,
8752 						 true);
8753 
8754 		vlan->hd_tbl_status = false;
8755 		if (is_del_list) {
8756 			list_del(&vlan->node);
8757 			kfree(vlan);
8758 		}
8759 	}
8760 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
8761 }
8762 
8763 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8764 {
8765 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8766 	struct hclge_vport *vport;
8767 	int i;
8768 
8769 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8770 		vport = &hdev->vport[i];
8771 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8772 			list_del(&vlan->node);
8773 			kfree(vlan);
8774 		}
8775 	}
8776 }
8777 
8778 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8779 {
8780 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8781 	struct hclge_dev *hdev = vport->back;
8782 	u16 vlan_proto;
8783 	u16 vlan_id;
8784 	u16 state;
8785 	int ret;
8786 
8787 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8788 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8789 	state = vport->port_base_vlan_cfg.state;
8790 
8791 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8792 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8793 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8794 					 vport->vport_id, vlan_id,
8795 					 false);
8796 		return;
8797 	}
8798 
8799 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8800 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8801 					       vport->vport_id,
8802 					       vlan->vlan_id, false);
8803 		if (ret)
8804 			break;
8805 		vlan->hd_tbl_status = true;
8806 	}
8807 }
8808 
8809 /* For global reset and imp reset, the hardware clears the mac table,
8810  * so we change the mac address state from ACTIVE to TO_ADD; the addresses
8811  * can then be restored by the service task after the reset completes.
8812  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need to
8813  * be restored after reset, so just remove these mac nodes from mac_list.
8814  */
8815 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8816 {
8817 	struct hclge_mac_node *mac_node, *tmp;
8818 
8819 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8820 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
8821 			mac_node->state = HCLGE_MAC_TO_ADD;
8822 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8823 			list_del(&mac_node->node);
8824 			kfree(mac_node);
8825 		}
8826 	}
8827 }
8828 
8829 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8830 {
8831 	spin_lock_bh(&vport->mac_list_lock);
8832 
8833 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8834 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8835 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8836 
8837 	spin_unlock_bh(&vport->mac_list_lock);
8838 }
8839 
8840 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8841 {
8842 	struct hclge_vport *vport = &hdev->vport[0];
8843 	struct hnae3_handle *handle = &vport->nic;
8844 
8845 	hclge_restore_mac_table_common(vport);
8846 	hclge_restore_vport_vlan_table(vport);
8847 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8848 
8849 	hclge_restore_fd_entries(handle);
8850 }
8851 
8852 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8853 {
8854 	struct hclge_vport *vport = hclge_get_vport(handle);
8855 
8856 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8857 		vport->rxvlan_cfg.strip_tag1_en = false;
8858 		vport->rxvlan_cfg.strip_tag2_en = enable;
8859 	} else {
8860 		vport->rxvlan_cfg.strip_tag1_en = enable;
8861 		vport->rxvlan_cfg.strip_tag2_en = true;
8862 	}
8863 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8864 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8865 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8866 
8867 	return hclge_set_vlan_rx_offload_cfg(vport);
8868 }
8869 
8870 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8871 					    u16 port_base_vlan_state,
8872 					    struct hclge_vlan_info *new_info,
8873 					    struct hclge_vlan_info *old_info)
8874 {
8875 	struct hclge_dev *hdev = vport->back;
8876 	int ret;
8877 
8878 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8879 		hclge_rm_vport_all_vlan_table(vport, false);
8880 		return hclge_set_vlan_filter_hw(hdev,
8881 						 htons(new_info->vlan_proto),
8882 						 vport->vport_id,
8883 						 new_info->vlan_tag,
8884 						 false);
8885 	}
8886 
8887 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8888 				       vport->vport_id, old_info->vlan_tag,
8889 				       true);
8890 	if (ret)
8891 		return ret;
8892 
8893 	return hclge_add_vport_all_vlan_table(vport);
8894 }
8895 
8896 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8897 				    struct hclge_vlan_info *vlan_info)
8898 {
8899 	struct hnae3_handle *nic = &vport->nic;
8900 	struct hclge_vlan_info *old_vlan_info;
8901 	struct hclge_dev *hdev = vport->back;
8902 	int ret;
8903 
8904 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8905 
8906 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8907 	if (ret)
8908 		return ret;
8909 
8910 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8911 		/* add new VLAN tag */
8912 		ret = hclge_set_vlan_filter_hw(hdev,
8913 					       htons(vlan_info->vlan_proto),
8914 					       vport->vport_id,
8915 					       vlan_info->vlan_tag,
8916 					       false);
8917 		if (ret)
8918 			return ret;
8919 
8920 		/* remove old VLAN tag */
8921 		ret = hclge_set_vlan_filter_hw(hdev,
8922 					       htons(old_vlan_info->vlan_proto),
8923 					       vport->vport_id,
8924 					       old_vlan_info->vlan_tag,
8925 					       true);
8926 		if (ret)
8927 			return ret;
8928 
8929 		goto update;
8930 	}
8931 
8932 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8933 					       old_vlan_info);
8934 	if (ret)
8935 		return ret;
8936 
8937 	/* update state only when disable/enable port based VLAN */
8938 	vport->port_base_vlan_cfg.state = state;
8939 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8940 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8941 	else
8942 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8943 
8944 update:
8945 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8946 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8947 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8948 
8949 	return 0;
8950 }
8951 
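/* Work out how the port based VLAN state changes for the requested VLAN:
 * setting a non-zero VLAN enables it, setting VLAN 0 disables it, setting
 * the currently configured VLAN again is a no-op, and setting a different
 * non-zero VLAN while enabled is a modify.
 */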
8952 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8953 					  enum hnae3_port_base_vlan_state state,
8954 					  u16 vlan)
8955 {
8956 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8957 		if (!vlan)
8958 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8959 		else
8960 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8961 	} else {
8962 		if (!vlan)
8963 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8964 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8965 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8966 		else
8967 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8968 	}
8969 }
8970 
8971 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8972 				    u16 vlan, u8 qos, __be16 proto)
8973 {
8974 	struct hclge_vport *vport = hclge_get_vport(handle);
8975 	struct hclge_dev *hdev = vport->back;
8976 	struct hclge_vlan_info vlan_info;
8977 	u16 state;
8979 
8980 	if (hdev->pdev->revision == 0x20)
8981 		return -EOPNOTSUPP;
8982 
8983 	vport = hclge_get_vf_vport(hdev, vfid);
8984 	if (!vport)
8985 		return -EINVAL;
8986 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8988 	if (vlan > VLAN_N_VID - 1 || qos > 7)
8989 		return -EINVAL;
8990 	if (proto != htons(ETH_P_8021Q))
8991 		return -EPROTONOSUPPORT;
8992 
8993 	state = hclge_get_port_base_vlan_state(vport,
8994 					       vport->port_base_vlan_cfg.state,
8995 					       vlan);
8996 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8997 		return 0;
8998 
8999 	vlan_info.vlan_tag = vlan;
9000 	vlan_info.qos = qos;
9001 	vlan_info.vlan_proto = ntohs(proto);
9002 
	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		return hclge_update_port_base_vlan_cfg(vport, state,
						       &vlan_info);

	return hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
						 vport->vport_id, state,
						 vlan, qos, ntohs(proto));
9013 }
9014 
9015 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9016 {
9017 	struct hclge_vlan_info *vlan_info;
9018 	struct hclge_vport *vport;
9019 	int ret;
9020 	int vf;
9021 
	/* clear the port based VLAN for all VFs */
9023 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9024 		vport = &hdev->vport[vf];
9025 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9026 
9027 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9028 					       vport->vport_id,
9029 					       vlan_info->vlan_tag, true);
9030 		if (ret)
9031 			dev_err(&hdev->pdev->dev,
9032 				"failed to clear vf vlan for vf%d, ret = %d\n",
9033 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9034 	}
9035 }
9036 
9037 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9038 			  u16 vlan_id, bool is_kill)
9039 {
9040 	struct hclge_vport *vport = hclge_get_vport(handle);
9041 	struct hclge_dev *hdev = vport->back;
	bool written_to_tbl = false;
9043 	int ret = 0;
9044 
	/* When the device is resetting or the reset has failed, the firmware
	 * is unable to handle the mailbox. Just record the vlan id, and
	 * remove it after the reset is finished.
	 */
9049 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9050 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9051 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9052 		return -EBUSY;
9053 	}
9054 
	/* When port based VLAN is enabled, we use the port based VLAN as the
	 * VLAN filter entry. In this case, we don't update the VLAN filter
	 * table when the user adds a new VLAN or removes an existing one;
	 * we just update the vport VLAN list. The VLAN ids in the VLAN list
	 * are not written to the VLAN filter table until port based VLAN is
	 * disabled.
	 */
9061 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9062 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9063 					       vlan_id, is_kill);
		written_to_tbl = true;
9065 	}
9066 
9067 	if (!ret) {
9068 		if (is_kill)
9069 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9070 		else
9071 			hclge_add_vport_vlan_table(vport, vlan_id,
						   written_to_tbl);
9073 	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan
		 * id, and try to remove it from hw later, to be consistent
		 * with the stack
		 */
9078 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9079 	}
9080 	return ret;
9081 }
9082 
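/* Retry removal of VLAN filter entries whose deletion failed earlier (for
 * example while a reset was in progress); the failed ids are recorded in
 * each vport's vlan_del_fail_bmap. The work is bounded per invocation by
 * HCLGE_MAX_SYNC_COUNT.
 */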
9083 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9084 {
9085 #define HCLGE_MAX_SYNC_COUNT	60
9086 
9087 	int i, ret, sync_cnt = 0;
9088 	u16 vlan_id;
9089 
	/* the PF (vport 0) is always alive, so the VF vports are the main
	 * concern here
	 */
9091 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9092 		struct hclge_vport *vport = &hdev->vport[i];
9093 
9094 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9095 					 VLAN_N_VID);
9096 		while (vlan_id != VLAN_N_VID) {
9097 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9098 						       vport->vport_id, vlan_id,
9099 						       true);
9100 			if (ret && ret != -EINVAL)
9101 				return;
9102 
9103 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9104 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9105 
9106 			sync_cnt++;
9107 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9108 				return;
9109 
9110 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9111 						 VLAN_N_VID);
9112 		}
9113 	}
9114 }
9115 
9116 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9117 {
9118 	struct hclge_config_max_frm_size_cmd *req;
9119 	struct hclge_desc desc;
9120 
9121 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9122 
9123 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9124 	req->max_frm_size = cpu_to_le16(new_mps);
9125 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9126 
9127 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9128 }
9129 
9130 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9131 {
9132 	struct hclge_vport *vport = hclge_get_vport(handle);
9133 
9134 	return hclge_set_vport_mtu(vport, new_mtu);
9135 }
9136 
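/* Convert the requested MTU to a max frame size (adding the Ethernet header,
 * FCS and room for two VLAN tags) and apply it. A VF only records its mps,
 * which must fit within the PF's; the PF additionally reprograms the MAC and
 * reallocates the packet buffers with the NIC client paused, and its mps
 * must cover every VF's mps.
 */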
9137 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9138 {
9139 	struct hclge_dev *hdev = vport->back;
9140 	int i, max_frm_size, ret;
9141 
	/* the HW supports 2 layers of VLAN tags */
9143 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9144 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9145 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
9146 		return -EINVAL;
9147 
9148 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9149 	mutex_lock(&hdev->vport_lock);
9150 	/* VF's mps must fit within hdev->mps */
9151 	if (vport->vport_id && max_frm_size > hdev->mps) {
9152 		mutex_unlock(&hdev->vport_lock);
9153 		return -EINVAL;
9154 	} else if (vport->vport_id) {
9155 		vport->mps = max_frm_size;
9156 		mutex_unlock(&hdev->vport_lock);
9157 		return 0;
9158 	}
9159 
	/* the PF's mps must be no less than the VFs' mps */
9161 	for (i = 1; i < hdev->num_alloc_vport; i++)
9162 		if (max_frm_size < hdev->vport[i].mps) {
9163 			mutex_unlock(&hdev->vport_lock);
9164 			return -EINVAL;
9165 		}
9166 
9167 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9168 
9169 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9170 	if (ret) {
9171 		dev_err(&hdev->pdev->dev,
9172 			"Change mtu fail, ret =%d\n", ret);
9173 		goto out;
9174 	}
9175 
9176 	hdev->mps = max_frm_size;
9177 	vport->mps = max_frm_size;
9178 
9179 	ret = hclge_buffer_alloc(hdev);
9180 	if (ret)
9181 		dev_err(&hdev->pdev->dev,
9182 			"Allocate buffer fail, ret =%d\n", ret);
9183 
9184 out:
9185 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9186 	mutex_unlock(&hdev->vport_lock);
9187 	return ret;
9188 }
9189 
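/* Assert (enable == true) or deassert the per-queue soft reset for the given
 * TQP through the firmware command queue.
 */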
9190 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9191 				    bool enable)
9192 {
9193 	struct hclge_reset_tqp_queue_cmd *req;
9194 	struct hclge_desc desc;
9195 	int ret;
9196 
9197 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9198 
9199 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9200 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9201 	if (enable)
9202 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9203 
9204 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9205 	if (ret) {
9206 		dev_err(&hdev->pdev->dev,
9207 			"Send tqp reset cmd error, status =%d\n", ret);
9208 		return ret;
9209 	}
9210 
9211 	return 0;
9212 }
9213 
9214 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9215 {
9216 	struct hclge_reset_tqp_queue_cmd *req;
9217 	struct hclge_desc desc;
9218 	int ret;
9219 
9220 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9221 
9222 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9223 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9224 
9225 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9226 	if (ret) {
9227 		dev_err(&hdev->pdev->dev,
9228 			"Get reset status error, status =%d\n", ret);
9229 		return ret;
9230 	}
9231 
9232 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9233 }
9234 
9235 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9236 {
9237 	struct hnae3_queue *queue;
9238 	struct hclge_tqp *tqp;
9239 
9240 	queue = handle->kinfo.tqp[queue_id];
9241 	tqp = container_of(queue, struct hclge_tqp, q);
9242 
9243 	return tqp->index;
9244 }
9245 
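/* Reset a single TQP: disable the queue, assert the per-queue reset via
 * firmware, poll the reset status until hardware reports it is ready (or the
 * retry limit is hit), then deassert the reset.
 */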
9246 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9247 {
9248 	struct hclge_vport *vport = hclge_get_vport(handle);
9249 	struct hclge_dev *hdev = vport->back;
9250 	int reset_try_times = 0;
9251 	int reset_status;
9252 	u16 queue_gid;
9253 	int ret;
9254 
9255 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9256 
9257 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9258 	if (ret) {
9259 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9260 		return ret;
9261 	}
9262 
9263 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9264 	if (ret) {
9265 		dev_err(&hdev->pdev->dev,
9266 			"Send reset tqp cmd fail, ret = %d\n", ret);
9267 		return ret;
9268 	}
9269 
9270 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9271 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9272 		if (reset_status)
9273 			break;
9274 
9275 		/* Wait for tqp hw reset */
9276 		usleep_range(1000, 1200);
9277 	}
9278 
9279 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9280 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return -ETIME;
9282 	}
9283 
9284 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9285 	if (ret)
9286 		dev_err(&hdev->pdev->dev,
9287 			"Deassert the soft reset fail, ret = %d\n", ret);
9288 
9289 	return ret;
9290 }
9291 
9292 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9293 {
9294 	struct hclge_dev *hdev = vport->back;
9295 	int reset_try_times = 0;
9296 	int reset_status;
9297 	u16 queue_gid;
9298 	int ret;
9299 
9300 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9301 
9302 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9303 	if (ret) {
9304 		dev_warn(&hdev->pdev->dev,
9305 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9306 		return;
9307 	}
9308 
9309 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9310 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9311 		if (reset_status)
9312 			break;
9313 
9314 		/* Wait for tqp hw reset */
9315 		usleep_range(1000, 1200);
9316 	}
9317 
9318 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9319 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9320 		return;
9321 	}
9322 
9323 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9324 	if (ret)
9325 		dev_warn(&hdev->pdev->dev,
9326 			 "Deassert the soft reset fail, ret = %d\n", ret);
9327 }
9328 
9329 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9330 {
9331 	struct hclge_vport *vport = hclge_get_vport(handle);
9332 	struct hclge_dev *hdev = vport->back;
9333 
9334 	return hdev->fw_version;
9335 }
9336 
9337 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9338 {
9339 	struct phy_device *phydev = hdev->hw.mac.phydev;
9340 
9341 	if (!phydev)
9342 		return;
9343 
9344 	phy_set_asym_pause(phydev, rx_en, tx_en);
9345 }
9346 
9347 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9348 {
9349 	int ret;
9350 
9351 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9352 		return 0;
9353 
9354 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9355 	if (ret)
9356 		dev_err(&hdev->pdev->dev,
9357 			"configure pauseparam error, ret = %d.\n", ret);
9358 
9359 	return ret;
9360 }
9361 
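/* Resolve the pause configuration from the PHY autoneg result: combine the
 * local and link partner pause advertisements, force pause off for half
 * duplex, and program the resolved rx/tx pause setting into the MAC.
 */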
9362 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9363 {
9364 	struct phy_device *phydev = hdev->hw.mac.phydev;
9365 	u16 remote_advertising = 0;
9366 	u16 local_advertising;
9367 	u32 rx_pause, tx_pause;
9368 	u8 flowctl;
9369 
9370 	if (!phydev->link || !phydev->autoneg)
9371 		return 0;
9372 
9373 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9374 
9375 	if (phydev->pause)
9376 		remote_advertising = LPA_PAUSE_CAP;
9377 
9378 	if (phydev->asym_pause)
9379 		remote_advertising |= LPA_PAUSE_ASYM;
9380 
9381 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9382 					   remote_advertising);
9383 	tx_pause = flowctl & FLOW_CTRL_TX;
9384 	rx_pause = flowctl & FLOW_CTRL_RX;
9385 
9386 	if (phydev->duplex == HCLGE_MAC_HALF) {
9387 		tx_pause = 0;
9388 		rx_pause = 0;
9389 	}
9390 
9391 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9392 }
9393 
9394 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9395 				 u32 *rx_en, u32 *tx_en)
9396 {
9397 	struct hclge_vport *vport = hclge_get_vport(handle);
9398 	struct hclge_dev *hdev = vport->back;
9399 	struct phy_device *phydev = hdev->hw.mac.phydev;
9400 
9401 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9402 
9403 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9404 		*rx_en = 0;
9405 		*tx_en = 0;
9406 		return;
9407 	}
9408 
9409 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9410 		*rx_en = 1;
9411 		*tx_en = 0;
9412 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9413 		*tx_en = 1;
9414 		*rx_en = 0;
9415 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9416 		*rx_en = 1;
9417 		*tx_en = 1;
9418 	} else {
9419 		*rx_en = 0;
9420 		*tx_en = 0;
9421 	}
9422 }
9423 
9424 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9425 					 u32 rx_en, u32 tx_en)
9426 {
9427 	if (rx_en && tx_en)
9428 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9429 	else if (rx_en && !tx_en)
9430 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9431 	else if (!rx_en && tx_en)
9432 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9433 	else
9434 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9435 
9436 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9437 }
9438 
9439 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9440 				u32 rx_en, u32 tx_en)
9441 {
9442 	struct hclge_vport *vport = hclge_get_vport(handle);
9443 	struct hclge_dev *hdev = vport->back;
9444 	struct phy_device *phydev = hdev->hw.mac.phydev;
9445 	u32 fc_autoneg;
9446 
9447 	if (phydev) {
9448 		fc_autoneg = hclge_get_autoneg(handle);
9449 		if (auto_neg != fc_autoneg) {
9450 			dev_info(&hdev->pdev->dev,
9451 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9452 			return -EOPNOTSUPP;
9453 		}
9454 	}
9455 
9456 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9457 		dev_info(&hdev->pdev->dev,
9458 			 "Priority flow control enabled. Cannot set link flow control.\n");
9459 		return -EOPNOTSUPP;
9460 	}
9461 
9462 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9463 
9464 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9465 
9466 	if (!auto_neg)
9467 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9468 
9469 	if (phydev)
9470 		return phy_start_aneg(phydev);
9471 
9472 	return -EOPNOTSUPP;
9473 }
9474 
9475 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9476 					  u8 *auto_neg, u32 *speed, u8 *duplex)
9477 {
9478 	struct hclge_vport *vport = hclge_get_vport(handle);
9479 	struct hclge_dev *hdev = vport->back;
9480 
9481 	if (speed)
9482 		*speed = hdev->hw.mac.speed;
9483 	if (duplex)
9484 		*duplex = hdev->hw.mac.duplex;
9485 	if (auto_neg)
9486 		*auto_neg = hdev->hw.mac.autoneg;
9487 }
9488 
9489 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9490 				 u8 *module_type)
9491 {
9492 	struct hclge_vport *vport = hclge_get_vport(handle);
9493 	struct hclge_dev *hdev = vport->back;
9494 
	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port
	 * information before returning the media type, to ensure the media
	 * information is correct.
	 */
9499 	hclge_update_port_info(hdev);
9500 
9501 	if (media_type)
9502 		*media_type = hdev->hw.mac.media_type;
9503 
9504 	if (module_type)
9505 		*module_type = hdev->hw.mac.module_type;
9506 }
9507 
9508 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9509 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
9510 {
9511 	struct hclge_vport *vport = hclge_get_vport(handle);
9512 	struct hclge_dev *hdev = vport->back;
9513 	struct phy_device *phydev = hdev->hw.mac.phydev;
9514 	int mdix_ctrl, mdix, is_resolved;
9515 	unsigned int retval;
9516 
9517 	if (!phydev) {
9518 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9519 		*tp_mdix = ETH_TP_MDI_INVALID;
9520 		return;
9521 	}
9522 
9523 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9524 
9525 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9526 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9527 				    HCLGE_PHY_MDIX_CTRL_S);
9528 
9529 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9530 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9531 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9532 
9533 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9534 
9535 	switch (mdix_ctrl) {
9536 	case 0x0:
9537 		*tp_mdix_ctrl = ETH_TP_MDI;
9538 		break;
9539 	case 0x1:
9540 		*tp_mdix_ctrl = ETH_TP_MDI_X;
9541 		break;
9542 	case 0x3:
9543 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9544 		break;
9545 	default:
9546 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9547 		break;
9548 	}
9549 
9550 	if (!is_resolved)
9551 		*tp_mdix = ETH_TP_MDI_INVALID;
9552 	else if (mdix)
9553 		*tp_mdix = ETH_TP_MDI_X;
9554 	else
9555 		*tp_mdix = ETH_TP_MDI;
9556 }
9557 
9558 static void hclge_info_show(struct hclge_dev *hdev)
9559 {
9560 	struct device *dev = &hdev->pdev->dev;
9561 
9562 	dev_info(dev, "PF info begin:\n");
9563 
	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Number of vmdq vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9570 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9571 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9572 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9573 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9574 	dev_info(dev, "This is %s PF\n",
9575 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9576 	dev_info(dev, "DCB %s\n",
9577 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9578 	dev_info(dev, "MQPRIO %s\n",
9579 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9580 
9581 	dev_info(dev, "PF info end.\n");
9582 }
9583 
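/* Initialize the NIC client instance for a vport. The reset counter is
 * sampled before init and checked afterwards, so that an initialization
 * that races with a reset is detected and rolled back. NIC hw error
 * interrupts are only enabled once the client is up.
 */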
9584 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9585 					  struct hclge_vport *vport)
9586 {
9587 	struct hnae3_client *client = vport->nic.client;
9588 	struct hclge_dev *hdev = ae_dev->priv;
9589 	int rst_cnt = hdev->rst_stats.reset_cnt;
9590 	int ret;
9591 
9592 	ret = client->ops->init_instance(&vport->nic);
9593 	if (ret)
9594 		return ret;
9595 
9596 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9597 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9598 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9599 		ret = -EBUSY;
9600 		goto init_nic_err;
9601 	}
9602 
9603 	/* Enable nic hw error interrupts */
9604 	ret = hclge_config_nic_hw_error(hdev, true);
9605 	if (ret) {
9606 		dev_err(&ae_dev->pdev->dev,
9607 			"fail(%d) to enable hw error interrupts\n", ret);
9608 		goto init_nic_err;
9609 	}
9610 
9611 	hnae3_set_client_init_flag(client, ae_dev, 1);
9612 
9613 	if (netif_msg_drv(&hdev->vport->nic))
9614 		hclge_info_show(hdev);
9615 
9616 	return ret;
9617 
9618 init_nic_err:
9619 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9620 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9621 		msleep(HCLGE_WAIT_RESET_DONE);
9622 
9623 	client->ops->uninit_instance(&vport->nic, 0);
9624 
9625 	return ret;
9626 }
9627 
9628 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9629 					   struct hclge_vport *vport)
9630 {
9631 	struct hclge_dev *hdev = ae_dev->priv;
9632 	struct hnae3_client *client;
9633 	int rst_cnt;
9634 	int ret;
9635 
9636 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9637 	    !hdev->nic_client)
9638 		return 0;
9639 
9640 	client = hdev->roce_client;
9641 	ret = hclge_init_roce_base_info(vport);
9642 	if (ret)
9643 		return ret;
9644 
9645 	rst_cnt = hdev->rst_stats.reset_cnt;
9646 	ret = client->ops->init_instance(&vport->roce);
9647 	if (ret)
9648 		return ret;
9649 
9650 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9651 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9652 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9653 		ret = -EBUSY;
9654 		goto init_roce_err;
9655 	}
9656 
9657 	/* Enable roce ras interrupts */
9658 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9659 	if (ret) {
9660 		dev_err(&ae_dev->pdev->dev,
9661 			"fail(%d) to enable roce ras interrupts\n", ret);
9662 		goto init_roce_err;
9663 	}
9664 
9665 	hnae3_set_client_init_flag(client, ae_dev, 1);
9666 
9667 	return 0;
9668 
9669 init_roce_err:
9670 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9671 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9672 		msleep(HCLGE_WAIT_RESET_DONE);
9673 
9674 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9675 
9676 	return ret;
9677 }
9678 
9679 static int hclge_init_client_instance(struct hnae3_client *client,
9680 				      struct hnae3_ae_dev *ae_dev)
9681 {
9682 	struct hclge_dev *hdev = ae_dev->priv;
9683 	struct hclge_vport *vport;
9684 	int i, ret;
9685 
	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9687 		vport = &hdev->vport[i];
9688 
9689 		switch (client->type) {
9690 		case HNAE3_CLIENT_KNIC:
9691 			hdev->nic_client = client;
9692 			vport->nic.client = client;
9693 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9694 			if (ret)
9695 				goto clear_nic;
9696 
9697 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9698 			if (ret)
9699 				goto clear_roce;
9700 
9701 			break;
9702 		case HNAE3_CLIENT_ROCE:
9703 			if (hnae3_dev_roce_supported(hdev)) {
9704 				hdev->roce_client = client;
9705 				vport->roce.client = client;
9706 			}
9707 
9708 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9709 			if (ret)
9710 				goto clear_roce;
9711 
9712 			break;
9713 		default:
9714 			return -EINVAL;
9715 		}
9716 	}
9717 
9718 	return 0;
9719 
9720 clear_nic:
9721 	hdev->nic_client = NULL;
9722 	vport->nic.client = NULL;
9723 	return ret;
9724 clear_roce:
9725 	hdev->roce_client = NULL;
9726 	vport->roce.client = NULL;
9727 	return ret;
9728 }
9729 
9730 static void hclge_uninit_client_instance(struct hnae3_client *client,
9731 					 struct hnae3_ae_dev *ae_dev)
9732 {
9733 	struct hclge_dev *hdev = ae_dev->priv;
9734 	struct hclge_vport *vport;
9735 	int i;
9736 
9737 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9738 		vport = &hdev->vport[i];
9739 		if (hdev->roce_client) {
9740 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9741 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9742 				msleep(HCLGE_WAIT_RESET_DONE);
9743 
9744 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9745 								0);
9746 			hdev->roce_client = NULL;
9747 			vport->roce.client = NULL;
9748 		}
9749 		if (client->type == HNAE3_CLIENT_ROCE)
9750 			return;
9751 		if (hdev->nic_client && client->ops->uninit_instance) {
9752 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9753 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9754 				msleep(HCLGE_WAIT_RESET_DONE);
9755 
9756 			client->ops->uninit_instance(&vport->nic, 0);
9757 			hdev->nic_client = NULL;
9758 			vport->nic.client = NULL;
9759 		}
9760 	}
9761 }
9762 
9763 static int hclge_pci_init(struct hclge_dev *hdev)
9764 {
9765 	struct pci_dev *pdev = hdev->pdev;
9766 	struct hclge_hw *hw;
9767 	int ret;
9768 
9769 	ret = pci_enable_device(pdev);
9770 	if (ret) {
9771 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9772 		return ret;
9773 	}
9774 
9775 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9776 	if (ret) {
9777 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9778 		if (ret) {
9779 			dev_err(&pdev->dev,
9780 				"can't set consistent PCI DMA");
9781 			goto err_disable_device;
9782 		}
9783 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9784 	}
9785 
9786 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9787 	if (ret) {
9788 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9789 		goto err_disable_device;
9790 	}
9791 
9792 	pci_set_master(pdev);
9793 	hw = &hdev->hw;
9794 	hw->io_base = pcim_iomap(pdev, 2, 0);
9795 	if (!hw->io_base) {
9796 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9797 		ret = -ENOMEM;
9798 		goto err_clr_master;
9799 	}
9800 
9801 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9802 
9803 	return 0;
9804 err_clr_master:
9805 	pci_clear_master(pdev);
9806 	pci_release_regions(pdev);
9807 err_disable_device:
9808 	pci_disable_device(pdev);
9809 
9810 	return ret;
9811 }
9812 
9813 static void hclge_pci_uninit(struct hclge_dev *hdev)
9814 {
9815 	struct pci_dev *pdev = hdev->pdev;
9816 
9817 	pcim_iounmap(pdev, hdev->hw.io_base);
9818 	pci_free_irq_vectors(pdev);
9819 	pci_clear_master(pdev);
9820 	pci_release_mem_regions(pdev);
9821 	pci_disable_device(pdev);
9822 }
9823 
9824 static void hclge_state_init(struct hclge_dev *hdev)
9825 {
9826 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9827 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9828 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9829 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9830 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9831 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9832 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9833 }
9834 
9835 static void hclge_state_uninit(struct hclge_dev *hdev)
9836 {
9837 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9838 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9839 
9840 	if (hdev->reset_timer.function)
9841 		del_timer_sync(&hdev->reset_timer);
9842 	if (hdev->service_task.work.func)
9843 		cancel_delayed_work_sync(&hdev->service_task);
9844 }
9845 
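/* Prepare the device for an FLR: take the reset semaphore and run the reset
 * prepare path, retrying a few times if the prepare fails or another reset
 * is pending. The misc vector is then disabled and HCLGE_STATE_CMD_DISABLE
 * is set until hclge_flr_done() rebuilds the device.
 */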
9846 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9847 {
9848 #define HCLGE_FLR_RETRY_WAIT_MS	500
9849 #define HCLGE_FLR_RETRY_CNT	5
9850 
9851 	struct hclge_dev *hdev = ae_dev->priv;
9852 	int retry_cnt = 0;
9853 	int ret;
9854 
9855 retry:
9856 	down(&hdev->reset_sem);
9857 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9858 	hdev->reset_type = HNAE3_FLR_RESET;
9859 	ret = hclge_reset_prepare(hdev);
9860 	if (ret || hdev->reset_pending) {
9861 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9862 			ret);
9863 		if (hdev->reset_pending ||
9864 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9865 			dev_err(&hdev->pdev->dev,
9866 				"reset_pending:0x%lx, retry_cnt:%d\n",
9867 				hdev->reset_pending, retry_cnt);
9868 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9869 			up(&hdev->reset_sem);
9870 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
9871 			goto retry;
9872 		}
9873 	}
9874 
9875 	/* disable misc vector before FLR done */
9876 	hclge_enable_vector(&hdev->misc_vector, false);
9877 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9878 	hdev->rst_stats.flr_rst_cnt++;
9879 }
9880 
9881 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9882 {
9883 	struct hclge_dev *hdev = ae_dev->priv;
9884 	int ret;
9885 
9886 	hclge_enable_vector(&hdev->misc_vector, true);
9887 
9888 	ret = hclge_reset_rebuild(hdev);
9889 	if (ret)
9890 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9891 
9892 	hdev->reset_type = HNAE3_NONE_RESET;
9893 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9894 	up(&hdev->reset_sem);
9895 }
9896 
9897 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9898 {
9899 	u16 i;
9900 
9901 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9902 		struct hclge_vport *vport = &hdev->vport[i];
9903 		int ret;
9904 
		/* Send cmd to clear VF's FUNC_RST_ING */
9906 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9907 		if (ret)
9908 			dev_warn(&hdev->pdev->dev,
9909 				 "clear vf(%u) rst failed %d!\n",
9910 				 vport->vport_id, ret);
9911 	}
9912 }
9913 
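/* Main initialization entry for the PF algorithm device: bring up PCI and
 * the firmware command interface, discover capabilities, then set up
 * interrupts, TQPs, vports, the MAC/MDIO, VLAN, scheduling, RSS, the manager
 * table and flow director, before finally setting up the reset timer and
 * scheduling the periodic service task.
 */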
9914 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9915 {
9916 	struct pci_dev *pdev = ae_dev->pdev;
9917 	struct hclge_dev *hdev;
9918 	int ret;
9919 
9920 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9921 	if (!hdev)
9922 		return -ENOMEM;
9923 
9924 	hdev->pdev = pdev;
9925 	hdev->ae_dev = ae_dev;
9926 	hdev->reset_type = HNAE3_NONE_RESET;
9927 	hdev->reset_level = HNAE3_FUNC_RESET;
9928 	ae_dev->priv = hdev;
9929 
	/* the HW supports 2 layers of VLAN tags */
9931 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9932 
9933 	mutex_init(&hdev->vport_lock);
9934 	spin_lock_init(&hdev->fd_rule_lock);
9935 	sema_init(&hdev->reset_sem, 1);
9936 
9937 	ret = hclge_pci_init(hdev);
9938 	if (ret)
9939 		goto out;
9940 
	/* Initialize the firmware command queue */
9942 	ret = hclge_cmd_queue_init(hdev);
9943 	if (ret)
9944 		goto err_pci_uninit;
9945 
	/* Initialize firmware command handling */
9947 	ret = hclge_cmd_init(hdev);
9948 	if (ret)
9949 		goto err_cmd_uninit;
9950 
9951 	ret = hclge_get_cap(hdev);
9952 	if (ret)
9953 		goto err_cmd_uninit;
9954 
9955 	ret = hclge_configure(hdev);
9956 	if (ret) {
9957 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9958 		goto err_cmd_uninit;
9959 	}
9960 
9961 	ret = hclge_init_msi(hdev);
9962 	if (ret) {
9963 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9964 		goto err_cmd_uninit;
9965 	}
9966 
9967 	ret = hclge_misc_irq_init(hdev);
9968 	if (ret)
9969 		goto err_msi_uninit;
9970 
9971 	ret = hclge_alloc_tqps(hdev);
9972 	if (ret) {
9973 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9974 		goto err_msi_irq_uninit;
9975 	}
9976 
9977 	ret = hclge_alloc_vport(hdev);
9978 	if (ret)
9979 		goto err_msi_irq_uninit;
9980 
9981 	ret = hclge_map_tqp(hdev);
9982 	if (ret)
9983 		goto err_msi_irq_uninit;
9984 
9985 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9986 		ret = hclge_mac_mdio_config(hdev);
9987 		if (ret)
9988 			goto err_msi_irq_uninit;
9989 	}
9990 
9991 	ret = hclge_init_umv_space(hdev);
9992 	if (ret)
9993 		goto err_mdiobus_unreg;
9994 
9995 	ret = hclge_mac_init(hdev);
9996 	if (ret) {
9997 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9998 		goto err_mdiobus_unreg;
9999 	}
10000 
10001 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10002 	if (ret) {
10003 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10004 		goto err_mdiobus_unreg;
10005 	}
10006 
10007 	ret = hclge_config_gro(hdev, true);
10008 	if (ret)
10009 		goto err_mdiobus_unreg;
10010 
10011 	ret = hclge_init_vlan_config(hdev);
10012 	if (ret) {
10013 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10014 		goto err_mdiobus_unreg;
10015 	}
10016 
10017 	ret = hclge_tm_schd_init(hdev);
10018 	if (ret) {
10019 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10020 		goto err_mdiobus_unreg;
10021 	}
10022 
10023 	hclge_rss_init_cfg(hdev);
10024 	ret = hclge_rss_init_hw(hdev);
10025 	if (ret) {
10026 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10027 		goto err_mdiobus_unreg;
10028 	}
10029 
10030 	ret = init_mgr_tbl(hdev);
10031 	if (ret) {
10032 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10033 		goto err_mdiobus_unreg;
10034 	}
10035 
10036 	ret = hclge_init_fd_config(hdev);
10037 	if (ret) {
10038 		dev_err(&pdev->dev,
10039 			"fd table init fail, ret=%d\n", ret);
10040 		goto err_mdiobus_unreg;
10041 	}
10042 
10043 	INIT_KFIFO(hdev->mac_tnl_log);
10044 
10045 	hclge_dcb_ops_set(hdev);
10046 
10047 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10048 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10049 
	/* Set up the misc vector affinity after the service timer has been
	 * set up, because add_timer_on is called in the affinity notify
	 * callback.
	 */
10053 	hclge_misc_affinity_setup(hdev);
10054 
10055 	hclge_clear_all_event_cause(hdev);
10056 	hclge_clear_resetting_state(hdev);
10057 
	/* Log and clear the hw errors that have already occurred */
10059 	hclge_handle_all_hns_hw_errors(ae_dev);
10060 
	/* request a delayed reset for the error recovery, because an
	 * immediate global reset on a PF would affect the pending
	 * initialization of other PFs
	 */
10064 	if (ae_dev->hw_err_reset_req) {
10065 		enum hnae3_reset_type reset_level;
10066 
10067 		reset_level = hclge_get_reset_level(ae_dev,
10068 						    &ae_dev->hw_err_reset_req);
10069 		hclge_set_def_reset_request(ae_dev, reset_level);
10070 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10071 	}
10072 
10073 	/* Enable MISC vector(vector0) */
10074 	hclge_enable_vector(&hdev->misc_vector, true);
10075 
10076 	hclge_state_init(hdev);
10077 	hdev->last_reset_time = jiffies;
10078 
10079 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10080 		 HCLGE_DRIVER_NAME);
10081 
10082 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10083 
10084 	return 0;
10085 
10086 err_mdiobus_unreg:
10087 	if (hdev->hw.mac.phydev)
10088 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10089 err_msi_irq_uninit:
10090 	hclge_misc_irq_uninit(hdev);
10091 err_msi_uninit:
10092 	pci_free_irq_vectors(pdev);
10093 err_cmd_uninit:
10094 	hclge_cmd_uninit(hdev);
10095 err_pci_uninit:
10096 	pcim_iounmap(pdev, hdev->hw.io_base);
10097 	pci_clear_master(pdev);
10098 	pci_release_regions(pdev);
10099 	pci_disable_device(pdev);
10100 out:
10101 	mutex_destroy(&hdev->vport_lock);
10102 	return ret;
10103 }
10104 
10105 static void hclge_stats_clear(struct hclge_dev *hdev)
10106 {
10107 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10108 }
10109 
10110 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10111 {
10112 	return hclge_config_switch_param(hdev, vf, enable,
10113 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10114 }
10115 
10116 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10117 {
10118 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10119 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10120 					  enable, vf);
10121 }
10122 
10123 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10124 {
10125 	int ret;
10126 
10127 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10128 	if (ret) {
10129 		dev_err(&hdev->pdev->dev,
10130 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10131 			vf, enable ? "on" : "off", ret);
10132 		return ret;
10133 	}
10134 
10135 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10136 	if (ret)
10137 		dev_err(&hdev->pdev->dev,
10138 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10139 			vf, enable ? "on" : "off", ret);
10140 
10141 	return ret;
10142 }
10143 
10144 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10145 				 bool enable)
10146 {
10147 	struct hclge_vport *vport = hclge_get_vport(handle);
10148 	struct hclge_dev *hdev = vport->back;
10149 	u32 new_spoofchk = enable ? 1 : 0;
10150 	int ret;
10151 
10152 	if (hdev->pdev->revision == 0x20)
10153 		return -EOPNOTSUPP;
10154 
10155 	vport = hclge_get_vf_vport(hdev, vf);
10156 	if (!vport)
10157 		return -EINVAL;
10158 
10159 	if (vport->vf_info.spoofchk == new_spoofchk)
10160 		return 0;
10161 
	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enabling spoof check may cause its packet sending to fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enabling spoof check may cause its packet sending to fail\n",
			 vf);
10170 
10171 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10172 	if (ret)
10173 		return ret;
10174 
10175 	vport->vf_info.spoofchk = new_spoofchk;
10176 	return 0;
10177 }
10178 
10179 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10180 {
10181 	struct hclge_vport *vport = hdev->vport;
10182 	int ret;
10183 	int i;
10184 
10185 	if (hdev->pdev->revision == 0x20)
10186 		return 0;
10187 
10188 	/* resume the vf spoof check state after reset */
10189 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10190 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10191 					       vport->vf_info.spoofchk);
10192 		if (ret)
10193 			return ret;
10194 
10195 		vport++;
10196 	}
10197 
10198 	return 0;
10199 }
10200 
10201 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10202 {
10203 	struct hclge_vport *vport = hclge_get_vport(handle);
10204 	struct hclge_dev *hdev = vport->back;
10205 	u32 new_trusted = enable ? 1 : 0;
10206 	bool en_bc_pmc;
10207 	int ret;
10208 
10209 	vport = hclge_get_vf_vport(hdev, vf);
10210 	if (!vport)
10211 		return -EINVAL;
10212 
10213 	if (vport->vf_info.trusted == new_trusted)
10214 		return 0;
10215 
10216 	/* Disable promisc mode for VF if it is not trusted any more. */
10217 	if (!enable && vport->vf_info.promisc_enable) {
10218 		en_bc_pmc = hdev->pdev->revision != 0x20;
10219 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10220 						   en_bc_pmc);
10221 		if (ret)
10222 			return ret;
10223 		vport->vf_info.promisc_enable = 0;
10224 		hclge_inform_vf_promisc_info(vport);
10225 	}
10226 
10227 	vport->vf_info.trusted = new_trusted;
10228 
10229 	return 0;
10230 }
10231 
10232 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10233 {
10234 	int ret;
10235 	int vf;
10236 
10237 	/* reset vf rate to default value */
10238 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10239 		struct hclge_vport *vport = &hdev->vport[vf];
10240 
10241 		vport->vf_info.max_tx_rate = 0;
10242 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10243 		if (ret)
10244 			dev_err(&hdev->pdev->dev,
10245 				"vf%d failed to reset to default, ret=%d\n",
10246 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10247 	}
10248 }
10249 
10250 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10251 				     int min_tx_rate, int max_tx_rate)
10252 {
10253 	if (min_tx_rate != 0 ||
10254 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10255 		dev_err(&hdev->pdev->dev,
10256 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10257 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10258 		return -EINVAL;
10259 	}
10260 
10261 	return 0;
10262 }
10263 
10264 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10265 			     int min_tx_rate, int max_tx_rate, bool force)
10266 {
10267 	struct hclge_vport *vport = hclge_get_vport(handle);
10268 	struct hclge_dev *hdev = vport->back;
10269 	int ret;
10270 
10271 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10272 	if (ret)
10273 		return ret;
10274 
10275 	vport = hclge_get_vf_vport(hdev, vf);
10276 	if (!vport)
10277 		return -EINVAL;
10278 
10279 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10280 		return 0;
10281 
10282 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10283 	if (ret)
10284 		return ret;
10285 
10286 	vport->vf_info.max_tx_rate = max_tx_rate;
10287 
10288 	return 0;
10289 }
10290 
10291 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10292 {
10293 	struct hnae3_handle *handle = &hdev->vport->nic;
10294 	struct hclge_vport *vport;
10295 	int ret;
10296 	int vf;
10297 
10298 	/* resume the vf max_tx_rate after reset */
10299 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10300 		vport = hclge_get_vf_vport(hdev, vf);
10301 		if (!vport)
10302 			return -EINVAL;
10303 
		/* zero means the maximum rate; after a reset, the firmware
		 * has already set it to the maximum rate, so just continue.
		 */
10307 		if (!vport->vf_info.max_tx_rate)
10308 			continue;
10309 
10310 		ret = hclge_set_vf_rate(handle, vf, 0,
10311 					vport->vf_info.max_tx_rate, true);
10312 		if (ret) {
10313 			dev_err(&hdev->pdev->dev,
10314 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10315 				vf, vport->vf_info.max_tx_rate, ret);
10316 			return ret;
10317 		}
10318 	}
10319 
10320 	return 0;
10321 }
10322 
10323 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10324 {
10325 	struct hclge_vport *vport = hdev->vport;
10326 	int i;
10327 
10328 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10329 		hclge_vport_stop(vport);
10330 		vport++;
10331 	}
10332 }
10333 
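/* Re-initialize the device after a reset. For an IMP or global reset the
 * in-memory VLAN/UMV shadow tables are cleared and the UMV space is reset;
 * the remaining hardware configuration (MAC, VLAN, TM, RSS, FD, error
 * interrupts, spoof check and VF rates) is reprogrammed unconditionally.
 */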
10334 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10335 {
10336 	struct hclge_dev *hdev = ae_dev->priv;
10337 	struct pci_dev *pdev = ae_dev->pdev;
10338 	int ret;
10339 
10340 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10341 
10342 	hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not need to clear or restore the PF and VF
	 * table entries, so the tables in memory should not be cleaned here.
	 */
10346 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10347 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10348 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10349 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10350 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10351 		hclge_reset_umv_space(hdev);
10352 	}
10353 
10354 	ret = hclge_cmd_init(hdev);
10355 	if (ret) {
10356 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10357 		return ret;
10358 	}
10359 
10360 	ret = hclge_map_tqp(hdev);
10361 	if (ret) {
10362 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10363 		return ret;
10364 	}
10365 
10366 	ret = hclge_mac_init(hdev);
10367 	if (ret) {
10368 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10369 		return ret;
10370 	}
10371 
10372 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10373 	if (ret) {
10374 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10375 		return ret;
10376 	}
10377 
10378 	ret = hclge_config_gro(hdev, true);
10379 	if (ret)
10380 		return ret;
10381 
10382 	ret = hclge_init_vlan_config(hdev);
10383 	if (ret) {
10384 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10385 		return ret;
10386 	}
10387 
10388 	ret = hclge_tm_init_hw(hdev, true);
10389 	if (ret) {
10390 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10391 		return ret;
10392 	}
10393 
10394 	ret = hclge_rss_init_hw(hdev);
10395 	if (ret) {
10396 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10397 		return ret;
10398 	}
10399 
10400 	ret = init_mgr_tbl(hdev);
10401 	if (ret) {
10402 		dev_err(&pdev->dev,
10403 			"failed to reinit manager table, ret = %d\n", ret);
10404 		return ret;
10405 	}
10406 
10407 	ret = hclge_init_fd_config(hdev);
10408 	if (ret) {
10409 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10410 		return ret;
10411 	}
10412 
	/* Log and clear the hw errors that have already occurred */
10414 	hclge_handle_all_hns_hw_errors(ae_dev);
10415 
10416 	/* Re-enable the hw error interrupts because
10417 	 * the interrupts get disabled on global reset.
10418 	 */
10419 	ret = hclge_config_nic_hw_error(hdev, true);
10420 	if (ret) {
10421 		dev_err(&pdev->dev,
10422 			"fail(%d) to re-enable NIC hw error interrupts\n",
10423 			ret);
10424 		return ret;
10425 	}
10426 
10427 	if (hdev->roce_client) {
10428 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
10429 		if (ret) {
10430 			dev_err(&pdev->dev,
10431 				"fail(%d) to re-enable roce ras interrupts\n",
10432 				ret);
10433 			return ret;
10434 		}
10435 	}
10436 
10437 	hclge_reset_vport_state(hdev);
10438 	ret = hclge_reset_vport_spoofchk(hdev);
10439 	if (ret)
10440 		return ret;
10441 
10442 	ret = hclge_resume_vf_rate(hdev);
10443 	if (ret)
10444 		return ret;
10445 
10446 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10447 		 HCLGE_DRIVER_NAME);
10448 
10449 	return 0;
10450 }
10451 
10452 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10453 {
10454 	struct hclge_dev *hdev = ae_dev->priv;
10455 	struct hclge_mac *mac = &hdev->hw.mac;
10456 
10457 	hclge_reset_vf_rate(hdev);
10458 	hclge_clear_vf_vlan(hdev);
10459 	hclge_misc_affinity_teardown(hdev);
10460 	hclge_state_uninit(hdev);
10461 	hclge_uninit_mac_table(hdev);
10462 
10463 	if (mac->phydev)
10464 		mdiobus_unregister(mac->mdio_bus);
10465 
10466 	/* Disable MISC vector(vector0) */
10467 	hclge_enable_vector(&hdev->misc_vector, false);
10468 	synchronize_irq(hdev->misc_vector.vector_irq);
10469 
10470 	/* Disable all hw interrupts */
10471 	hclge_config_mac_tnl_int(hdev, false);
10472 	hclge_config_nic_hw_error(hdev, false);
10473 	hclge_config_rocee_ras_interrupt(hdev, false);
10474 
10475 	hclge_cmd_uninit(hdev);
10476 	hclge_misc_irq_uninit(hdev);
10477 	hclge_pci_uninit(hdev);
10478 	mutex_destroy(&hdev->vport_lock);
10479 	hclge_uninit_vport_vlan_table(hdev);
10480 	ae_dev->priv = NULL;
10481 }
10482 
10483 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10484 {
10485 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10486 	struct hclge_vport *vport = hclge_get_vport(handle);
10487 	struct hclge_dev *hdev = vport->back;
10488 
10489 	return min_t(u32, hdev->rss_size_max,
10490 		     vport->alloc_tqps / kinfo->num_tc);
10491 }
10492 
10493 static void hclge_get_channels(struct hnae3_handle *handle,
10494 			       struct ethtool_channels *ch)
10495 {
10496 	ch->max_combined = hclge_get_max_channels(handle);
10497 	ch->other_count = 1;
10498 	ch->max_other = 1;
10499 	ch->combined_count = handle->kinfo.rss_size;
10500 }
10501 
10502 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10503 					u16 *alloc_tqps, u16 *max_rss_size)
10504 {
10505 	struct hclge_vport *vport = hclge_get_vport(handle);
10506 	struct hclge_dev *hdev = vport->back;
10507 
10508 	*alloc_tqps = vport->alloc_tqps;
10509 	*max_rss_size = hdev->rss_size_max;
10510 }
10511 
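/* Change the number of enabled queue pairs. The TM vport mapping is updated
 * first, then the RSS TC mode is reprogrammed for the new rss_size; the RSS
 * indirection table is rebuilt with a default spread unless the user has
 * already configured one (rxfh_configured).
 */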
10512 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10513 			      bool rxfh_configured)
10514 {
10515 	struct hclge_vport *vport = hclge_get_vport(handle);
10516 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10517 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10518 	struct hclge_dev *hdev = vport->back;
10519 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10520 	u16 cur_rss_size = kinfo->rss_size;
10521 	u16 cur_tqps = kinfo->num_tqps;
10522 	u16 tc_valid[HCLGE_MAX_TC_NUM];
10523 	u16 roundup_size;
10524 	u32 *rss_indir;
10525 	unsigned int i;
10526 	int ret;
10527 
10528 	kinfo->req_rss_size = new_tqps_num;
10529 
10530 	ret = hclge_tm_vport_map_update(hdev);
10531 	if (ret) {
10532 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10533 		return ret;
10534 	}
10535 
10536 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
10537 	roundup_size = ilog2(roundup_size);
10538 	/* Set the RSS TC mode according to the new RSS size */
10539 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10540 		tc_valid[i] = 0;
10541 
10542 		if (!(hdev->hw_tc_map & BIT(i)))
10543 			continue;
10544 
10545 		tc_valid[i] = 1;
10546 		tc_size[i] = roundup_size;
10547 		tc_offset[i] = kinfo->rss_size * i;
10548 	}
10549 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10550 	if (ret)
10551 		return ret;
10552 
	/* RSS indirection table has been configured by the user */
10554 	if (rxfh_configured)
10555 		goto out;
10556 
	/* Reinitialize the RSS indirection table for the new RSS size */
10558 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10559 	if (!rss_indir)
10560 		return -ENOMEM;
10561 
10562 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10563 		rss_indir[i] = i % kinfo->rss_size;
10564 
10565 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10566 	if (ret)
10567 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10568 			ret);
10569 
10570 	kfree(rss_indir);
10571 
10572 out:
10573 	if (!ret)
10574 		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10576 			 cur_rss_size, kinfo->rss_size,
10577 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10578 
10579 	return ret;
10580 }
10581 
10582 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10583 			      u32 *regs_num_64_bit)
10584 {
10585 	struct hclge_desc desc;
10586 	u32 total_num;
10587 	int ret;
10588 
10589 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10590 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10591 	if (ret) {
10592 		dev_err(&hdev->pdev->dev,
10593 			"Query register number cmd failed, ret = %d.\n", ret);
10594 		return ret;
10595 	}
10596 
10597 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10598 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10599 
10600 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10601 	if (!total_num)
10602 		return -EINVAL;
10603 
10604 	return 0;
10605 }
10606 
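/* Read regs_num 32-bit register values through the command queue, splitting
 * the query across as many descriptors as needed. The first descriptor
 * carries HCLGE_32_BIT_DESC_NODATA_LEN fewer values than the following ones,
 * which the copy loop below accounts for.
 */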
10607 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10608 				 void *data)
10609 {
10610 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10611 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10612 
10613 	struct hclge_desc *desc;
10614 	u32 *reg_val = data;
10615 	__le32 *desc_data;
10616 	int nodata_num;
10617 	int cmd_num;
10618 	int i, k, n;
10619 	int ret;
10620 
10621 	if (regs_num == 0)
10622 		return 0;
10623 
10624 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10625 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10626 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10627 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10628 	if (!desc)
10629 		return -ENOMEM;
10630 
10631 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10632 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10633 	if (ret) {
10634 		dev_err(&hdev->pdev->dev,
10635 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10636 		kfree(desc);
10637 		return ret;
10638 	}
10639 
10640 	for (i = 0; i < cmd_num; i++) {
10641 		if (i == 0) {
10642 			desc_data = (__le32 *)(&desc[i].data[0]);
10643 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10644 		} else {
10645 			desc_data = (__le32 *)(&desc[i]);
10646 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10647 		}
10648 		for (k = 0; k < n; k++) {
10649 			*reg_val++ = le32_to_cpu(*desc_data++);
10650 
10651 			regs_num--;
10652 			if (!regs_num)
10653 				break;
10654 		}
10655 	}
10656 
10657 	kfree(desc);
10658 	return 0;
10659 }
10660 
10661 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10662 				 void *data)
10663 {
10664 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10665 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10666 
10667 	struct hclge_desc *desc;
10668 	u64 *reg_val = data;
10669 	__le64 *desc_data;
10670 	int nodata_len;
10671 	int cmd_num;
10672 	int i, k, n;
10673 	int ret;
10674 
10675 	if (regs_num == 0)
10676 		return 0;
10677 
10678 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10679 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10680 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10681 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10682 	if (!desc)
10683 		return -ENOMEM;
10684 
10685 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10686 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10687 	if (ret) {
10688 		dev_err(&hdev->pdev->dev,
10689 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10690 		kfree(desc);
10691 		return ret;
10692 	}
10693 
10694 	for (i = 0; i < cmd_num; i++) {
10695 		if (i == 0) {
10696 			desc_data = (__le64 *)(&desc[i].data[0]);
10697 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10698 		} else {
10699 			desc_data = (__le64 *)(&desc[i]);
10700 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10701 		}
10702 		for (k = 0; k < n; k++) {
10703 			*reg_val++ = le64_to_cpu(*desc_data++);
10704 
10705 			regs_num--;
10706 			if (!regs_num)
10707 				break;
10708 		}
10709 	}
10710 
10711 	kfree(desc);
10712 	return 0;
10713 }
10714 
10715 #define MAX_SEPARATE_NUM	4
10716 #define SEPARATOR_VALUE		0xFDFCFBFA
10717 #define REG_NUM_PER_LINE	4
10718 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10719 #define REG_SEPARATOR_LINE	1
10720 #define REG_NUM_REMAIN_MASK	3
10721 #define BD_LIST_MAX_NUM		30
10722 
10723 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10724 {
10725 	int i;
10726 
10727 	/* initialize command BD except the last one */
10728 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10729 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10730 					   true);
10731 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10732 	}
10733 
10734 	/* initialize the last command BD */
10735 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10736 
10737 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10738 }
10739 
10740 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10741 				    int *bd_num_list,
10742 				    u32 type_num)
10743 {
10744 	u32 entries_per_desc, desc_index, index, offset, i;
10745 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10746 	int ret;
10747 
10748 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10749 	if (ret) {
10750 		dev_err(&hdev->pdev->dev,
10751 			"Get dfx bd num fail, status is %d.\n", ret);
10752 		return ret;
10753 	}
10754 
10755 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10756 	for (i = 0; i < type_num; i++) {
10757 		offset = hclge_dfx_bd_offset_list[i];
10758 		index = offset % entries_per_desc;
10759 		desc_index = offset / entries_per_desc;
10760 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10761 	}
10762 
10763 	return ret;
10764 }
10765 
10766 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10767 				  struct hclge_desc *desc_src, int bd_num,
10768 				  enum hclge_opcode_type cmd)
10769 {
10770 	struct hclge_desc *desc = desc_src;
10771 	int i, ret;
10772 
10773 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10774 	for (i = 0; i < bd_num - 1; i++) {
10775 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10776 		desc++;
10777 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10778 	}
10779 
10780 	desc = desc_src;
10781 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10782 	if (ret)
10783 		dev_err(&hdev->pdev->dev,
10784 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10785 			cmd, ret);
10786 
10787 	return ret;
10788 }
10789 
10790 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10791 				    void *data)
10792 {
10793 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10794 	struct hclge_desc *desc = desc_src;
10795 	u32 *reg = data;
10796 
10797 	entries_per_desc = ARRAY_SIZE(desc->data);
10798 	reg_num = entries_per_desc * bd_num;
10799 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10800 	for (i = 0; i < reg_num; i++) {
10801 		index = i % entries_per_desc;
10802 		desc_index = i / entries_per_desc;
10803 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10804 	}
10805 	for (i = 0; i < separator_num; i++)
10806 		*reg++ = SEPARATOR_VALUE;
10807 
10808 	return reg_num + separator_num;
10809 }
10810 
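/* Compute the buffer length in bytes needed for the DFX part of the
 * register dump: for each type, the data carried by its BDs is rounded up
 * to whole REG_LEN_PER_LINE lines, which covers the separator padding
 * added by hclge_dfx_reg_fetch_data().
 */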
10811 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10812 {
10813 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10814 	int data_len_per_desc, bd_num, i;
10815 	int bd_num_list[BD_LIST_MAX_NUM];
10816 	u32 data_len;
10817 	int ret;
10818 
10819 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10820 	if (ret) {
10821 		dev_err(&hdev->pdev->dev,
10822 			"Get dfx reg bd num fail, status is %d.\n", ret);
10823 		return ret;
10824 	}
10825 
10826 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
10827 	*len = 0;
10828 	for (i = 0; i < dfx_reg_type_num; i++) {
10829 		bd_num = bd_num_list[i];
10830 		data_len = data_len_per_desc * bd_num;
10831 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10832 	}
10833 
10834 	return ret;
10835 }
10836 
10837 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10838 {
10839 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10840 	int bd_num, bd_num_max, buf_len, i;
10841 	int bd_num_list[BD_LIST_MAX_NUM];
10842 	struct hclge_desc *desc_src;
10843 	u32 *reg = data;
10844 	int ret;
10845 
10846 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10847 	if (ret) {
10848 		dev_err(&hdev->pdev->dev,
10849 			"Get dfx reg bd num fail, status is %d.\n", ret);
10850 		return ret;
10851 	}
10852 
10853 	bd_num_max = bd_num_list[0];
10854 	for (i = 1; i < dfx_reg_type_num; i++)
10855 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10856 
10857 	buf_len = sizeof(*desc_src) * bd_num_max;
10858 	desc_src = kzalloc(buf_len, GFP_KERNEL);
10859 	if (!desc_src)
10860 		return -ENOMEM;
10861 
10862 	for (i = 0; i < dfx_reg_type_num; i++) {
10863 		bd_num = bd_num_list[i];
10864 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10865 					     hclge_dfx_reg_opcode_list[i]);
10866 		if (ret) {
10867 			dev_err(&hdev->pdev->dev,
10868 				"Get dfx reg fail, status is %d.\n", ret);
10869 			break;
10870 		}
10871 
10872 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10873 	}
10874 
10875 	kfree(desc_src);
10876 	return ret;
10877 }
10878 
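/* Dump the registers read directly from the PF's PCIe register space: the
 * command-queue registers, the common registers, the per-ring registers of
 * every TQP and the per-vector interrupt registers. Each group is padded
 * with SEPARATOR_VALUE words to a line boundary. Returns the number of u32
 * words written.
 */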
10879 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10880 			      struct hnae3_knic_private_info *kinfo)
10881 {
10882 #define HCLGE_RING_REG_OFFSET		0x200
10883 #define HCLGE_RING_INT_REG_OFFSET	0x4
10884 
10885 	int i, j, reg_num, separator_num;
10886 	int data_num_sum;
10887 	u32 *reg = data;
10888 
	/* fetch per-PF register values from the PF PCIe register space */
10890 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10891 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10892 	for (i = 0; i < reg_num; i++)
10893 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10894 	for (i = 0; i < separator_num; i++)
10895 		*reg++ = SEPARATOR_VALUE;
10896 	data_num_sum = reg_num + separator_num;
10897 
10898 	reg_num = ARRAY_SIZE(common_reg_addr_list);
10899 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10900 	for (i = 0; i < reg_num; i++)
10901 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10902 	for (i = 0; i < separator_num; i++)
10903 		*reg++ = SEPARATOR_VALUE;
10904 	data_num_sum += reg_num + separator_num;
10905 
10906 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
10907 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10908 	for (j = 0; j < kinfo->num_tqps; j++) {
10909 		for (i = 0; i < reg_num; i++)
10910 			*reg++ = hclge_read_dev(&hdev->hw,
10911 						ring_reg_addr_list[i] +
10912 						HCLGE_RING_REG_OFFSET * j);
10913 		for (i = 0; i < separator_num; i++)
10914 			*reg++ = SEPARATOR_VALUE;
10915 	}
10916 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10917 
10918 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10919 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10920 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
10921 		for (i = 0; i < reg_num; i++)
10922 			*reg++ = hclge_read_dev(&hdev->hw,
10923 						tqp_intr_reg_addr_list[i] +
10924 						HCLGE_RING_INT_REG_OFFSET * j);
10925 		for (i = 0; i < separator_num; i++)
10926 			*reg++ = SEPARATOR_VALUE;
10927 	}
10928 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10929 
10930 	return data_num_sum;
10931 }
10932 
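/* Return the total size in bytes of the register dump exposed through the
 * .get_regs ops: the directly read PF registers (cmdq, common, per-ring,
 * per-vector), the 32-bit and 64-bit firmware register blocks, each rounded
 * up to a separator line, plus the DFX register blocks.
 */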
10933 static int hclge_get_regs_len(struct hnae3_handle *handle)
10934 {
10935 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10936 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10937 	struct hclge_vport *vport = hclge_get_vport(handle);
10938 	struct hclge_dev *hdev = vport->back;
10939 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10940 	int regs_lines_32_bit, regs_lines_64_bit;
10941 	int ret;
10942 
10943 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10944 	if (ret) {
10945 		dev_err(&hdev->pdev->dev,
10946 			"Get register number failed, ret = %d.\n", ret);
10947 		return ret;
10948 	}
10949 
10950 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10951 	if (ret) {
10952 		dev_err(&hdev->pdev->dev,
10953 			"Get dfx reg len failed, ret = %d.\n", ret);
10954 		return ret;
10955 	}
10956 
10957 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10958 		REG_SEPARATOR_LINE;
10959 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10960 		REG_SEPARATOR_LINE;
10961 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10962 		REG_SEPARATOR_LINE;
10963 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10964 		REG_SEPARATOR_LINE;
10965 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10966 		REG_SEPARATOR_LINE;
10967 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10968 		REG_SEPARATOR_LINE;
10969 
10970 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10971 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10972 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10973 }
10974 
10975 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10976 			   void *data)
10977 {
10978 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10979 	struct hclge_vport *vport = hclge_get_vport(handle);
10980 	struct hclge_dev *hdev = vport->back;
10981 	u32 regs_num_32_bit, regs_num_64_bit;
10982 	int i, reg_num, separator_num, ret;
10983 	u32 *reg = data;
10984 
10985 	*version = hdev->fw_version;
10986 
10987 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10988 	if (ret) {
10989 		dev_err(&hdev->pdev->dev,
10990 			"Get register number failed, ret = %d.\n", ret);
10991 		return;
10992 	}
10993 
10994 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10995 
10996 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10997 	if (ret) {
10998 		dev_err(&hdev->pdev->dev,
10999 			"Get 32 bit register failed, ret = %d.\n", ret);
11000 		return;
11001 	}
11002 	reg_num = regs_num_32_bit;
11003 	reg += reg_num;
11004 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11005 	for (i = 0; i < separator_num; i++)
11006 		*reg++ = SEPARATOR_VALUE;
11007 
11008 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11009 	if (ret) {
11010 		dev_err(&hdev->pdev->dev,
11011 			"Get 64 bit register failed, ret = %d.\n", ret);
11012 		return;
11013 	}
11014 	reg_num = regs_num_64_bit * 2;
11015 	reg += reg_num;
11016 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11017 	for (i = 0; i < separator_num; i++)
11018 		*reg++ = SEPARATOR_VALUE;
11019 
11020 	ret = hclge_get_dfx_reg(hdev, reg);
11021 	if (ret)
11022 		dev_err(&hdev->pdev->dev,
11023 			"Get dfx register failed, ret = %d.\n", ret);
11024 }
11025 
11026 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11027 {
11028 	struct hclge_set_led_state_cmd *req;
11029 	struct hclge_desc desc;
11030 	int ret;
11031 
11032 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11033 
11034 	req = (struct hclge_set_led_state_cmd *)desc.data;
11035 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11036 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11037 
11038 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11039 	if (ret)
11040 		dev_err(&hdev->pdev->dev,
11041 			"Send set led state cmd error, ret =%d\n", ret);
11042 
11043 	return ret;
11044 }
11045 
11046 enum hclge_led_status {
11047 	HCLGE_LED_OFF,
11048 	HCLGE_LED_ON,
11049 	HCLGE_LED_NO_CHANGE = 0xFF,
11050 };
11051 
11052 static int hclge_set_led_id(struct hnae3_handle *handle,
11053 			    enum ethtool_phys_id_state status)
11054 {
11055 	struct hclge_vport *vport = hclge_get_vport(handle);
11056 	struct hclge_dev *hdev = vport->back;
11057 
11058 	switch (status) {
11059 	case ETHTOOL_ID_ACTIVE:
11060 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11061 	case ETHTOOL_ID_INACTIVE:
11062 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11063 	default:
11064 		return -EINVAL;
11065 	}
11066 }
11067 
11068 static void hclge_get_link_mode(struct hnae3_handle *handle,
11069 				unsigned long *supported,
11070 				unsigned long *advertising)
11071 {
11072 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11073 	struct hclge_vport *vport = hclge_get_vport(handle);
11074 	struct hclge_dev *hdev = vport->back;
11075 	unsigned int idx = 0;
11076 
11077 	for (; idx < size; idx++) {
11078 		supported[idx] = hdev->hw.mac.supported[idx];
11079 		advertising[idx] = hdev->hw.mac.advertising[idx];
11080 	}
11081 }
11082 
11083 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11084 {
11085 	struct hclge_vport *vport = hclge_get_vport(handle);
11086 	struct hclge_dev *hdev = vport->back;
11087 
11088 	return hclge_config_gro(hdev, enable);
11089 }
11090 
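/* Reapply the promiscuous configuration of vport 0 when either the
 * requested flags or the overflow promiscuous flags have changed; on
 * success the change flag is cleared and the VLAN filter state is
 * refreshed as well.
 */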
11091 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11092 {
11093 	struct hclge_vport *vport = &hdev->vport[0];
11094 	struct hnae3_handle *handle = &vport->nic;
11095 	u8 tmp_flags = 0;
11096 	int ret;
11097 
11098 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11099 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11100 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11101 	}
11102 
11103 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11104 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11105 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11106 					     tmp_flags & HNAE3_MPE);
11107 		if (!ret) {
11108 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11109 			hclge_enable_vlan_filter(handle,
11110 						 tmp_flags & HNAE3_VLAN_FLTR);
11111 		}
11112 	}
11113 }
11114 
11115 static bool hclge_module_existed(struct hclge_dev *hdev)
11116 {
11117 	struct hclge_desc desc;
11118 	u32 existed;
11119 	int ret;
11120 
11121 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11122 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11123 	if (ret) {
11124 		dev_err(&hdev->pdev->dev,
11125 			"failed to get SFP exist state, ret = %d\n", ret);
11126 		return false;
11127 	}
11128 
11129 	existed = le32_to_cpu(desc.data[0]);
11130 
11131 	return existed != 0;
11132 }
11133 
/* One read needs 6 BDs (140 bytes of data in total).
 * Return the number of bytes actually read; 0 means the read failed.
 */
11137 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11138 				     u32 len, u8 *data)
11139 {
11140 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11141 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11142 	u16 read_len;
11143 	u16 copy_len;
11144 	int ret;
11145 	int i;
11146 
	/* set up all 6 BDs to read the module eeprom info. */
11148 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11149 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11150 					   true);
11151 
11152 		/* bd0~bd4 need next flag */
11153 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11154 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11155 	}
11156 
	/* set up bd0; this BD contains the offset and read length. */
11158 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11159 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11160 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11161 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11162 
11163 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11164 	if (ret) {
11165 		dev_err(&hdev->pdev->dev,
11166 			"failed to get SFP eeprom info, ret = %d\n", ret);
11167 		return 0;
11168 	}
11169 
11170 	/* copy sfp info from bd0 to out buffer. */
11171 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11172 	memcpy(data, sfp_info_bd0->data, copy_len);
11173 	read_len = copy_len;
11174 
11175 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11176 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11177 		if (read_len >= len)
11178 			return read_len;
11179 
11180 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11181 		memcpy(data + read_len, desc[i].data, copy_len);
11182 		read_len += copy_len;
11183 	}
11184 
11185 	return read_len;
11186 }
11187 
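/* Read @len bytes of module EEPROM data starting at @offset for fiber
 * ports. The data is fetched in chunks of at most HCLGE_SFP_INFO_MAX_LEN
 * bytes by hclge_get_sfp_eeprom_info() until the whole request has been
 * read.
 */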
11188 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11189 				   u32 len, u8 *data)
11190 {
11191 	struct hclge_vport *vport = hclge_get_vport(handle);
11192 	struct hclge_dev *hdev = vport->back;
11193 	u32 read_len = 0;
11194 	u16 data_len;
11195 
11196 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11197 		return -EOPNOTSUPP;
11198 
11199 	if (!hclge_module_existed(hdev))
11200 		return -ENXIO;
11201 
11202 	while (read_len < len) {
11203 		data_len = hclge_get_sfp_eeprom_info(hdev,
11204 						     offset + read_len,
11205 						     len - read_len,
11206 						     data + read_len);
11207 		if (!data_len)
11208 			return -EIO;
11209 
11210 		read_len += data_len;
11211 	}
11212 
11213 	return 0;
11214 }
11215 
11216 static const struct hnae3_ae_ops hclge_ops = {
11217 	.init_ae_dev = hclge_init_ae_dev,
11218 	.uninit_ae_dev = hclge_uninit_ae_dev,
11219 	.flr_prepare = hclge_flr_prepare,
11220 	.flr_done = hclge_flr_done,
11221 	.init_client_instance = hclge_init_client_instance,
11222 	.uninit_client_instance = hclge_uninit_client_instance,
11223 	.map_ring_to_vector = hclge_map_ring_to_vector,
11224 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11225 	.get_vector = hclge_get_vector,
11226 	.put_vector = hclge_put_vector,
11227 	.set_promisc_mode = hclge_set_promisc_mode,
11228 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11229 	.set_loopback = hclge_set_loopback,
11230 	.start = hclge_ae_start,
11231 	.stop = hclge_ae_stop,
11232 	.client_start = hclge_client_start,
11233 	.client_stop = hclge_client_stop,
11234 	.get_status = hclge_get_status,
11235 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11236 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11237 	.get_media_type = hclge_get_media_type,
11238 	.check_port_speed = hclge_check_port_speed,
11239 	.get_fec = hclge_get_fec,
11240 	.set_fec = hclge_set_fec,
11241 	.get_rss_key_size = hclge_get_rss_key_size,
11242 	.get_rss_indir_size = hclge_get_rss_indir_size,
11243 	.get_rss = hclge_get_rss,
11244 	.set_rss = hclge_set_rss,
11245 	.set_rss_tuple = hclge_set_rss_tuple,
11246 	.get_rss_tuple = hclge_get_rss_tuple,
11247 	.get_tc_size = hclge_get_tc_size,
11248 	.get_mac_addr = hclge_get_mac_addr,
11249 	.set_mac_addr = hclge_set_mac_addr,
11250 	.do_ioctl = hclge_do_ioctl,
11251 	.add_uc_addr = hclge_add_uc_addr,
11252 	.rm_uc_addr = hclge_rm_uc_addr,
11253 	.add_mc_addr = hclge_add_mc_addr,
11254 	.rm_mc_addr = hclge_rm_mc_addr,
11255 	.set_autoneg = hclge_set_autoneg,
11256 	.get_autoneg = hclge_get_autoneg,
11257 	.restart_autoneg = hclge_restart_autoneg,
11258 	.halt_autoneg = hclge_halt_autoneg,
11259 	.get_pauseparam = hclge_get_pauseparam,
11260 	.set_pauseparam = hclge_set_pauseparam,
11261 	.set_mtu = hclge_set_mtu,
11262 	.reset_queue = hclge_reset_tqp,
11263 	.get_stats = hclge_get_stats,
11264 	.get_mac_stats = hclge_get_mac_stat,
11265 	.update_stats = hclge_update_stats,
11266 	.get_strings = hclge_get_strings,
11267 	.get_sset_count = hclge_get_sset_count,
11268 	.get_fw_version = hclge_get_fw_version,
11269 	.get_mdix_mode = hclge_get_mdix_mode,
11270 	.enable_vlan_filter = hclge_enable_vlan_filter,
11271 	.set_vlan_filter = hclge_set_vlan_filter,
11272 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11273 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11274 	.reset_event = hclge_reset_event,
11275 	.get_reset_level = hclge_get_reset_level,
11276 	.set_default_reset_request = hclge_set_def_reset_request,
11277 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11278 	.set_channels = hclge_set_channels,
11279 	.get_channels = hclge_get_channels,
11280 	.get_regs_len = hclge_get_regs_len,
11281 	.get_regs = hclge_get_regs,
11282 	.set_led_id = hclge_set_led_id,
11283 	.get_link_mode = hclge_get_link_mode,
11284 	.add_fd_entry = hclge_add_fd_entry,
11285 	.del_fd_entry = hclge_del_fd_entry,
11286 	.del_all_fd_entries = hclge_del_all_fd_entries,
11287 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11288 	.get_fd_rule_info = hclge_get_fd_rule_info,
11289 	.get_fd_all_rules = hclge_get_all_rules,
11290 	.enable_fd = hclge_enable_fd,
11291 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11292 	.dbg_run_cmd = hclge_dbg_run_cmd,
11293 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11294 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11295 	.ae_dev_resetting = hclge_ae_dev_resetting,
11296 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11297 	.set_gro_en = hclge_gro_en,
11298 	.get_global_queue_id = hclge_covert_handle_qid_global,
11299 	.set_timer_task = hclge_set_timer_task,
11300 	.mac_connect_phy = hclge_mac_connect_phy,
11301 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11302 	.get_vf_config = hclge_get_vf_config,
11303 	.set_vf_link_state = hclge_set_vf_link_state,
11304 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11305 	.set_vf_trust = hclge_set_vf_trust,
11306 	.set_vf_rate = hclge_set_vf_rate,
11307 	.set_vf_mac = hclge_set_vf_mac,
11308 	.get_module_eeprom = hclge_get_module_eeprom,
11309 	.get_cmdq_stat = hclge_get_cmdq_stat,
11310 };
11311 
11312 static struct hnae3_ae_algo ae_algo = {
11313 	.ops = &hclge_ops,
11314 	.pdev_id_table = ae_algo_pci_tbl,
11315 };
11316 
11317 static int hclge_init(void)
11318 {
11319 	pr_info("%s is initializing\n", HCLGE_NAME);
11320 
11321 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11322 	if (!hclge_wq) {
11323 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11324 		return -ENOMEM;
11325 	}
11326 
11327 	hnae3_register_ae_algo(&ae_algo);
11328 
11329 	return 0;
11330 }
11331 
11332 static void hclge_exit(void)
11333 {
11334 	hnae3_unregister_ae_algo(&ae_algo);
11335 	destroy_workqueue(hclge_wq);
11336 }
11337 module_init(hclge_init);
11338 module_exit(hclge_exit);
11339 
11340 MODULE_LICENSE("GPL");
11341 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11342 MODULE_DESCRIPTION("HCLGE Driver");
11343 MODULE_VERSION(HCLGE_MOD_VERSION);
11344