1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
59 static int hclge_init_vlan_config(struct hclge_dev *hdev);
60 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
61 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
62 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
63 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
64 			       u16 *allocated_size, bool is_alloc);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static struct hnae3_ae_algo ae_algo;
72 
73 static const struct pci_device_id ae_algo_pci_tbl[] = {
74 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
75 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
76 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
77 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
81 	/* required last entry */
82 	{0, }
83 };
84 
85 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
86 
87 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
88 					 HCLGE_CMDQ_TX_ADDR_H_REG,
89 					 HCLGE_CMDQ_TX_DEPTH_REG,
90 					 HCLGE_CMDQ_TX_TAIL_REG,
91 					 HCLGE_CMDQ_TX_HEAD_REG,
92 					 HCLGE_CMDQ_RX_ADDR_L_REG,
93 					 HCLGE_CMDQ_RX_ADDR_H_REG,
94 					 HCLGE_CMDQ_RX_DEPTH_REG,
95 					 HCLGE_CMDQ_RX_TAIL_REG,
96 					 HCLGE_CMDQ_RX_HEAD_REG,
97 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
98 					 HCLGE_CMDQ_INTR_STS_REG,
99 					 HCLGE_CMDQ_INTR_EN_REG,
100 					 HCLGE_CMDQ_INTR_GEN_REG};
101 
102 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
103 					   HCLGE_VECTOR0_OTER_EN_REG,
104 					   HCLGE_MISC_RESET_STS_REG,
105 					   HCLGE_MISC_VECTOR_INT_STS,
106 					   HCLGE_GLOBAL_RESET_REG,
107 					   HCLGE_FUN_RST_ING,
108 					   HCLGE_GRO_EN_REG};
109 
110 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
111 					 HCLGE_RING_RX_ADDR_H_REG,
112 					 HCLGE_RING_RX_BD_NUM_REG,
113 					 HCLGE_RING_RX_BD_LENGTH_REG,
114 					 HCLGE_RING_RX_MERGE_EN_REG,
115 					 HCLGE_RING_RX_TAIL_REG,
116 					 HCLGE_RING_RX_HEAD_REG,
117 					 HCLGE_RING_RX_FBD_NUM_REG,
118 					 HCLGE_RING_RX_OFFSET_REG,
119 					 HCLGE_RING_RX_FBD_OFFSET_REG,
120 					 HCLGE_RING_RX_STASH_REG,
121 					 HCLGE_RING_RX_BD_ERR_REG,
122 					 HCLGE_RING_TX_ADDR_L_REG,
123 					 HCLGE_RING_TX_ADDR_H_REG,
124 					 HCLGE_RING_TX_BD_NUM_REG,
125 					 HCLGE_RING_TX_PRIORITY_REG,
126 					 HCLGE_RING_TX_TC_REG,
127 					 HCLGE_RING_TX_MERGE_EN_REG,
128 					 HCLGE_RING_TX_TAIL_REG,
129 					 HCLGE_RING_TX_HEAD_REG,
130 					 HCLGE_RING_TX_FBD_NUM_REG,
131 					 HCLGE_RING_TX_OFFSET_REG,
132 					 HCLGE_RING_TX_EBD_NUM_REG,
133 					 HCLGE_RING_TX_EBD_OFFSET_REG,
134 					 HCLGE_RING_TX_BD_ERR_REG,
135 					 HCLGE_RING_EN_REG};
136 
137 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
138 					     HCLGE_TQP_INTR_GL0_REG,
139 					     HCLGE_TQP_INTR_GL1_REG,
140 					     HCLGE_TQP_INTR_GL2_REG,
141 					     HCLGE_TQP_INTR_RL_REG};
142 
143 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
144 	"App    Loopback test",
145 	"Serdes serial Loopback test",
146 	"Serdes parallel Loopback test",
147 	"Phy    Loopback test"
148 };
149 
150 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
151 	{"mac_tx_mac_pause_num",
152 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
153 	{"mac_rx_mac_pause_num",
154 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
155 	{"mac_tx_control_pkt_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
157 	{"mac_rx_control_pkt_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
159 	{"mac_tx_pfc_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
161 	{"mac_tx_pfc_pri0_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
163 	{"mac_tx_pfc_pri1_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
165 	{"mac_tx_pfc_pri2_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
167 	{"mac_tx_pfc_pri3_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
169 	{"mac_tx_pfc_pri4_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
171 	{"mac_tx_pfc_pri5_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
173 	{"mac_tx_pfc_pri6_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
175 	{"mac_tx_pfc_pri7_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
177 	{"mac_rx_pfc_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
179 	{"mac_rx_pfc_pri0_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
181 	{"mac_rx_pfc_pri1_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
183 	{"mac_rx_pfc_pri2_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
185 	{"mac_rx_pfc_pri3_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
187 	{"mac_rx_pfc_pri4_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
189 	{"mac_rx_pfc_pri5_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
191 	{"mac_rx_pfc_pri6_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
193 	{"mac_rx_pfc_pri7_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
195 	{"mac_tx_total_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
197 	{"mac_tx_total_oct_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
199 	{"mac_tx_good_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
201 	{"mac_tx_bad_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
203 	{"mac_tx_good_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
205 	{"mac_tx_bad_oct_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
207 	{"mac_tx_uni_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
209 	{"mac_tx_multi_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
211 	{"mac_tx_broad_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
213 	{"mac_tx_undersize_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
215 	{"mac_tx_oversize_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
217 	{"mac_tx_64_oct_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
219 	{"mac_tx_65_127_oct_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
221 	{"mac_tx_128_255_oct_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
223 	{"mac_tx_256_511_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
225 	{"mac_tx_512_1023_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
227 	{"mac_tx_1024_1518_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
229 	{"mac_tx_1519_2047_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
231 	{"mac_tx_2048_4095_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
233 	{"mac_tx_4096_8191_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
235 	{"mac_tx_8192_9216_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
237 	{"mac_tx_9217_12287_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
239 	{"mac_tx_12288_16383_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
241 	{"mac_tx_1519_max_good_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
243 	{"mac_tx_1519_max_bad_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
245 	{"mac_rx_total_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
247 	{"mac_rx_total_oct_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
249 	{"mac_rx_good_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
251 	{"mac_rx_bad_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
253 	{"mac_rx_good_oct_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
255 	{"mac_rx_bad_oct_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
257 	{"mac_rx_uni_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
259 	{"mac_rx_multi_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
261 	{"mac_rx_broad_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
263 	{"mac_rx_undersize_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
265 	{"mac_rx_oversize_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
267 	{"mac_rx_64_oct_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
269 	{"mac_rx_65_127_oct_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
271 	{"mac_rx_128_255_oct_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
273 	{"mac_rx_256_511_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
275 	{"mac_rx_512_1023_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
277 	{"mac_rx_1024_1518_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
279 	{"mac_rx_1519_2047_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
281 	{"mac_rx_2048_4095_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
283 	{"mac_rx_4096_8191_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
285 	{"mac_rx_8192_9216_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
287 	{"mac_rx_9217_12287_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
289 	{"mac_rx_12288_16383_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
291 	{"mac_rx_1519_max_good_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
293 	{"mac_rx_1519_max_bad_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
295 
296 	{"mac_tx_fragment_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
298 	{"mac_tx_undermin_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
300 	{"mac_tx_jabber_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
302 	{"mac_tx_err_all_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
304 	{"mac_tx_from_app_good_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
306 	{"mac_tx_from_app_bad_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
308 	{"mac_rx_fragment_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
310 	{"mac_rx_undermin_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
312 	{"mac_rx_jabber_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
314 	{"mac_rx_fcs_err_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
316 	{"mac_rx_send_app_good_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
318 	{"mac_rx_send_app_bad_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
320 };
321 
322 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
323 	{
324 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
325 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
326 		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
327 		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
328 		.i_port_bitmap = 0x1,
329 	},
330 };
331 
332 static const u8 hclge_hash_key[] = {
333 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
334 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
335 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
336 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
337 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
338 };
339 
340 static const u32 hclge_dfx_bd_offset_list[] = {
341 	HCLGE_DFX_BIOS_BD_OFFSET,
342 	HCLGE_DFX_SSU_0_BD_OFFSET,
343 	HCLGE_DFX_SSU_1_BD_OFFSET,
344 	HCLGE_DFX_IGU_BD_OFFSET,
345 	HCLGE_DFX_RPU_0_BD_OFFSET,
346 	HCLGE_DFX_RPU_1_BD_OFFSET,
347 	HCLGE_DFX_NCSI_BD_OFFSET,
348 	HCLGE_DFX_RTC_BD_OFFSET,
349 	HCLGE_DFX_PPP_BD_OFFSET,
350 	HCLGE_DFX_RCB_BD_OFFSET,
351 	HCLGE_DFX_TQP_BD_OFFSET,
352 	HCLGE_DFX_SSU_2_BD_OFFSET
353 };
354 
355 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
356 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
357 	HCLGE_OPC_DFX_SSU_REG_0,
358 	HCLGE_OPC_DFX_SSU_REG_1,
359 	HCLGE_OPC_DFX_IGU_EGU_REG,
360 	HCLGE_OPC_DFX_RPU_REG_0,
361 	HCLGE_OPC_DFX_RPU_REG_1,
362 	HCLGE_OPC_DFX_NCSI_REG,
363 	HCLGE_OPC_DFX_RTC_REG,
364 	HCLGE_OPC_DFX_PPP_REG,
365 	HCLGE_OPC_DFX_RCB_REG,
366 	HCLGE_OPC_DFX_TQP_REG,
367 	HCLGE_OPC_DFX_SSU_REG_2
368 };
369 
370 static const struct key_info meta_data_key_info[] = {
371 	{ PACKET_TYPE_ID, 6},
372 	{ IP_FRAGEMENT, 1},
373 	{ ROCE_TYPE, 1},
374 	{ NEXT_KEY, 5},
375 	{ VLAN_NUMBER, 2},
376 	{ SRC_VPORT, 12},
377 	{ DST_VPORT, 12},
378 	{ TUNNEL_PACKET, 1},
379 };
380 
381 static const struct key_info tuple_key_info[] = {
382 	{ OUTER_DST_MAC, 48},
383 	{ OUTER_SRC_MAC, 48},
384 	{ OUTER_VLAN_TAG_FST, 16},
385 	{ OUTER_VLAN_TAG_SEC, 16},
386 	{ OUTER_ETH_TYPE, 16},
387 	{ OUTER_L2_RSV, 16},
388 	{ OUTER_IP_TOS, 8},
389 	{ OUTER_IP_PROTO, 8},
390 	{ OUTER_SRC_IP, 32},
391 	{ OUTER_DST_IP, 32},
392 	{ OUTER_L3_RSV, 16},
393 	{ OUTER_SRC_PORT, 16},
394 	{ OUTER_DST_PORT, 16},
395 	{ OUTER_L4_RSV, 32},
396 	{ OUTER_TUN_VNI, 24},
397 	{ OUTER_TUN_FLOW_ID, 8},
398 	{ INNER_DST_MAC, 48},
399 	{ INNER_SRC_MAC, 48},
400 	{ INNER_VLAN_TAG_FST, 16},
401 	{ INNER_VLAN_TAG_SEC, 16},
402 	{ INNER_ETH_TYPE, 16},
403 	{ INNER_L2_RSV, 16},
404 	{ INNER_IP_TOS, 8},
405 	{ INNER_IP_PROTO, 8},
406 	{ INNER_SRC_IP, 32},
407 	{ INNER_DST_IP, 32},
408 	{ INNER_L3_RSV, 16},
409 	{ INNER_SRC_PORT, 16},
410 	{ INNER_DST_PORT, 16},
411 	{ INNER_L4_RSV, 32},
412 };
413 
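/* Legacy MAC statistics read path: issue the fixed-size HCLGE_OPC_STATS_MAC
 * command (21 descriptors) and accumulate the returned 64-bit counters into
 * hdev->hw_stats.mac_stats. Used as a fallback when the firmware cannot
 * report the MAC register number (see hclge_mac_update_stats()).
 */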
414 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
415 {
416 #define HCLGE_MAC_CMD_NUM 21
417 
418 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
419 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
420 	__le64 *desc_data;
421 	int i, k, n;
422 	int ret;
423 
424 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
425 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
426 	if (ret) {
427 		dev_err(&hdev->pdev->dev,
428 			"Get MAC pkt stats fail, status = %d.\n", ret);
429 
430 		return ret;
431 	}
432 
433 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
434 		/* for special opcode 0032, only the first desc has the head */
435 		if (unlikely(i == 0)) {
436 			desc_data = (__le64 *)(&desc[i].data[0]);
437 			n = HCLGE_RD_FIRST_STATS_NUM;
438 		} else {
439 			desc_data = (__le64 *)(&desc[i]);
440 			n = HCLGE_RD_OTHER_STATS_NUM;
441 		}
442 
443 		for (k = 0; k < n; k++) {
444 			*data += le64_to_cpu(*desc_data);
445 			data++;
446 			desc_data++;
447 		}
448 	}
449 
450 	return 0;
451 }
452 
453 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
454 {
455 	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
456 	struct hclge_desc *desc;
457 	__le64 *desc_data;
458 	u16 i, k, n;
459 	int ret;
460 
461 	/* This may be called inside atomic sections,
462 	 * so GFP_ATOMIC is more suitable here
463 	 */
464 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
465 	if (!desc)
466 		return -ENOMEM;
467 
468 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
469 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
470 	if (ret) {
471 		kfree(desc);
472 		return ret;
473 	}
474 
475 	for (i = 0; i < desc_num; i++) {
476 		/* for special opcode 0034, only the first desc has the head */
477 		if (i == 0) {
478 			desc_data = (__le64 *)(&desc[i].data[0]);
479 			n = HCLGE_RD_FIRST_STATS_NUM;
480 		} else {
481 			desc_data = (__le64 *)(&desc[i]);
482 			n = HCLGE_RD_OTHER_STATS_NUM;
483 		}
484 
485 		for (k = 0; k < n; k++) {
486 			*data += le64_to_cpu(*desc_data);
487 			data++;
488 			desc_data++;
489 		}
490 	}
491 
492 	kfree(desc);
493 
494 	return 0;
495 }
496 
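/* Query how many MAC statistics registers the firmware exposes and convert
 * that into the number of descriptors needed to read them all: the first
 * descriptor carries three registers (the rest of it holds the command head),
 * each following descriptor carries four, and any remainder still costs a
 * whole descriptor.
 */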
497 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
498 {
499 	struct hclge_desc desc;
500 	__le32 *desc_data;
501 	u32 reg_num;
502 	int ret;
503 
504 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
505 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
506 	if (ret)
507 		return ret;
508 
509 	desc_data = (__le32 *)(&desc.data[0]);
510 	reg_num = le32_to_cpu(*desc_data);
511 
512 	*desc_num = 1 + ((reg_num - 3) >> 2) +
513 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
514 
515 	return 0;
516 }
517 
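/* Update MAC statistics: prefer the "complete" method whose descriptor count
 * is reported by the firmware, and fall back to the legacy fixed-size read
 * when the register-number query returns -EOPNOTSUPP.
 */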
518 static int hclge_mac_update_stats(struct hclge_dev *hdev)
519 {
520 	u32 desc_num;
521 	int ret;
522 
523 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
524 
525 	/* The firmware supports the new statistics acquisition method */
526 	if (!ret)
527 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
528 	else if (ret == -EOPNOTSUPP)
529 		ret = hclge_mac_update_stats_defective(hdev);
530 	else
531 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
532 
533 	return ret;
534 }
535 
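/* Query the per-queue RX and TX received-packet counters, one command per
 * queue and per direction, and accumulate them into each TQP's software
 * statistics.
 */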
536 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
537 {
538 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
539 	struct hclge_vport *vport = hclge_get_vport(handle);
540 	struct hclge_dev *hdev = vport->back;
541 	struct hnae3_queue *queue;
542 	struct hclge_desc desc[1];
543 	struct hclge_tqp *tqp;
544 	int ret, i;
545 
546 	for (i = 0; i < kinfo->num_tqps; i++) {
547 		queue = handle->kinfo.tqp[i];
548 		tqp = container_of(queue, struct hclge_tqp, q);
549 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
550 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
551 					   true);
552 
553 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
554 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
555 		if (ret) {
556 			dev_err(&hdev->pdev->dev,
557 				"Query tqp stat fail, status = %d,queue = %d\n",
558 				ret, i);
559 			return ret;
560 		}
561 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
562 			le32_to_cpu(desc[0].data[1]);
563 	}
564 
565 	for (i = 0; i < kinfo->num_tqps; i++) {
566 		queue = handle->kinfo.tqp[i];
567 		tqp = container_of(queue, struct hclge_tqp, q);
568 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
569 		hclge_cmd_setup_basic_desc(&desc[0],
570 					   HCLGE_OPC_QUERY_TX_STATUS,
571 					   true);
572 
573 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
574 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
575 		if (ret) {
576 			dev_err(&hdev->pdev->dev,
577 				"Query tqp stat fail, status = %d,queue = %d\n",
578 				ret, i);
579 			return ret;
580 		}
581 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
582 			le32_to_cpu(desc[0].data[1]);
583 	}
584 
585 	return 0;
586 }
587 
588 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
589 {
590 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
591 	struct hclge_tqp *tqp;
592 	u64 *buff = data;
593 	int i;
594 
595 	for (i = 0; i < kinfo->num_tqps; i++) {
596 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
597 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
598 	}
599 
600 	for (i = 0; i < kinfo->num_tqps; i++) {
601 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
603 	}
604 
605 	return buff;
606 }
607 
608 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
609 {
610 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
611 
612 	/* each TQP has one TX queue and one RX queue */
613 	return kinfo->num_tqps * (2);
614 }
615 
616 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
617 {
618 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
619 	u8 *buff = data;
620 	int i = 0;
621 
622 	for (i = 0; i < kinfo->num_tqps; i++) {
623 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
624 			struct hclge_tqp, q);
625 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
626 			 tqp->index);
627 		buff = buff + ETH_GSTRING_LEN;
628 	}
629 
630 	for (i = 0; i < kinfo->num_tqps; i++) {
631 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
632 			struct hclge_tqp, q);
633 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
634 			 tqp->index);
635 		buff = buff + ETH_GSTRING_LEN;
636 	}
637 
638 	return buff;
639 }
640 
641 static u64 *hclge_comm_get_stats(const void *comm_stats,
642 				 const struct hclge_comm_stats_str strs[],
643 				 int size, u64 *data)
644 {
645 	u64 *buf = data;
646 	u32 i;
647 
648 	for (i = 0; i < size; i++)
649 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
650 
651 	return buf + size;
652 }
653 
654 static u8 *hclge_comm_get_strings(u32 stringset,
655 				  const struct hclge_comm_stats_str strs[],
656 				  int size, u8 *data)
657 {
658 	char *buff = (char *)data;
659 	u32 i;
660 
661 	if (stringset != ETH_SS_STATS)
662 		return buff;
663 
664 	for (i = 0; i < size; i++) {
665 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return (u8 *)buff;
670 }
671 
672 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
673 {
674 	struct hnae3_handle *handle;
675 	int status;
676 
677 	handle = &hdev->vport[0].nic;
678 	if (handle->client) {
679 		status = hclge_tqps_update_stats(handle);
680 		if (status) {
681 			dev_err(&hdev->pdev->dev,
682 				"Update TQPS stats fail, status = %d.\n",
683 				status);
684 		}
685 	}
686 
687 	status = hclge_mac_update_stats(hdev);
688 	if (status)
689 		dev_err(&hdev->pdev->dev,
690 			"Update MAC stats fail, status = %d.\n", status);
691 }
692 
693 static void hclge_update_stats(struct hnae3_handle *handle,
694 			       struct net_device_stats *net_stats)
695 {
696 	struct hclge_vport *vport = hclge_get_vport(handle);
697 	struct hclge_dev *hdev = vport->back;
698 	int status;
699 
700 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
701 		return;
702 
703 	status = hclge_mac_update_stats(hdev);
704 	if (status)
705 		dev_err(&hdev->pdev->dev,
706 			"Update MAC stats fail, status = %d.\n",
707 			status);
708 
709 	status = hclge_tqps_update_stats(handle);
710 	if (status)
711 		dev_err(&hdev->pdev->dev,
712 			"Update TQPS stats fail, status = %d.\n",
713 			status);
714 
715 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
716 }
717 
718 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
719 {
720 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
721 		HNAE3_SUPPORT_PHY_LOOPBACK |\
722 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
723 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
724 
725 	struct hclge_vport *vport = hclge_get_vport(handle);
726 	struct hclge_dev *hdev = vport->back;
727 	int count = 0;
728 
729 	/* Loopback test support rules:
730 	 * mac: only supported in GE mode
731 	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
732 	 * phy: only supported when a PHY device exists on the board
733 	 */
734 	if (stringset == ETH_SS_TEST) {
735 		/* clear loopback bit flags at first */
736 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
737 		if (hdev->pdev->revision >= 0x21 ||
738 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
739 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
740 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
741 			count += 1;
742 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
743 		}
744 
745 		count += 2;
746 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
747 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
748 
749 		if (hdev->hw.mac.phydev) {
750 			count += 1;
751 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
752 		}
753 
754 	} else if (stringset == ETH_SS_STATS) {
755 		count = ARRAY_SIZE(g_mac_stats_string) +
756 			hclge_tqps_get_sset_count(handle, stringset);
757 	}
758 
759 	return count;
760 }
761 
762 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
763 			      u8 *data)
764 {
765 	u8 *p = data;
766 	int size;
767 
768 	if (stringset == ETH_SS_STATS) {
769 		size = ARRAY_SIZE(g_mac_stats_string);
770 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
771 					   size, p);
772 		p = hclge_tqps_get_strings(handle, p);
773 	} else if (stringset == ETH_SS_TEST) {
774 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
775 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
776 			       ETH_GSTRING_LEN);
777 			p += ETH_GSTRING_LEN;
778 		}
779 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
780 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
781 			       ETH_GSTRING_LEN);
782 			p += ETH_GSTRING_LEN;
783 		}
784 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
785 			memcpy(p,
786 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
787 			       ETH_GSTRING_LEN);
788 			p += ETH_GSTRING_LEN;
789 		}
790 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
791 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
792 			       ETH_GSTRING_LEN);
793 			p += ETH_GSTRING_LEN;
794 		}
795 	}
796 }
797 
798 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
799 {
800 	struct hclge_vport *vport = hclge_get_vport(handle);
801 	struct hclge_dev *hdev = vport->back;
802 	u64 *p;
803 
804 	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
805 				 ARRAY_SIZE(g_mac_stats_string), data);
806 	p = hclge_tqps_get_stats(handle, p);
807 }
808 
809 static void hclge_get_mac_stat(struct hnae3_handle *handle,
810 			       struct hns3_mac_stats *mac_stats)
811 {
812 	struct hclge_vport *vport = hclge_get_vport(handle);
813 	struct hclge_dev *hdev = vport->back;
814 
815 	hclge_update_stats(handle, NULL);
816 
817 	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
818 	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
819 }
820 
821 static int hclge_parse_func_status(struct hclge_dev *hdev,
822 				   struct hclge_func_status_cmd *status)
823 {
824 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
825 		return -EINVAL;
826 
827 	/* Set or clear the main-PF flag according to the reported PF state */
828 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
829 		hdev->flag |= HCLGE_FLAG_MAIN;
830 	else
831 		hdev->flag &= ~HCLGE_FLAG_MAIN;
832 
833 	return 0;
834 }
835 
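/* Poll the function status until the PF state is reported (i.e. PF reset has
 * completed) or HCLGE_QUERY_MAX_CNT attempts have been made, then record
 * whether this PF is the main PF.
 */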
836 static int hclge_query_function_status(struct hclge_dev *hdev)
837 {
838 #define HCLGE_QUERY_MAX_CNT	5
839 
840 	struct hclge_func_status_cmd *req;
841 	struct hclge_desc desc;
842 	int timeout = 0;
843 	int ret;
844 
845 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
846 	req = (struct hclge_func_status_cmd *)desc.data;
847 
848 	do {
849 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
850 		if (ret) {
851 			dev_err(&hdev->pdev->dev,
852 				"query function status failed %d.\n", ret);
853 			return ret;
854 		}
855 
856 		/* Check whether PF reset is done */
857 		if (req->pf_state)
858 			break;
859 		usleep_range(1000, 2000);
860 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
861 
862 	ret = hclge_parse_func_status(hdev, req);
863 
864 	return ret;
865 }
866 
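/* Query the TQP, packet buffer and MSI-X resources assigned to this PF.
 * When RoCE is supported, NIC and RoCE vectors share one MSI-X range with
 * the NIC vectors placed before the RoCE vectors.
 */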
867 static int hclge_query_pf_resource(struct hclge_dev *hdev)
868 {
869 	struct hclge_pf_res_cmd *req;
870 	struct hclge_desc desc;
871 	int ret;
872 
873 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
874 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
875 	if (ret) {
876 		dev_err(&hdev->pdev->dev,
877 			"query pf resource failed %d.\n", ret);
878 		return ret;
879 	}
880 
881 	req = (struct hclge_pf_res_cmd *)desc.data;
882 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
883 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
884 
885 	if (req->tx_buf_size)
886 		hdev->tx_buf_size =
887 			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
888 	else
889 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
890 
891 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
892 
893 	if (req->dv_buf_size)
894 		hdev->dv_buf_size =
895 			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
896 	else
897 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
898 
899 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
900 
901 	if (hnae3_dev_roce_supported(hdev)) {
902 		hdev->roce_base_msix_offset =
903 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
904 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
905 		hdev->num_roce_msi =
906 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
907 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
908 
909 		/* NIC's msix number is always equal to RoCE's. */
910 		hdev->num_nic_msi = hdev->num_roce_msi;
911 
912 		/* PF should have both NIC vectors and RoCE vectors,
913 		 * with NIC vectors queued before RoCE vectors.
914 		 */
915 		hdev->num_msi = hdev->num_roce_msi +
916 				hdev->roce_base_msix_offset;
917 	} else {
918 		hdev->num_msi =
919 		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
920 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
921 
922 		hdev->num_nic_msi = hdev->num_msi;
923 	}
924 
925 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
926 		dev_err(&hdev->pdev->dev,
927 			"Just %u msi resources, not enough for pf(min:2).\n",
928 			hdev->num_nic_msi);
929 		return -EINVAL;
930 	}
931 
932 	return 0;
933 }
934 
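/* Map the speed code reported by the firmware to a MAC speed value. The codes
 * are not in speed order: 6 and 7 select 10M and 100M, while 0..5 select
 * 1G/10G/25G/40G/50G/100G.
 */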
935 static int hclge_parse_speed(int speed_cmd, int *speed)
936 {
937 	switch (speed_cmd) {
938 	case 6:
939 		*speed = HCLGE_MAC_SPEED_10M;
940 		break;
941 	case 7:
942 		*speed = HCLGE_MAC_SPEED_100M;
943 		break;
944 	case 0:
945 		*speed = HCLGE_MAC_SPEED_1G;
946 		break;
947 	case 1:
948 		*speed = HCLGE_MAC_SPEED_10G;
949 		break;
950 	case 2:
951 		*speed = HCLGE_MAC_SPEED_25G;
952 		break;
953 	case 3:
954 		*speed = HCLGE_MAC_SPEED_40G;
955 		break;
956 	case 4:
957 		*speed = HCLGE_MAC_SPEED_50G;
958 		break;
959 	case 5:
960 		*speed = HCLGE_MAC_SPEED_100G;
961 		break;
962 	default:
963 		return -EINVAL;
964 	}
965 
966 	return 0;
967 }
968 
969 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
970 {
971 	struct hclge_vport *vport = hclge_get_vport(handle);
972 	struct hclge_dev *hdev = vport->back;
973 	u32 speed_ability = hdev->hw.mac.speed_ability;
974 	u32 speed_bit = 0;
975 
976 	switch (speed) {
977 	case HCLGE_MAC_SPEED_10M:
978 		speed_bit = HCLGE_SUPPORT_10M_BIT;
979 		break;
980 	case HCLGE_MAC_SPEED_100M:
981 		speed_bit = HCLGE_SUPPORT_100M_BIT;
982 		break;
983 	case HCLGE_MAC_SPEED_1G:
984 		speed_bit = HCLGE_SUPPORT_1G_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_10G:
987 		speed_bit = HCLGE_SUPPORT_10G_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_25G:
990 		speed_bit = HCLGE_SUPPORT_25G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_40G:
993 		speed_bit = HCLGE_SUPPORT_40G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_50G:
996 		speed_bit = HCLGE_SUPPORT_50G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_100G:
999 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1000 		break;
1001 	default:
1002 		return -EINVAL;
1003 	}
1004 
1005 	if (speed_bit & speed_ability)
1006 		return 0;
1007 
1008 	return -EINVAL;
1009 }
1010 
1011 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1012 {
1013 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1014 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1015 				 mac->supported);
1016 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1017 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1018 				 mac->supported);
1019 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1027 				 mac->supported);
1028 }
1029 
1030 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1031 {
1032 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1033 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1034 				 mac->supported);
1035 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1036 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1037 				 mac->supported);
1038 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1046 				 mac->supported);
1047 }
1048 
1049 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1050 {
1051 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1059 				 mac->supported);
1060 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1065 				 mac->supported);
1066 }
1067 
1068 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1069 {
1070 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1080 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1081 				 mac->supported);
1082 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1083 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1084 				 mac->supported);
1085 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1086 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1087 				 mac->supported);
1088 }
1089 
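/* Set the supported FEC link modes and the FEC ability mask according to the
 * current MAC speed: BASE-R for 10G/40G, RS (plus BASE-R ability) for
 * 25G/50G, RS only for 100G, and no FEC otherwise.
 */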
1090 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1091 {
1092 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1093 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1094 
1095 	switch (mac->speed) {
1096 	case HCLGE_MAC_SPEED_10G:
1097 	case HCLGE_MAC_SPEED_40G:
1098 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1099 				 mac->supported);
1100 		mac->fec_ability =
1101 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1102 		break;
1103 	case HCLGE_MAC_SPEED_25G:
1104 	case HCLGE_MAC_SPEED_50G:
1105 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1106 				 mac->supported);
1107 		mac->fec_ability =
1108 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1109 			BIT(HNAE3_FEC_AUTO);
1110 		break;
1111 	case HCLGE_MAC_SPEED_100G:
1112 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1113 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1114 		break;
1115 	default:
1116 		mac->fec_ability = 0;
1117 		break;
1118 	}
1119 }
1120 
1121 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1122 					u8 speed_ability)
1123 {
1124 	struct hclge_mac *mac = &hdev->hw.mac;
1125 
1126 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1127 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1128 				 mac->supported);
1129 
1130 	hclge_convert_setting_sr(mac, speed_ability);
1131 	hclge_convert_setting_lr(mac, speed_ability);
1132 	hclge_convert_setting_cr(mac, speed_ability);
1133 	if (hdev->pdev->revision >= 0x21)
1134 		hclge_convert_setting_fec(mac);
1135 
1136 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1137 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1138 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1139 }
1140 
1141 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1142 					    u8 speed_ability)
1143 {
1144 	struct hclge_mac *mac = &hdev->hw.mac;
1145 
1146 	hclge_convert_setting_kr(mac, speed_ability);
1147 	if (hdev->pdev->revision >= 0x21)
1148 		hclge_convert_setting_fec(mac);
1149 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1150 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1151 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1152 }
1153 
1154 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1155 					 u8 speed_ability)
1156 {
1157 	unsigned long *supported = hdev->hw.mac.supported;
1158 
1159 	/* default to supporting all speeds for a GE port */
1160 	if (!speed_ability)
1161 		speed_ability = HCLGE_SUPPORT_GE;
1162 
1163 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1164 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1165 				 supported);
1166 
1167 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1168 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1169 				 supported);
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1171 				 supported);
1172 	}
1173 
1174 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1175 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1177 	}
1178 
1179 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1180 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1181 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1183 }
1184 
1185 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1186 {
1187 	u8 media_type = hdev->hw.mac.media_type;
1188 
1189 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1190 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1191 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1192 		hclge_parse_copper_link_mode(hdev, speed_ability);
1193 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1194 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1195 }
1196 
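/* Decode the raw parameters returned by HCLGE_OPC_GET_CFG_PARAM into
 * struct hclge_cfg. The MAC address is split across two parameters: the low
 * 32 bits come from param[2] and the high 16 bits from param[3].
 */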
1197 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1198 {
1199 	struct hclge_cfg_param_cmd *req;
1200 	u64 mac_addr_tmp_high;
1201 	u64 mac_addr_tmp;
1202 	unsigned int i;
1203 
1204 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1205 
1206 	/* get the configuration */
1207 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1208 					      HCLGE_CFG_VMDQ_M,
1209 					      HCLGE_CFG_VMDQ_S);
1210 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1211 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1212 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1213 					    HCLGE_CFG_TQP_DESC_N_M,
1214 					    HCLGE_CFG_TQP_DESC_N_S);
1215 
1216 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1217 					HCLGE_CFG_PHY_ADDR_M,
1218 					HCLGE_CFG_PHY_ADDR_S);
1219 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1220 					  HCLGE_CFG_MEDIA_TP_M,
1221 					  HCLGE_CFG_MEDIA_TP_S);
1222 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1223 					  HCLGE_CFG_RX_BUF_LEN_M,
1224 					  HCLGE_CFG_RX_BUF_LEN_S);
1225 	/* get mac_address */
1226 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1227 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1228 					    HCLGE_CFG_MAC_ADDR_H_M,
1229 					    HCLGE_CFG_MAC_ADDR_H_S);
1230 
1231 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1232 
1233 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1234 					     HCLGE_CFG_DEFAULT_SPEED_M,
1235 					     HCLGE_CFG_DEFAULT_SPEED_S);
1236 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1237 					    HCLGE_CFG_RSS_SIZE_M,
1238 					    HCLGE_CFG_RSS_SIZE_S);
1239 
1240 	for (i = 0; i < ETH_ALEN; i++)
1241 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1242 
1243 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1244 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1245 
1246 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247 					     HCLGE_CFG_SPEED_ABILITY_M,
1248 					     HCLGE_CFG_SPEED_ABILITY_S);
1249 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1251 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1252 	if (!cfg->umv_space)
1253 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1254 }
1255 
1256 /* hclge_get_cfg: query the static parameters from flash
1257  * @hdev: pointer to struct hclge_dev
1258  * @hcfg: the config structure to be filled out
1259  */
1260 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1261 {
1262 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1263 	struct hclge_cfg_param_cmd *req;
1264 	unsigned int i;
1265 	int ret;
1266 
1267 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1268 		u32 offset = 0;
1269 
1270 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1271 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1272 					   true);
1273 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1274 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1275 		/* Len should be in units of 4 bytes when sent to hardware */
1276 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1277 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1278 		req->offset = cpu_to_le32(offset);
1279 	}
1280 
1281 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1282 	if (ret) {
1283 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1284 		return ret;
1285 	}
1286 
1287 	hclge_parse_cfg(hcfg, desc);
1288 
1289 	return 0;
1290 }
1291 
1292 static int hclge_get_cap(struct hclge_dev *hdev)
1293 {
1294 	int ret;
1295 
1296 	ret = hclge_query_function_status(hdev);
1297 	if (ret) {
1298 		dev_err(&hdev->pdev->dev,
1299 			"query function status error %d.\n", ret);
1300 		return ret;
1301 	}
1302 
1303 	/* get pf resource */
1304 	ret = hclge_query_pf_resource(hdev);
1305 	if (ret)
1306 		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1307 
1308 	return ret;
1309 }
1310 
1311 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1312 {
1313 #define HCLGE_MIN_TX_DESC	64
1314 #define HCLGE_MIN_RX_DESC	64
1315 
1316 	if (!is_kdump_kernel())
1317 		return;
1318 
1319 	dev_info(&hdev->pdev->dev,
1320 		 "Running kdump kernel. Using minimal resources\n");
1321 
1322 	/* the minimum number of queue pairs equals the number of vports */
1323 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1324 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1325 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1326 }
1327 
1328 static int hclge_configure(struct hclge_dev *hdev)
1329 {
1330 	struct hclge_cfg cfg;
1331 	unsigned int i;
1332 	int ret;
1333 
1334 	ret = hclge_get_cfg(hdev, &cfg);
1335 	if (ret) {
1336 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1337 		return ret;
1338 	}
1339 
1340 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1341 	hdev->base_tqp_pid = 0;
1342 	hdev->rss_size_max = cfg.rss_size_max;
1343 	hdev->rx_buf_len = cfg.rx_buf_len;
1344 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1345 	hdev->hw.mac.media_type = cfg.media_type;
1346 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1347 	hdev->num_tx_desc = cfg.tqp_desc_num;
1348 	hdev->num_rx_desc = cfg.tqp_desc_num;
1349 	hdev->tm_info.num_pg = 1;
1350 	hdev->tc_max = cfg.tc_num;
1351 	hdev->tm_info.hw_pfc_map = 0;
1352 	hdev->wanted_umv_size = cfg.umv_space;
1353 
1354 	if (hnae3_dev_fd_supported(hdev)) {
1355 		hdev->fd_en = true;
1356 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1357 	}
1358 
1359 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1360 	if (ret) {
1361 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1362 		return ret;
1363 	}
1364 
1365 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1366 
1367 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1368 	    (hdev->tc_max < 1)) {
1369 		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1370 			 hdev->tc_max);
1371 		hdev->tc_max = 1;
1372 	}
1373 
1374 	/* Dev does not support DCB */
1375 	if (!hnae3_dev_dcb_supported(hdev)) {
1376 		hdev->tc_max = 1;
1377 		hdev->pfc_max = 0;
1378 	} else {
1379 		hdev->pfc_max = hdev->tc_max;
1380 	}
1381 
1382 	hdev->tm_info.num_tc = 1;
1383 
1384 	/* non-contiguous TCs are currently not supported */
1385 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1386 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1387 
1388 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1389 
1390 	hclge_init_kdump_kernel_config(hdev);
1391 
1392 	/* Set the initial affinity based on the PCI function number */
1393 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1394 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1395 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1396 			&hdev->affinity_mask);
1397 
1398 	return ret;
1399 }
1400 
1401 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1402 			    unsigned int tso_mss_max)
1403 {
1404 	struct hclge_cfg_tso_status_cmd *req;
1405 	struct hclge_desc desc;
1406 	u16 tso_mss;
1407 
1408 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1409 
1410 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1411 
1412 	tso_mss = 0;
1413 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1414 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1415 	req->tso_mss_min = cpu_to_le16(tso_mss);
1416 
1417 	tso_mss = 0;
1418 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1419 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1420 	req->tso_mss_max = cpu_to_le16(tso_mss);
1421 
1422 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1423 }
1424 
1425 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1426 {
1427 	struct hclge_cfg_gro_status_cmd *req;
1428 	struct hclge_desc desc;
1429 	int ret;
1430 
1431 	if (!hnae3_dev_gro_supported(hdev))
1432 		return 0;
1433 
1434 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1435 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1436 
1437 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1438 
1439 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1440 	if (ret)
1441 		dev_err(&hdev->pdev->dev,
1442 			"GRO hardware config cmd failed, ret = %d\n", ret);
1443 
1444 	return ret;
1445 }
1446 
1447 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1448 {
1449 	struct hclge_tqp *tqp;
1450 	int i;
1451 
1452 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1453 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1454 	if (!hdev->htqp)
1455 		return -ENOMEM;
1456 
1457 	tqp = hdev->htqp;
1458 
1459 	for (i = 0; i < hdev->num_tqps; i++) {
1460 		tqp->dev = &hdev->pdev->dev;
1461 		tqp->index = i;
1462 
1463 		tqp->q.ae_algo = &ae_algo;
1464 		tqp->q.buf_size = hdev->rx_buf_len;
1465 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1466 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1467 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1468 			i * HCLGE_TQP_REG_SIZE;
1469 
1470 		tqp++;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
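/* Bind physical queue pair tqp_pid to function func_id as its virtual queue
 * tqp_vid via the HCLGE_OPC_SET_TQP_MAP command; is_pf selects whether the
 * target function is the PF or a VF.
 */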
1476 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1477 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1478 {
1479 	struct hclge_tqp_map_cmd *req;
1480 	struct hclge_desc desc;
1481 	int ret;
1482 
1483 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1484 
1485 	req = (struct hclge_tqp_map_cmd *)desc.data;
1486 	req->tqp_id = cpu_to_le16(tqp_pid);
1487 	req->tqp_vf = func_id;
1488 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1489 	if (!is_pf)
1490 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1491 	req->tqp_vid = cpu_to_le16(tqp_vid);
1492 
1493 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1494 	if (ret)
1495 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1496 
1497 	return ret;
1498 }
1499 
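/* Hand out up to num_tqps currently unallocated TQPs from the PF pool to this
 * vport, then derive its RSS size: at most rss_size_max, at most the
 * allocated TQPs per TC, and small enough that every queue can still get its
 * own interrupt vector.
 */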
1500 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1501 {
1502 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1503 	struct hclge_dev *hdev = vport->back;
1504 	int i, alloced;
1505 
1506 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1507 	     alloced < num_tqps; i++) {
1508 		if (!hdev->htqp[i].alloced) {
1509 			hdev->htqp[i].q.handle = &vport->nic;
1510 			hdev->htqp[i].q.tqp_index = alloced;
1511 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1512 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1513 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1514 			hdev->htqp[i].alloced = true;
1515 			alloced++;
1516 		}
1517 	}
1518 	vport->alloc_tqps = alloced;
1519 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1520 				vport->alloc_tqps / hdev->tm_info.num_tc);
1521 
1522 	/* ensure a one-to-one mapping between IRQ and queue by default */
1523 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1524 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1525 
1526 	return 0;
1527 }
1528 
1529 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1530 			    u16 num_tx_desc, u16 num_rx_desc)
1531 
1532 {
1533 	struct hnae3_handle *nic = &vport->nic;
1534 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1535 	struct hclge_dev *hdev = vport->back;
1536 	int ret;
1537 
1538 	kinfo->num_tx_desc = num_tx_desc;
1539 	kinfo->num_rx_desc = num_rx_desc;
1540 
1541 	kinfo->rx_buf_len = hdev->rx_buf_len;
1542 
1543 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1544 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1545 	if (!kinfo->tqp)
1546 		return -ENOMEM;
1547 
1548 	ret = hclge_assign_tqp(vport, num_tqps);
1549 	if (ret)
1550 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1551 
1552 	return ret;
1553 }
1554 
1555 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1556 				  struct hclge_vport *vport)
1557 {
1558 	struct hnae3_handle *nic = &vport->nic;
1559 	struct hnae3_knic_private_info *kinfo;
1560 	u16 i;
1561 
1562 	kinfo = &nic->kinfo;
1563 	for (i = 0; i < vport->alloc_tqps; i++) {
1564 		struct hclge_tqp *q =
1565 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1566 		bool is_pf;
1567 		int ret;
1568 
1569 		is_pf = !(vport->vport_id);
1570 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1571 					     i, is_pf);
1572 		if (ret)
1573 			return ret;
1574 	}
1575 
1576 	return 0;
1577 }
1578 
1579 static int hclge_map_tqp(struct hclge_dev *hdev)
1580 {
1581 	struct hclge_vport *vport = hdev->vport;
1582 	u16 i, num_vport;
1583 
1584 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1585 	for (i = 0; i < num_vport; i++)	{
1586 		int ret;
1587 
1588 		ret = hclge_map_tqp_to_vport(hdev, vport);
1589 		if (ret)
1590 			return ret;
1591 
1592 		vport++;
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1599 {
1600 	struct hnae3_handle *nic = &vport->nic;
1601 	struct hclge_dev *hdev = vport->back;
1602 	int ret;
1603 
1604 	nic->pdev = hdev->pdev;
1605 	nic->ae_algo = &ae_algo;
1606 	nic->numa_node_mask = hdev->numa_node_mask;
1607 
1608 	ret = hclge_knic_setup(vport, num_tqps,
1609 			       hdev->num_tx_desc, hdev->num_rx_desc);
1610 	if (ret)
1611 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1612 
1613 	return ret;
1614 }
1615 
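/* Allocate one vport for the PF's main NIC plus one per VMDq vport and per
 * requested VF, and distribute the TQPs evenly among them; the main vport
 * also takes the remainder.
 */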
1616 static int hclge_alloc_vport(struct hclge_dev *hdev)
1617 {
1618 	struct pci_dev *pdev = hdev->pdev;
1619 	struct hclge_vport *vport;
1620 	u32 tqp_main_vport;
1621 	u32 tqp_per_vport;
1622 	int num_vport, i;
1623 	int ret;
1624 
1625 	/* We need to alloc a vport for the main NIC of the PF */
1626 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1627 
1628 	if (hdev->num_tqps < num_vport) {
1629 		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1630 			hdev->num_tqps, num_vport);
1631 		return -EINVAL;
1632 	}
1633 
1634 	/* Alloc the same number of TQPs for every vport */
1635 	tqp_per_vport = hdev->num_tqps / num_vport;
1636 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1637 
1638 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1639 			     GFP_KERNEL);
1640 	if (!vport)
1641 		return -ENOMEM;
1642 
1643 	hdev->vport = vport;
1644 	hdev->num_alloc_vport = num_vport;
1645 
1646 	if (IS_ENABLED(CONFIG_PCI_IOV))
1647 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1648 
1649 	for (i = 0; i < num_vport; i++) {
1650 		vport->back = hdev;
1651 		vport->vport_id = i;
1652 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1653 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1654 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1655 		INIT_LIST_HEAD(&vport->vlan_list);
1656 		INIT_LIST_HEAD(&vport->uc_mac_list);
1657 		INIT_LIST_HEAD(&vport->mc_mac_list);
1658 
1659 		if (i == 0)
1660 			ret = hclge_vport_setup(vport, tqp_main_vport);
1661 		else
1662 			ret = hclge_vport_setup(vport, tqp_per_vport);
1663 		if (ret) {
1664 			dev_err(&pdev->dev,
1665 				"vport setup failed for vport %d, %d\n",
1666 				i, ret);
1667 			return ret;
1668 		}
1669 
1670 		vport++;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1677 				    struct hclge_pkt_buf_alloc *buf_alloc)
1678 {
1679 /* TX buffer size is in units of 128 bytes */
1680 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1681 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1682 	struct hclge_tx_buff_alloc_cmd *req;
1683 	struct hclge_desc desc;
1684 	int ret;
1685 	u8 i;
1686 
1687 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1688 
1689 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1690 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1691 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1692 
1693 		req->tx_pkt_buff[i] =
1694 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1695 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1696 	}
1697 
1698 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1699 	if (ret)
1700 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1701 			ret);
1702 
1703 	return ret;
1704 }
1705 
1706 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1707 				 struct hclge_pkt_buf_alloc *buf_alloc)
1708 {
1709 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1710 
1711 	if (ret)
1712 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1713 
1714 	return ret;
1715 }
1716 
1717 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1718 {
1719 	unsigned int i;
1720 	u32 cnt = 0;
1721 
1722 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1723 		if (hdev->hw_tc_map & BIT(i))
1724 			cnt++;
1725 	return cnt;
1726 }
1727 
/* Get the number of PFC-enabled TCs that have a private buffer */
1729 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1730 				  struct hclge_pkt_buf_alloc *buf_alloc)
1731 {
1732 	struct hclge_priv_buf *priv;
1733 	unsigned int i;
1734 	int cnt = 0;
1735 
1736 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1737 		priv = &buf_alloc->priv_buf[i];
1738 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1739 		    priv->enable)
1740 			cnt++;
1741 	}
1742 
1743 	return cnt;
1744 }
1745 
/* Get the number of PFC-disabled TCs that have a private buffer */
1747 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1748 				     struct hclge_pkt_buf_alloc *buf_alloc)
1749 {
1750 	struct hclge_priv_buf *priv;
1751 	unsigned int i;
1752 	int cnt = 0;
1753 
1754 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1755 		priv = &buf_alloc->priv_buf[i];
1756 		if (hdev->hw_tc_map & BIT(i) &&
1757 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1758 		    priv->enable)
1759 			cnt++;
1760 	}
1761 
1762 	return cnt;
1763 }
1764 
1765 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767 	struct hclge_priv_buf *priv;
1768 	u32 rx_priv = 0;
1769 	int i;
1770 
1771 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772 		priv = &buf_alloc->priv_buf[i];
1773 		if (priv->enable)
1774 			rx_priv += priv->buf_size;
1775 	}
1776 	return rx_priv;
1777 }
1778 
1779 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1780 {
1781 	u32 i, total_tx_size = 0;
1782 
1783 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1784 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1785 
1786 	return total_tx_size;
1787 }
1788 
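/* Check whether the RX buffer left over after the per-TC private buffers
 * (@rx_all minus what hclge_get_rx_priv_buff_alloced() reports) is large
 * enough to serve as the shared buffer. If it is, record the shared buffer
 * size and derive its high/low waterlines and the per-TC thresholds.
 */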
1789 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1790 				struct hclge_pkt_buf_alloc *buf_alloc,
1791 				u32 rx_all)
1792 {
1793 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1794 	u32 tc_num = hclge_get_tc_num(hdev);
1795 	u32 shared_buf, aligned_mps;
1796 	u32 rx_priv;
1797 	int i;
1798 
1799 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1800 
1801 	if (hnae3_dev_dcb_supported(hdev))
1802 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1803 					hdev->dv_buf_size;
1804 	else
1805 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1806 					+ hdev->dv_buf_size;
1807 
1808 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1809 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1810 			     HCLGE_BUF_SIZE_UNIT);
1811 
1812 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1813 	if (rx_all < rx_priv + shared_std)
1814 		return false;
1815 
1816 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1817 	buf_alloc->s_buf.buf_size = shared_buf;
1818 	if (hnae3_dev_dcb_supported(hdev)) {
1819 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1820 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1821 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1822 				  HCLGE_BUF_SIZE_UNIT);
1823 	} else {
1824 		buf_alloc->s_buf.self.high = aligned_mps +
1825 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1826 		buf_alloc->s_buf.self.low = aligned_mps;
1827 	}
1828 
1829 	if (hnae3_dev_dcb_supported(hdev)) {
1830 		hi_thrd = shared_buf - hdev->dv_buf_size;
1831 
1832 		if (tc_num <= NEED_RESERVE_TC_NUM)
1833 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1834 					/ BUF_MAX_PERCENT;
1835 
1836 		if (tc_num)
1837 			hi_thrd = hi_thrd / tc_num;
1838 
1839 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1840 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1841 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1842 	} else {
1843 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1844 		lo_thrd = aligned_mps;
1845 	}
1846 
1847 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1848 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1849 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1850 	}
1851 
1852 	return true;
1853 }
1854 
1855 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1856 				struct hclge_pkt_buf_alloc *buf_alloc)
1857 {
1858 	u32 i, total_size;
1859 
1860 	total_size = hdev->pkt_buf_size;
1861 
1862 	/* alloc tx buffer for all enabled tc */
1863 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1864 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1865 
1866 		if (hdev->hw_tc_map & BIT(i)) {
1867 			if (total_size < hdev->tx_buf_size)
1868 				return -ENOMEM;
1869 
1870 			priv->tx_buf_size = hdev->tx_buf_size;
1871 		} else {
1872 			priv->tx_buf_size = 0;
1873 		}
1874 
1875 		total_size -= priv->tx_buf_size;
1876 	}
1877 
1878 	return 0;
1879 }
1880 
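/* Assign private RX buffers and waterlines for every enabled TC. When @max
 * is true the larger (preferred) waterlines are used, otherwise a smaller
 * fallback set is tried. Returns true only if the resulting layout still
 * leaves enough room for the shared buffer (see hclge_is_rx_buf_ok()).
 */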
1881 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1882 				  struct hclge_pkt_buf_alloc *buf_alloc)
1883 {
1884 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1885 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1886 	unsigned int i;
1887 
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890 
1891 		priv->enable = 0;
1892 		priv->wl.low = 0;
1893 		priv->wl.high = 0;
1894 		priv->buf_size = 0;
1895 
1896 		if (!(hdev->hw_tc_map & BIT(i)))
1897 			continue;
1898 
1899 		priv->enable = 1;
1900 
1901 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1902 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1903 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1904 						HCLGE_BUF_SIZE_UNIT);
1905 		} else {
1906 			priv->wl.low = 0;
1907 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1908 					aligned_mps;
1909 		}
1910 
1911 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1912 	}
1913 
1914 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1915 }
1916 
1917 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1918 					  struct hclge_pkt_buf_alloc *buf_alloc)
1919 {
1920 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1921 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1922 	int i;
1923 
	/* let the last one be cleared first */
1925 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1926 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1927 		unsigned int mask = BIT((unsigned int)i);
1928 
1929 		if (hdev->hw_tc_map & mask &&
1930 		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the non-PFC TC's private buffer */
1932 			priv->wl.low = 0;
1933 			priv->wl.high = 0;
1934 			priv->buf_size = 0;
1935 			priv->enable = 0;
1936 			no_pfc_priv_num--;
1937 		}
1938 
1939 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1940 		    no_pfc_priv_num == 0)
1941 			break;
1942 	}
1943 
1944 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1945 }
1946 
1947 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1948 					struct hclge_pkt_buf_alloc *buf_alloc)
1949 {
1950 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1951 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1952 	int i;
1953 
	/* let the last one be cleared first */
1955 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1956 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1957 		unsigned int mask = BIT((unsigned int)i);
1958 
1959 		if (hdev->hw_tc_map & mask &&
1960 		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with a private buffer */
1962 			priv->wl.low = 0;
1963 			priv->enable = 0;
1964 			priv->wl.high = 0;
1965 			priv->buf_size = 0;
1966 			pfc_priv_num--;
1967 		}
1968 
1969 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1970 		    pfc_priv_num == 0)
1971 			break;
1972 	}
1973 
1974 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1975 }
1976 
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
1979 {
1980 #define COMPENSATE_BUFFER	0x3C00
1981 #define COMPENSATE_HALF_MPS_NUM	5
1982 #define PRIV_WL_GAP		0x1800
1983 
1984 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1985 	u32 tc_num = hclge_get_tc_num(hdev);
1986 	u32 half_mps = hdev->mps >> 1;
1987 	u32 min_rx_priv;
1988 	unsigned int i;
1989 
1990 	if (tc_num)
1991 		rx_priv = rx_priv / tc_num;
1992 
1993 	if (tc_num <= NEED_RESERVE_TC_NUM)
1994 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1995 
1996 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1997 			COMPENSATE_HALF_MPS_NUM * half_mps;
1998 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1999 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2000 
2001 	if (rx_priv < min_rx_priv)
2002 		return false;
2003 
2004 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2005 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2006 
2007 		priv->enable = 0;
2008 		priv->wl.low = 0;
2009 		priv->wl.high = 0;
2010 		priv->buf_size = 0;
2011 
2012 		if (!(hdev->hw_tc_map & BIT(i)))
2013 			continue;
2014 
2015 		priv->enable = 1;
2016 		priv->buf_size = rx_priv;
2017 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2018 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2019 	}
2020 
2021 	buf_alloc->s_buf.buf_size = 0;
2022 
2023 	return true;
2024 }
2025 
2026 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2027  * @hdev: pointer to struct hclge_dev
2028  * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
2030  */
2031 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2032 				struct hclge_pkt_buf_alloc *buf_alloc)
2033 {
2034 	/* When DCB is not supported, rx private buffer is not allocated. */
2035 	if (!hnae3_dev_dcb_supported(hdev)) {
2036 		u32 rx_all = hdev->pkt_buf_size;
2037 
2038 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2039 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2040 			return -ENOMEM;
2041 
2042 		return 0;
2043 	}
2044 
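	/* Try the allocation strategies in order of preference: private
	 * buffers only, then private plus shared with the larger waterlines,
	 * then with the smaller waterlines, then dropping private buffers
	 * for non-PFC TCs and finally for PFC TCs as well.
	 */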
2045 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2046 		return 0;
2047 
2048 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2049 		return 0;
2050 
2051 	/* try to decrease the buffer size */
2052 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2053 		return 0;
2054 
2055 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2056 		return 0;
2057 
2058 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2059 		return 0;
2060 
2061 	return -ENOMEM;
2062 }
2063 
2064 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2065 				   struct hclge_pkt_buf_alloc *buf_alloc)
2066 {
2067 	struct hclge_rx_priv_buff_cmd *req;
2068 	struct hclge_desc desc;
2069 	int ret;
2070 	int i;
2071 
2072 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2073 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2074 
2075 	/* Alloc private buffer TCs */
2076 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2077 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2078 
2079 		req->buf_num[i] =
2080 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2081 		req->buf_num[i] |=
2082 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2083 	}
2084 
2085 	req->shared_buf =
2086 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2087 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2088 
2089 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2090 	if (ret)
2091 		dev_err(&hdev->pdev->dev,
2092 			"rx private buffer alloc cmd failed %d\n", ret);
2093 
2094 	return ret;
2095 }
2096 
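/* Configure the per-TC private buffer waterlines. The HCLGE_MAX_TC_NUM TCs
 * are split across two descriptors of HCLGE_TC_NUM_ONE_DESC entries each,
 * chained with the NEXT flag and sent in a single hclge_cmd_send() call.
 */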
2097 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2098 				   struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100 	struct hclge_rx_priv_wl_buf *req;
2101 	struct hclge_priv_buf *priv;
2102 	struct hclge_desc desc[2];
2103 	int i, j;
2104 	int ret;
2105 
2106 	for (i = 0; i < 2; i++) {
2107 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2108 					   false);
2109 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2110 
		/* The first descriptor sets the NEXT bit to 1 */
2112 		if (i == 0)
2113 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2114 		else
2115 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2116 
2117 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2118 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2119 
2120 			priv = &buf_alloc->priv_buf[idx];
2121 			req->tc_wl[j].high =
2122 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2123 			req->tc_wl[j].high |=
2124 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2125 			req->tc_wl[j].low =
2126 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2127 			req->tc_wl[j].low |=
2128 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2129 		}
2130 	}
2131 
	/* Send 2 descriptors at one time */
2133 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2134 	if (ret)
2135 		dev_err(&hdev->pdev->dev,
2136 			"rx private waterline config cmd failed %d\n",
2137 			ret);
2138 	return ret;
2139 }
2140 
2141 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2142 				    struct hclge_pkt_buf_alloc *buf_alloc)
2143 {
2144 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2145 	struct hclge_rx_com_thrd *req;
2146 	struct hclge_desc desc[2];
2147 	struct hclge_tc_thrd *tc;
2148 	int i, j;
2149 	int ret;
2150 
2151 	for (i = 0; i < 2; i++) {
2152 		hclge_cmd_setup_basic_desc(&desc[i],
2153 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2154 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2155 
		/* The first descriptor sets the NEXT bit to 1 */
2157 		if (i == 0)
2158 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2159 		else
2160 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2161 
2162 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2163 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2164 
2165 			req->com_thrd[j].high =
2166 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2167 			req->com_thrd[j].high |=
2168 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2169 			req->com_thrd[j].low =
2170 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2171 			req->com_thrd[j].low |=
2172 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2173 		}
2174 	}
2175 
2176 	/* Send 2 descriptors at one time */
2177 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2178 	if (ret)
2179 		dev_err(&hdev->pdev->dev,
2180 			"common threshold config cmd failed %d\n", ret);
2181 	return ret;
2182 }
2183 
2184 static int hclge_common_wl_config(struct hclge_dev *hdev,
2185 				  struct hclge_pkt_buf_alloc *buf_alloc)
2186 {
2187 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2188 	struct hclge_rx_com_wl *req;
2189 	struct hclge_desc desc;
2190 	int ret;
2191 
2192 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2193 
2194 	req = (struct hclge_rx_com_wl *)desc.data;
2195 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2196 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2197 
2198 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2199 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2200 
2201 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2202 	if (ret)
2203 		dev_err(&hdev->pdev->dev,
2204 			"common waterline config cmd failed %d\n", ret);
2205 
2206 	return ret;
2207 }
2208 
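/* Calculate and commit the whole packet buffer layout. The order matters:
 * TX buffers are calculated and allocated first, then the RX private
 * buffers, and finally the waterlines and thresholds (the latter only when
 * DCB is supported).
 */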
2209 int hclge_buffer_alloc(struct hclge_dev *hdev)
2210 {
2211 	struct hclge_pkt_buf_alloc *pkt_buf;
2212 	int ret;
2213 
2214 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2215 	if (!pkt_buf)
2216 		return -ENOMEM;
2217 
2218 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2219 	if (ret) {
2220 		dev_err(&hdev->pdev->dev,
2221 			"could not calc tx buffer size for all TCs %d\n", ret);
2222 		goto out;
2223 	}
2224 
2225 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2226 	if (ret) {
2227 		dev_err(&hdev->pdev->dev,
2228 			"could not alloc tx buffers %d\n", ret);
2229 		goto out;
2230 	}
2231 
2232 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2233 	if (ret) {
2234 		dev_err(&hdev->pdev->dev,
2235 			"could not calc rx priv buffer size for all TCs %d\n",
2236 			ret);
2237 		goto out;
2238 	}
2239 
2240 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2241 	if (ret) {
2242 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2243 			ret);
2244 		goto out;
2245 	}
2246 
2247 	if (hnae3_dev_dcb_supported(hdev)) {
2248 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2249 		if (ret) {
2250 			dev_err(&hdev->pdev->dev,
2251 				"could not configure rx private waterline %d\n",
2252 				ret);
2253 			goto out;
2254 		}
2255 
2256 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2257 		if (ret) {
2258 			dev_err(&hdev->pdev->dev,
2259 				"could not configure common threshold %d\n",
2260 				ret);
2261 			goto out;
2262 		}
2263 	}
2264 
2265 	ret = hclge_common_wl_config(hdev, pkt_buf);
2266 	if (ret)
2267 		dev_err(&hdev->pdev->dev,
2268 			"could not configure common waterline %d\n", ret);
2269 
2270 out:
2271 	kfree(pkt_buf);
2272 	return ret;
2273 }
2274 
2275 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2276 {
2277 	struct hnae3_handle *roce = &vport->roce;
2278 	struct hnae3_handle *nic = &vport->nic;
2279 
2280 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2281 
2282 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2283 	    vport->back->num_msi_left == 0)
2284 		return -EINVAL;
2285 
2286 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2287 
2288 	roce->rinfo.netdev = nic->kinfo.netdev;
2289 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2290 
2291 	roce->pdev = nic->pdev;
2292 	roce->ae_algo = nic->ae_algo;
2293 	roce->numa_node_mask = nic->numa_node_mask;
2294 
2295 	return 0;
2296 }
2297 
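/* Allocate MSI/MSI-X vectors for the PF. At least HNAE3_MIN_VECTOR_NUM
 * vectors are required; if fewer than hdev->num_msi are granted, the
 * driver warns and continues with the reduced count.
 */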
2298 static int hclge_init_msi(struct hclge_dev *hdev)
2299 {
2300 	struct pci_dev *pdev = hdev->pdev;
2301 	int vectors;
2302 	int i;
2303 
2304 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2305 					hdev->num_msi,
2306 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2307 	if (vectors < 0) {
2308 		dev_err(&pdev->dev,
2309 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2310 			vectors);
2311 		return vectors;
2312 	}
2313 	if (vectors < hdev->num_msi)
2314 		dev_warn(&hdev->pdev->dev,
2315 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2316 			 hdev->num_msi, vectors);
2317 
2318 	hdev->num_msi = vectors;
2319 	hdev->num_msi_left = vectors;
2320 
2321 	hdev->base_msi_vector = pdev->irq;
2322 	hdev->roce_base_vector = hdev->base_msi_vector +
2323 				hdev->roce_base_msix_offset;
2324 
2325 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2326 					   sizeof(u16), GFP_KERNEL);
2327 	if (!hdev->vector_status) {
2328 		pci_free_irq_vectors(pdev);
2329 		return -ENOMEM;
2330 	}
2331 
2332 	for (i = 0; i < hdev->num_msi; i++)
2333 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2334 
2335 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2336 					sizeof(int), GFP_KERNEL);
2337 	if (!hdev->vector_irq) {
2338 		pci_free_irq_vectors(pdev);
2339 		return -ENOMEM;
2340 	}
2341 
2342 	return 0;
2343 }
2344 
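/* Only 10M and 100M links may run at half duplex; any other speed is
 * forced to full duplex.
 */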
2345 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2346 {
2347 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2348 		duplex = HCLGE_MAC_FULL;
2349 
2350 	return duplex;
2351 }
2352 
2353 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2354 				      u8 duplex)
2355 {
2356 	struct hclge_config_mac_speed_dup_cmd *req;
2357 	struct hclge_desc desc;
2358 	int ret;
2359 
2360 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2361 
2362 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2363 
2364 	if (duplex)
2365 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2366 
2367 	switch (speed) {
2368 	case HCLGE_MAC_SPEED_10M:
2369 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2370 				HCLGE_CFG_SPEED_S, 6);
2371 		break;
2372 	case HCLGE_MAC_SPEED_100M:
2373 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2374 				HCLGE_CFG_SPEED_S, 7);
2375 		break;
2376 	case HCLGE_MAC_SPEED_1G:
2377 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2378 				HCLGE_CFG_SPEED_S, 0);
2379 		break;
2380 	case HCLGE_MAC_SPEED_10G:
2381 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2382 				HCLGE_CFG_SPEED_S, 1);
2383 		break;
2384 	case HCLGE_MAC_SPEED_25G:
2385 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2386 				HCLGE_CFG_SPEED_S, 2);
2387 		break;
2388 	case HCLGE_MAC_SPEED_40G:
2389 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2390 				HCLGE_CFG_SPEED_S, 3);
2391 		break;
2392 	case HCLGE_MAC_SPEED_50G:
2393 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2394 				HCLGE_CFG_SPEED_S, 4);
2395 		break;
2396 	case HCLGE_MAC_SPEED_100G:
2397 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2398 				HCLGE_CFG_SPEED_S, 5);
2399 		break;
2400 	default:
2401 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2402 		return -EINVAL;
2403 	}
2404 
2405 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2406 		      1);
2407 
2408 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2409 	if (ret) {
2410 		dev_err(&hdev->pdev->dev,
2411 			"mac speed/duplex config cmd failed %d.\n", ret);
2412 		return ret;
2413 	}
2414 
2415 	return 0;
2416 }
2417 
2418 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2419 {
2420 	int ret;
2421 
2422 	duplex = hclge_check_speed_dup(duplex, speed);
2423 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2424 		return 0;
2425 
2426 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2427 	if (ret)
2428 		return ret;
2429 
2430 	hdev->hw.mac.speed = speed;
2431 	hdev->hw.mac.duplex = duplex;
2432 
2433 	return 0;
2434 }
2435 
2436 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2437 				     u8 duplex)
2438 {
2439 	struct hclge_vport *vport = hclge_get_vport(handle);
2440 	struct hclge_dev *hdev = vport->back;
2441 
2442 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2443 }
2444 
2445 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2446 {
2447 	struct hclge_config_auto_neg_cmd *req;
2448 	struct hclge_desc desc;
2449 	u32 flag = 0;
2450 	int ret;
2451 
2452 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2453 
2454 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2455 	if (enable)
2456 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2457 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2458 
2459 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2460 	if (ret)
2461 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2462 			ret);
2463 
2464 	return ret;
2465 }
2466 
2467 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2468 {
2469 	struct hclge_vport *vport = hclge_get_vport(handle);
2470 	struct hclge_dev *hdev = vport->back;
2471 
2472 	if (!hdev->hw.mac.support_autoneg) {
2473 		if (enable) {
2474 			dev_err(&hdev->pdev->dev,
2475 				"autoneg is not supported by current port\n");
2476 			return -EOPNOTSUPP;
2477 		} else {
2478 			return 0;
2479 		}
2480 	}
2481 
2482 	return hclge_set_autoneg_en(hdev, enable);
2483 }
2484 
2485 static int hclge_get_autoneg(struct hnae3_handle *handle)
2486 {
2487 	struct hclge_vport *vport = hclge_get_vport(handle);
2488 	struct hclge_dev *hdev = vport->back;
2489 	struct phy_device *phydev = hdev->hw.mac.phydev;
2490 
2491 	if (phydev)
2492 		return phydev->autoneg;
2493 
2494 	return hdev->hw.mac.autoneg;
2495 }
2496 
2497 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2498 {
2499 	struct hclge_vport *vport = hclge_get_vport(handle);
2500 	struct hclge_dev *hdev = vport->back;
2501 	int ret;
2502 
2503 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2504 
2505 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2506 	if (ret)
2507 		return ret;
2508 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2509 }
2510 
2511 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2512 {
2513 	struct hclge_vport *vport = hclge_get_vport(handle);
2514 	struct hclge_dev *hdev = vport->back;
2515 
2516 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2517 		return hclge_set_autoneg_en(hdev, !halt);
2518 
2519 	return 0;
2520 }
2521 
2522 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2523 {
2524 	struct hclge_config_fec_cmd *req;
2525 	struct hclge_desc desc;
2526 	int ret;
2527 
2528 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2529 
2530 	req = (struct hclge_config_fec_cmd *)desc.data;
2531 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2532 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2533 	if (fec_mode & BIT(HNAE3_FEC_RS))
2534 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2535 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2536 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2537 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2538 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2539 
2540 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2541 	if (ret)
2542 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2543 
2544 	return ret;
2545 }
2546 
2547 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2548 {
2549 	struct hclge_vport *vport = hclge_get_vport(handle);
2550 	struct hclge_dev *hdev = vport->back;
2551 	struct hclge_mac *mac = &hdev->hw.mac;
2552 	int ret;
2553 
2554 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2555 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2556 		return -EINVAL;
2557 	}
2558 
2559 	ret = hclge_set_fec_hw(hdev, fec_mode);
2560 	if (ret)
2561 		return ret;
2562 
2563 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2564 	return 0;
2565 }
2566 
2567 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2568 			  u8 *fec_mode)
2569 {
2570 	struct hclge_vport *vport = hclge_get_vport(handle);
2571 	struct hclge_dev *hdev = vport->back;
2572 	struct hclge_mac *mac = &hdev->hw.mac;
2573 
2574 	if (fec_ability)
2575 		*fec_ability = mac->fec_ability;
2576 	if (fec_mode)
2577 		*fec_mode = mac->fec_mode;
2578 }
2579 
2580 static int hclge_mac_init(struct hclge_dev *hdev)
2581 {
2582 	struct hclge_mac *mac = &hdev->hw.mac;
2583 	int ret;
2584 
2585 	hdev->support_sfp_query = true;
2586 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2587 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2588 					 hdev->hw.mac.duplex);
2589 	if (ret) {
2590 		dev_err(&hdev->pdev->dev,
2591 			"Config mac speed dup fail ret=%d\n", ret);
2592 		return ret;
2593 	}
2594 
2595 	if (hdev->hw.mac.support_autoneg) {
2596 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2597 		if (ret) {
2598 			dev_err(&hdev->pdev->dev,
2599 				"Config mac autoneg fail ret=%d\n", ret);
2600 			return ret;
2601 		}
2602 	}
2603 
2604 	mac->link = 0;
2605 
2606 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2607 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2608 		if (ret) {
2609 			dev_err(&hdev->pdev->dev,
2610 				"Fec mode init fail, ret = %d\n", ret);
2611 			return ret;
2612 		}
2613 	}
2614 
2615 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2616 	if (ret) {
2617 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2618 		return ret;
2619 	}
2620 
2621 	ret = hclge_set_default_loopback(hdev);
2622 	if (ret)
2623 		return ret;
2624 
2625 	ret = hclge_buffer_alloc(hdev);
2626 	if (ret)
2627 		dev_err(&hdev->pdev->dev,
2628 			"allocate buffer fail, ret=%d\n", ret);
2629 
2630 	return ret;
2631 }
2632 
2633 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2634 {
2635 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2636 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2637 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2638 			      &hdev->mbx_service_task);
2639 }
2640 
2641 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2642 {
2643 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2644 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2645 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2646 			      &hdev->rst_service_task);
2647 }
2648 
2649 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2650 {
2651 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2652 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2653 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2654 		hdev->hw_stats.stats_timer++;
2655 		hdev->fd_arfs_expire_timer++;
2656 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2657 				    system_wq, &hdev->service_task,
2658 				    delay_time);
2659 	}
2660 }
2661 
2662 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2663 {
2664 	struct hclge_link_status_cmd *req;
2665 	struct hclge_desc desc;
2666 	int link_status;
2667 	int ret;
2668 
2669 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2670 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2671 	if (ret) {
2672 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2673 			ret);
2674 		return ret;
2675 	}
2676 
2677 	req = (struct hclge_link_status_cmd *)desc.data;
2678 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2679 
2680 	return !!link_status;
2681 }
2682 
2683 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2684 {
2685 	unsigned int mac_state;
2686 	int link_stat;
2687 
2688 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2689 		return 0;
2690 
2691 	mac_state = hclge_get_mac_link_status(hdev);
2692 
2693 	if (hdev->hw.mac.phydev) {
2694 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2695 			link_stat = mac_state &
2696 				hdev->hw.mac.phydev->link;
2697 		else
2698 			link_stat = 0;
2699 
2700 	} else {
2701 		link_stat = mac_state;
2702 	}
2703 
2704 	return !!link_stat;
2705 }
2706 
2707 static void hclge_update_link_status(struct hclge_dev *hdev)
2708 {
2709 	struct hnae3_client *rclient = hdev->roce_client;
2710 	struct hnae3_client *client = hdev->nic_client;
2711 	struct hnae3_handle *rhandle;
2712 	struct hnae3_handle *handle;
2713 	int state;
2714 	int i;
2715 
2716 	if (!client)
2717 		return;
2718 	state = hclge_get_mac_phy_link(hdev);
2719 	if (state != hdev->hw.mac.link) {
2720 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2721 			handle = &hdev->vport[i].nic;
2722 			client->ops->link_status_change(handle, state);
2723 			hclge_config_mac_tnl_int(hdev, state);
2724 			rhandle = &hdev->vport[i].roce;
2725 			if (rclient && rclient->ops->link_status_change)
2726 				rclient->ops->link_status_change(rhandle,
2727 								 state);
2728 		}
2729 		hdev->hw.mac.link = state;
2730 	}
2731 }
2732 
2733 static void hclge_update_port_capability(struct hclge_mac *mac)
2734 {
2735 	/* update fec ability by speed */
2736 	hclge_convert_setting_fec(mac);
2737 
	/* the firmware cannot identify the backplane type; the media type
	 * read from the configuration can help deal with it
	 */
2741 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2742 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2743 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2744 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2745 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2746 
	if (mac->support_autoneg) {
2748 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2749 		linkmode_copy(mac->advertising, mac->supported);
2750 	} else {
2751 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2752 				   mac->supported);
2753 		linkmode_zero(mac->advertising);
2754 	}
2755 }
2756 
2757 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2758 {
2759 	struct hclge_sfp_info_cmd *resp;
2760 	struct hclge_desc desc;
2761 	int ret;
2762 
2763 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2764 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2765 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2766 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
2769 		return ret;
2770 	} else if (ret) {
2771 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2772 		return ret;
2773 	}
2774 
2775 	*speed = le32_to_cpu(resp->speed);
2776 
2777 	return 0;
2778 }
2779 
2780 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2781 {
2782 	struct hclge_sfp_info_cmd *resp;
2783 	struct hclge_desc desc;
2784 	int ret;
2785 
2786 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2787 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2788 
2789 	resp->query_type = QUERY_ACTIVE_SPEED;
2790 
2791 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2792 	if (ret == -EOPNOTSUPP) {
2793 		dev_warn(&hdev->pdev->dev,
2794 			 "IMP does not support get SFP info %d\n", ret);
2795 		return ret;
2796 	} else if (ret) {
2797 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2798 		return ret;
2799 	}
2800 
2801 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, the firmware is an old version,
	 * so do not update these params
	 */
2805 	if (resp->speed_ability) {
2806 		mac->module_type = le32_to_cpu(resp->module_type);
2807 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2808 		mac->autoneg = resp->autoneg;
2809 		mac->support_autoneg = resp->autoneg_ability;
2810 		mac->speed_type = QUERY_ACTIVE_SPEED;
2811 		if (!resp->active_fec)
2812 			mac->fec_mode = 0;
2813 		else
2814 			mac->fec_mode = BIT(resp->active_fec);
2815 	} else {
2816 		mac->speed_type = QUERY_SFP_SPEED;
2817 	}
2818 
2819 	return 0;
2820 }
2821 
2822 static int hclge_update_port_info(struct hclge_dev *hdev)
2823 {
2824 	struct hclge_mac *mac = &hdev->hw.mac;
2825 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2826 	int ret;
2827 
2828 	/* get the port info from SFP cmd if not copper port */
2829 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2830 		return 0;
2831 
	/* if IMP does not support getting SFP/qSFP info, return directly */
2833 	if (!hdev->support_sfp_query)
2834 		return 0;
2835 
2836 	if (hdev->pdev->revision >= 0x21)
2837 		ret = hclge_get_sfp_info(hdev, mac);
2838 	else
2839 		ret = hclge_get_sfp_speed(hdev, &speed);
2840 
2841 	if (ret == -EOPNOTSUPP) {
2842 		hdev->support_sfp_query = false;
2843 		return ret;
2844 	} else if (ret) {
2845 		return ret;
2846 	}
2847 
2848 	if (hdev->pdev->revision >= 0x21) {
2849 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2850 			hclge_update_port_capability(mac);
2851 			return 0;
2852 		}
2853 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2854 					       HCLGE_MAC_FULL);
2855 	} else {
2856 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2857 			return 0; /* do nothing if no SFP */
2858 
2859 		/* must config full duplex for SFP */
2860 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2861 	}
2862 }
2863 
2864 static int hclge_get_status(struct hnae3_handle *handle)
2865 {
2866 	struct hclge_vport *vport = hclge_get_vport(handle);
2867 	struct hclge_dev *hdev = vport->back;
2868 
2869 	hclge_update_link_status(hdev);
2870 
2871 	return hdev->hw.mac.link;
2872 }
2873 
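/* Decode the vector0 interrupt cause. Sources are checked in priority
 * order: reset events first (IMP, then global), then MSI-X hardware error
 * events, then mailbox (CMDQ RX) events; anything left is reported as an
 * "other" event.
 */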
2874 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2875 {
2876 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2877 
2878 	/* fetch the events from their corresponding regs */
2879 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2880 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2881 	msix_src_reg = hclge_read_dev(&hdev->hw,
2882 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2883 
	/* Assumption: if reset and mailbox events are reported together,
	 * we will only process the reset event in this round and defer the
	 * processing of the mailbox events. Since we have not cleared the
	 * RX CMDQ event this time, we will receive another interrupt from
	 * H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
2892 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2893 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2894 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2895 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2896 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2897 		hdev->rst_stats.imp_rst_cnt++;
2898 		return HCLGE_VECTOR0_EVENT_RST;
2899 	}
2900 
2901 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2902 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2903 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2904 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2905 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2906 		hdev->rst_stats.global_rst_cnt++;
2907 		return HCLGE_VECTOR0_EVENT_RST;
2908 	}
2909 
2910 	/* check for vector0 msix event source */
2911 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2912 		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2913 			 msix_src_reg);
2914 		*clearval = msix_src_reg;
2915 		return HCLGE_VECTOR0_EVENT_ERR;
2916 	}
2917 
2918 	/* check for vector0 mailbox(=CMDQ RX) event source */
2919 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2920 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2921 		*clearval = cmdq_src_reg;
2922 		return HCLGE_VECTOR0_EVENT_MBX;
2923 	}
2924 
2925 	/* print other vector0 event source */
2926 	dev_info(&hdev->pdev->dev,
2927 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2928 		 cmdq_src_reg, msix_src_reg);
2929 	*clearval = msix_src_reg;
2930 
2931 	return HCLGE_VECTOR0_EVENT_OTHER;
2932 }
2933 
2934 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2935 				    u32 regclr)
2936 {
2937 	switch (event_type) {
2938 	case HCLGE_VECTOR0_EVENT_RST:
2939 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2940 		break;
2941 	case HCLGE_VECTOR0_EVENT_MBX:
2942 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2943 		break;
2944 	default:
2945 		break;
2946 	}
2947 }
2948 
2949 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2950 {
2951 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2952 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2953 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2954 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2955 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2956 }
2957 
2958 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2959 {
2960 	writel(enable ? 1 : 0, vector->addr);
2961 }
2962 
2963 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2964 {
2965 	struct hclge_dev *hdev = data;
2966 	u32 clearval = 0;
2967 	u32 event_cause;
2968 
2969 	hclge_enable_vector(&hdev->misc_vector, false);
2970 	event_cause = hclge_check_event_cause(hdev, &clearval);
2971 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
2973 	switch (event_cause) {
2974 	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This can
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do the following for
		 * now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred the type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type,
		 *    it will fetch the correct type of reset. This is done by
		 *    first decoding the types of errors.
		 */
2985 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2986 		/* fall through */
2987 	case HCLGE_VECTOR0_EVENT_RST:
2988 		hclge_reset_task_schedule(hdev);
2989 		break;
2990 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. we are not handling any mbx task and none is scheduled,
		 *                        OR
		 * 2. we are handling an mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
3000 		hclge_mbx_task_schedule(hdev);
3001 		break;
3002 	default:
3003 		dev_warn(&hdev->pdev->dev,
3004 			 "received unknown or unhandled event of vector0\n");
3005 		break;
3006 	}
3007 
3008 	hclge_clear_event_cause(hdev, event_cause, clearval);
3009 
	/* Enable the interrupt if it is not caused by reset. When clearval
	 * equals 0, the interrupt status may have been cleared by hardware
	 * before the driver read the status register. In this case, the
	 * vector0 interrupt should also be re-enabled.
	 */
	if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX)
		hclge_enable_vector(&hdev->misc_vector, true);
3019 
3020 	return IRQ_HANDLED;
3021 }
3022 
3023 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3024 {
3025 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3026 		dev_warn(&hdev->pdev->dev,
3027 			 "vector(vector_id %d) has been freed.\n", vector_id);
3028 		return;
3029 	}
3030 
3031 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3032 	hdev->num_msi_left += 1;
3033 	hdev->num_msi_used -= 1;
3034 }
3035 
3036 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3037 {
3038 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3039 
3040 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3041 
3042 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3043 	hdev->vector_status[0] = 0;
3044 
3045 	hdev->num_msi_left -= 1;
3046 	hdev->num_msi_used += 1;
3047 }
3048 
3049 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3050 				      const cpumask_t *mask)
3051 {
3052 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3053 					      affinity_notify);
3054 
3055 	cpumask_copy(&hdev->affinity_mask, mask);
3056 }
3057 
3058 static void hclge_irq_affinity_release(struct kref *ref)
3059 {
3060 }
3061 
3062 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3063 {
3064 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3065 			      &hdev->affinity_mask);
3066 
3067 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3068 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3069 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3070 				  &hdev->affinity_notify);
3071 }
3072 
3073 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3074 {
3075 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3076 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3077 }
3078 
3079 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3080 {
3081 	int ret;
3082 
3083 	hclge_get_misc_vector(hdev);
3084 
3085 	/* this would be explicitly freed in the end */
3086 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3087 			  0, "hclge_misc", hdev);
3088 	if (ret) {
3089 		hclge_free_vector(hdev, 0);
3090 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3091 			hdev->misc_vector.vector_irq);
3092 	}
3093 
3094 	return ret;
3095 }
3096 
3097 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3098 {
3099 	free_irq(hdev->misc_vector.vector_irq, hdev);
3100 	hclge_free_vector(hdev, 0);
3101 }
3102 
3103 int hclge_notify_client(struct hclge_dev *hdev,
3104 			enum hnae3_reset_notify_type type)
3105 {
3106 	struct hnae3_client *client = hdev->nic_client;
3107 	u16 i;
3108 
3109 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3110 		return 0;
3111 
3112 	if (!client->ops->reset_notify)
3113 		return -EOPNOTSUPP;
3114 
3115 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3116 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3117 		int ret;
3118 
3119 		ret = client->ops->reset_notify(handle, type);
3120 		if (ret) {
3121 			dev_err(&hdev->pdev->dev,
3122 				"notify nic client failed %d(%d)\n", type, ret);
3123 			return ret;
3124 		}
3125 	}
3126 
3127 	return 0;
3128 }
3129 
3130 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3131 				    enum hnae3_reset_notify_type type)
3132 {
3133 	struct hnae3_client *client = hdev->roce_client;
3134 	int ret = 0;
3135 	u16 i;
3136 
3137 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3138 		return 0;
3139 
3140 	if (!client->ops->reset_notify)
3141 		return -EOPNOTSUPP;
3142 
3143 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3144 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3145 
3146 		ret = client->ops->reset_notify(handle, type);
3147 		if (ret) {
3148 			dev_err(&hdev->pdev->dev,
3149 				"notify roce client failed %d(%d)",
3150 				type, ret);
3151 			return ret;
3152 		}
3153 	}
3154 
3155 	return ret;
3156 }
3157 
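/* Wait for the hardware to report reset completion. The relevant status
 * register (or the FLR state bit) is polled every 100 ms for up to 200
 * iterations, i.e. roughly 20 seconds in total.
 */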
3158 static int hclge_reset_wait(struct hclge_dev *hdev)
3159 {
#define HCLGE_RESET_WAIT_MS	100
3161 #define HCLGE_RESET_WAIT_CNT	200
3162 	u32 val, reg, reg_bit;
3163 	u32 cnt = 0;
3164 
3165 	switch (hdev->reset_type) {
3166 	case HNAE3_IMP_RESET:
3167 		reg = HCLGE_GLOBAL_RESET_REG;
3168 		reg_bit = HCLGE_IMP_RESET_BIT;
3169 		break;
3170 	case HNAE3_GLOBAL_RESET:
3171 		reg = HCLGE_GLOBAL_RESET_REG;
3172 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3173 		break;
3174 	case HNAE3_FUNC_RESET:
3175 		reg = HCLGE_FUN_RST_ING;
3176 		reg_bit = HCLGE_FUN_RST_ING_B;
3177 		break;
3178 	case HNAE3_FLR_RESET:
3179 		break;
3180 	default:
3181 		dev_err(&hdev->pdev->dev,
3182 			"Wait for unsupported reset type: %d\n",
3183 			hdev->reset_type);
3184 		return -EINVAL;
3185 	}
3186 
3187 	if (hdev->reset_type == HNAE3_FLR_RESET) {
3188 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3189 		       cnt++ < HCLGE_RESET_WAIT_CNT)
			msleep(HCLGE_RESET_WAIT_MS);
3191 
3192 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3193 			dev_err(&hdev->pdev->dev,
3194 				"flr wait timeout: %d\n", cnt);
3195 			return -EBUSY;
3196 		}
3197 
3198 		return 0;
3199 	}
3200 
3201 	val = hclge_read_dev(&hdev->hw, reg);
3202 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3204 		val = hclge_read_dev(&hdev->hw, reg);
3205 		cnt++;
3206 	}
3207 
3208 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3209 		dev_warn(&hdev->pdev->dev,
3210 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3211 		return -EBUSY;
3212 	}
3213 
3214 	return 0;
3215 }
3216 
3217 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3218 {
3219 	struct hclge_vf_rst_cmd *req;
3220 	struct hclge_desc desc;
3221 
3222 	req = (struct hclge_vf_rst_cmd *)desc.data;
3223 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3224 	req->dest_vfid = func_id;
3225 
3226 	if (reset)
3227 		req->vf_rst = 0x1;
3228 
3229 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3230 }
3231 
3232 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3233 {
3234 	int i;
3235 
3236 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3237 		struct hclge_vport *vport = &hdev->vport[i];
3238 		int ret;
3239 
3240 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3241 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3242 		if (ret) {
3243 			dev_err(&hdev->pdev->dev,
3244 				"set vf(%d) rst failed %d!\n",
3245 				vport->vport_id, ret);
3246 			return ret;
3247 		}
3248 
3249 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3250 			continue;
3251 
3252 		/* Inform VF to process the reset.
3253 		 * hclge_inform_reset_assert_to_vf may fail if VF
3254 		 * driver is not loaded.
3255 		 */
3256 		ret = hclge_inform_reset_assert_to_vf(vport);
3257 		if (ret)
3258 			dev_warn(&hdev->pdev->dev,
3259 				 "inform reset to vf(%d) failed %d!\n",
3260 				 vport->vport_id, ret);
3261 	}
3262 
3263 	return 0;
3264 }
3265 
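/* Poll the firmware until all running VFs report that they have stopped
 * their IO, waiting HCLGE_PF_RESET_SYNC_TIME ms between tries for up to
 * HCLGE_PF_RESET_SYNC_CNT attempts. Old firmware that does not support
 * the query gets a fixed 100 ms grace period instead.
 */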
3266 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3267 {
3268 	struct hclge_pf_rst_sync_cmd *req;
3269 	struct hclge_desc desc;
3270 	int cnt = 0;
3271 	int ret;
3272 
3273 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3274 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3275 
3276 	do {
3277 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait 100 ms for the
		 * VF to stop IO
		 */
3281 		if (ret == -EOPNOTSUPP) {
3282 			msleep(HCLGE_RESET_SYNC_TIME);
3283 			return 0;
3284 		} else if (ret) {
3285 			dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3286 				ret);
3287 			return ret;
3288 		} else if (req->all_vf_ready) {
3289 			return 0;
3290 		}
3291 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3292 		hclge_cmd_reuse_desc(&desc, true);
3293 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3294 
3295 	dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3296 	return -ETIME;
3297 }
3298 
3299 void hclge_report_hw_error(struct hclge_dev *hdev,
3300 			   enum hnae3_hw_error_type type)
3301 {
3302 	struct hnae3_client *client = hdev->nic_client;
3303 	u16 i;
3304 
3305 	if (!client || !client->ops->process_hw_error ||
3306 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3307 		return;
3308 
3309 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3310 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3311 }
3312 
3313 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3314 {
3315 	u32 reg_val;
3316 
3317 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3318 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3319 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3320 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3321 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3322 	}
3323 
3324 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3325 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3326 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3327 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3328 	}
3329 }
3330 
3331 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3332 {
3333 	struct hclge_desc desc;
3334 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3335 	int ret;
3336 
3337 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3338 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3339 	req->fun_reset_vfid = func_id;
3340 
3341 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3342 	if (ret)
3343 		dev_err(&hdev->pdev->dev,
3344 			"send function reset cmd fail, status =%d\n", ret);
3345 
3346 	return ret;
3347 }
3348 
3349 static void hclge_do_reset(struct hclge_dev *hdev)
3350 {
3351 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3352 	struct pci_dev *pdev = hdev->pdev;
3353 	u32 val;
3354 
3355 	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset not finished\n");
3357 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3358 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3359 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3360 		return;
3361 	}
3362 
3363 	switch (hdev->reset_type) {
3364 	case HNAE3_GLOBAL_RESET:
3365 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3366 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3367 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3368 		dev_info(&pdev->dev, "Global Reset requested\n");
3369 		break;
3370 	case HNAE3_FUNC_RESET:
3371 		dev_info(&pdev->dev, "PF Reset requested\n");
3372 		/* schedule again to check later */
3373 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3374 		hclge_reset_task_schedule(hdev);
3375 		break;
3376 	case HNAE3_FLR_RESET:
3377 		dev_info(&pdev->dev, "FLR requested\n");
3378 		/* schedule again to check later */
3379 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3380 		hclge_reset_task_schedule(hdev);
3381 		break;
3382 	default:
3383 		dev_warn(&pdev->dev,
3384 			 "Unsupported reset type: %d\n", hdev->reset_type);
3385 		break;
3386 	}
3387 }
3388 
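/* Pick the highest-priority pending reset: IMP, then global, then func,
 * then FLR. A request of lower priority than the reset already in progress
 * is ignored and HNAE3_NONE_RESET is returned.
 */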
3389 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3390 						   unsigned long *addr)
3391 {
3392 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3393 	struct hclge_dev *hdev = ae_dev->priv;
3394 
3395 	/* first, resolve any unknown reset type to the known type(s) */
3396 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3397 		/* we will intentionally ignore any errors from this function
3398 		 *  as we will end up in *some* reset request in any case
3399 		 */
3400 		hclge_handle_hw_msix_error(hdev, addr);
3401 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced the
		 * new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
3409 		hclge_enable_vector(&hdev->misc_vector, true);
3410 	}
3411 
3412 	/* return the highest priority reset level amongst all */
3413 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3414 		rst_level = HNAE3_IMP_RESET;
3415 		clear_bit(HNAE3_IMP_RESET, addr);
3416 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3417 		clear_bit(HNAE3_FUNC_RESET, addr);
3418 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3419 		rst_level = HNAE3_GLOBAL_RESET;
3420 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3421 		clear_bit(HNAE3_FUNC_RESET, addr);
3422 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3423 		rst_level = HNAE3_FUNC_RESET;
3424 		clear_bit(HNAE3_FUNC_RESET, addr);
3425 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3426 		rst_level = HNAE3_FLR_RESET;
3427 		clear_bit(HNAE3_FLR_RESET, addr);
3428 	}
3429 
3430 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3431 	    rst_level < hdev->reset_type)
3432 		return HNAE3_NONE_RESET;
3433 
3434 	return rst_level;
3435 }
3436 
3437 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3438 {
3439 	u32 clearval = 0;
3440 
3441 	switch (hdev->reset_type) {
3442 	case HNAE3_IMP_RESET:
3443 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3444 		break;
3445 	case HNAE3_GLOBAL_RESET:
3446 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3447 		break;
3448 	default:
3449 		break;
3450 	}
3451 
3452 	if (!clearval)
3453 		return;
3454 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3458 	if (hdev->pdev->revision == 0x20)
3459 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3460 				clearval);
3461 
3462 	hclge_enable_vector(&hdev->misc_vector, true);
3463 }
3464 
3465 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3466 {
3467 	int ret = 0;
3468 
3469 	switch (hdev->reset_type) {
3470 	case HNAE3_FUNC_RESET:
3471 		/* fall through */
3472 	case HNAE3_FLR_RESET:
3473 		ret = hclge_set_all_vf_rst(hdev, true);
3474 		break;
3475 	default:
3476 		break;
3477 	}
3478 
3479 	return ret;
3480 }
3481 
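/* Reset handshake with the firmware: judging from the callers, setting
 * HCLGE_NIC_SW_RST_RDY in the CSQ depth register signals that the driver
 * has finished its preparatory work and the reset may proceed, and the bit
 * is cleared again once re-initialization is done.
 */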
3482 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3483 {
3484 	u32 reg_val;
3485 
3486 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3487 	if (enable)
3488 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3489 	else
3490 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3491 
3492 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3493 }
3494 
3495 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3496 {
3497 	u32 reg_val;
3498 	int ret = 0;
3499 
3500 	switch (hdev->reset_type) {
3501 	case HNAE3_FUNC_RESET:
		/* to confirm whether all running VFs are ready
		 * before requesting PF reset
		 */
3505 		ret = hclge_func_reset_sync_vf(hdev);
3506 		if (ret)
3507 			return ret;
3508 
3509 		ret = hclge_func_reset_cmd(hdev, 0);
3510 		if (ret) {
3511 			dev_err(&hdev->pdev->dev,
3512 				"asserting function reset fail %d!\n", ret);
3513 			return ret;
3514 		}
3515 
		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
3521 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3522 		hdev->rst_stats.pf_rst_cnt++;
3523 		break;
3524 	case HNAE3_FLR_RESET:
		/* to confirm whether all running VFs are ready
		 * before requesting PF reset
		 */
3528 		ret = hclge_func_reset_sync_vf(hdev);
3529 		if (ret)
3530 			return ret;
3531 
3532 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3533 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3534 		hdev->rst_stats.flr_rst_cnt++;
3535 		break;
3536 	case HNAE3_IMP_RESET:
3537 		hclge_handle_imp_error(hdev);
3538 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3539 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3540 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3541 		break;
3542 	default:
3543 		break;
3544 	}
3545 
3546 	/* inform hardware that preparatory work is done */
3547 	msleep(HCLGE_RESET_SYNC_TIME);
3548 	hclge_reset_handshake(hdev, true);
3549 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3550 
3551 	return ret;
3552 }
3553 
3554 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3555 {
3556 #define MAX_RESET_FAIL_CNT 5
3557 
3558 	if (hdev->reset_pending) {
3559 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3560 			 hdev->reset_pending);
3561 		return true;
3562 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3563 		   HCLGE_RESET_INT_M) {
3564 		dev_info(&hdev->pdev->dev,
3565 			 "reset failed because new reset interrupt\n");
3566 		hclge_clear_reset_cause(hdev);
3567 		return false;
3568 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3569 		hdev->rst_stats.reset_fail_cnt++;
3570 		set_bit(hdev->reset_type, &hdev->reset_pending);
3571 		dev_info(&hdev->pdev->dev,
3572 			 "re-schedule reset task(%d)\n",
3573 			 hdev->rst_stats.reset_fail_cnt);
3574 		return true;
3575 	}
3576 
3577 	hclge_clear_reset_cause(hdev);
3578 
	/* recover the handshake status when reset fails */
3580 	hclge_reset_handshake(hdev, true);
3581 
3582 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3583 	return false;
3584 }
3585 
3586 static int hclge_set_rst_done(struct hclge_dev *hdev)
3587 {
3588 	struct hclge_pf_rst_done_cmd *req;
3589 	struct hclge_desc desc;
3590 	int ret;
3591 
3592 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3593 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3594 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3595 
3596 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3597 	/* To be compatible with the old firmware, which does not support
3598 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3599 	 * return success
3600 	 */
3601 	if (ret == -EOPNOTSUPP) {
3602 		dev_warn(&hdev->pdev->dev,
3603 			 "current firmware does not support command(0x%x)!\n",
3604 			 HCLGE_OPC_PF_RST_DONE);
3605 		return 0;
3606 	} else if (ret) {
3607 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3608 			ret);
3609 	}
3610 
3611 	return ret;
3612 }
3613 
3614 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3615 {
3616 	int ret = 0;
3617 
3618 	switch (hdev->reset_type) {
3619 	case HNAE3_FUNC_RESET:
3620 		/* fall through */
3621 	case HNAE3_FLR_RESET:
3622 		ret = hclge_set_all_vf_rst(hdev, false);
3623 		break;
3624 	case HNAE3_GLOBAL_RESET:
3625 		/* fall through */
3626 	case HNAE3_IMP_RESET:
3627 		ret = hclge_set_rst_done(hdev);
3628 		break;
3629 	default:
3630 		break;
3631 	}
3632 
	/* clear the handshake status after re-initialization is done */
3634 	hclge_reset_handshake(hdev, false);
3635 
3636 	return ret;
3637 }
3638 
3639 static int hclge_reset_stack(struct hclge_dev *hdev)
3640 {
3641 	int ret;
3642 
3643 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3644 	if (ret)
3645 		return ret;
3646 
3647 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3648 	if (ret)
3649 		return ret;
3650 
3651 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3652 	if (ret)
3653 		return ret;
3654 
3655 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3656 }
3657 
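/* Overall reset flow driven by hclge_reset(), roughly:
 *   1. notify the RoCE client down and prepare VFs (hclge_reset_prepare_down)
 *   2. notify the NIC client down under rtnl_lock
 *   3. assert the reset / set the handshake flag (hclge_reset_prepare_wait)
 *   4. wait for the hardware reset to complete (hclge_reset_wait)
 *   5. uninit clients, re-init the ae device and clients (hclge_reset_stack)
 *   6. clear the reset cause, finish the handshake (hclge_reset_prepare_up)
 *   7. bring the NIC and RoCE clients back up
 * Any failure is funneled through hclge_reset_err_handle(), which may
 * re-schedule the reset task.
 */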
3658 static void hclge_reset(struct hclge_dev *hdev)
3659 {
3660 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3661 	enum hnae3_reset_type reset_level;
3662 	int ret;
3663 
	/* Initialize ae_dev reset status as well, in case the enet layer wants
	 * to know whether the device is undergoing reset
	 */
3667 	ae_dev->reset_type = hdev->reset_type;
3668 	hdev->rst_stats.reset_cnt++;
3669 	/* perform reset of the stack & ae device for a client */
3670 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3671 	if (ret)
3672 		goto err_reset;
3673 
3674 	ret = hclge_reset_prepare_down(hdev);
3675 	if (ret)
3676 		goto err_reset;
3677 
3678 	rtnl_lock();
3679 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3680 	if (ret)
3681 		goto err_reset_lock;
3682 
3683 	rtnl_unlock();
3684 
3685 	ret = hclge_reset_prepare_wait(hdev);
3686 	if (ret)
3687 		goto err_reset;
3688 
3689 	if (hclge_reset_wait(hdev))
3690 		goto err_reset;
3691 
3692 	hdev->rst_stats.hw_reset_done_cnt++;
3693 
3694 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3695 	if (ret)
3696 		goto err_reset;
3697 
3698 	rtnl_lock();
3699 
3700 	ret = hclge_reset_stack(hdev);
3701 	if (ret)
3702 		goto err_reset_lock;
3703 
3704 	hclge_clear_reset_cause(hdev);
3705 
3706 	ret = hclge_reset_prepare_up(hdev);
3707 	if (ret)
3708 		goto err_reset_lock;
3709 
3710 	rtnl_unlock();
3711 
3712 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the error from the RoCE client once the reset has already
	 * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
	 */
3716 	if (ret &&
3717 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3718 		goto err_reset;
3719 
3720 	rtnl_lock();
3721 
3722 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3723 	if (ret)
3724 		goto err_reset_lock;
3725 
3726 	rtnl_unlock();
3727 
3728 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3729 	if (ret)
3730 		goto err_reset;
3731 
3732 	hdev->last_reset_time = jiffies;
3733 	hdev->rst_stats.reset_fail_cnt = 0;
3734 	hdev->rst_stats.reset_done_cnt++;
3735 	ae_dev->reset_type = HNAE3_NONE_RESET;
3736 
	/* If default_reset_request has a higher level reset request pending,
	 * it should be handled as soon as possible, since some errors need
	 * this kind of reset to be fixed.
	 */
3741 	reset_level = hclge_get_reset_level(ae_dev,
3742 					    &hdev->default_reset_request);
3743 	if (reset_level != HNAE3_NONE_RESET)
3744 		set_bit(reset_level, &hdev->reset_request);
3745 
3746 	return;
3747 
3748 err_reset_lock:
3749 	rtnl_unlock();
3750 err_reset:
3751 	if (hclge_reset_err_handle(hdev))
3752 		hclge_reset_task_schedule(hdev);
3753 }
3754 
3755 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3756 {
3757 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3758 	struct hclge_dev *hdev = ae_dev->priv;
3759 
	/* We might end up getting called broadly because of the 2 cases below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to restore normalcy is to reset.
	 * 2. A new reset request from the stack due to a timeout.
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check whether this is a new reset request and we are
	 * not here just because the last reset attempt did not succeed and
	 * the watchdog hit us again. We know it is a new request if the last
	 * reset did not occur very recently (watchdog timer = 5 * HZ, so
	 * check after a sufficiently large time, say 4 * 5 * HZ). For a new
	 * request we reset the "reset level" to PF reset. A repeat of the
	 * most recent request is throttled: it is not allowed again before
	 * HCLGE_RESET_INTERVAL has elapsed.
	 */
3775 	if (!handle)
3776 		handle = &hdev->vport[0].nic;
3777 
3778 	if (time_before(jiffies, (hdev->last_reset_time +
3779 				  HCLGE_RESET_INTERVAL))) {
3780 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3781 		return;
3782 	} else if (hdev->default_reset_request)
3783 		hdev->reset_level =
3784 			hclge_get_reset_level(ae_dev,
3785 					      &hdev->default_reset_request);
3786 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3787 		hdev->reset_level = HNAE3_FUNC_RESET;
3788 
3789 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3790 		 hdev->reset_level);
3791 
3792 	/* request reset & schedule reset task */
3793 	set_bit(hdev->reset_level, &hdev->reset_request);
3794 	hclge_reset_task_schedule(hdev);
3795 
3796 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3797 		hdev->reset_level++;
3798 }
3799 
3800 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3801 					enum hnae3_reset_type rst_type)
3802 {
3803 	struct hclge_dev *hdev = ae_dev->priv;
3804 
3805 	set_bit(rst_type, &hdev->default_reset_request);
3806 }
3807 
3808 static void hclge_reset_timer(struct timer_list *t)
3809 {
3810 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3811 
	/* if default_reset_request has no value, this reset request has
	 * already been handled, so just return here
	 */
3815 	if (!hdev->default_reset_request)
3816 		return;
3817 
3818 	dev_info(&hdev->pdev->dev,
3819 		 "triggering reset in reset timer\n");
3820 	hclge_reset_event(hdev->pdev, NULL);
3821 }
3822 
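/* The reset subtask handles two bitmaps in order: reset_pending (a reset the
 * hardware is already performing, serviced via hclge_reset()) and
 * reset_request (a new reset asked for by the stack or by error handling,
 * serviced via hclge_do_reset()). hclge_get_reset_level() picks the highest
 * priority level set in the given bitmap and clears that bit.
 */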
3823 static void hclge_reset_subtask(struct hclge_dev *hdev)
3824 {
3825 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3826 
	/* Check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, we need to wait for the
	 * hardware to complete the reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. Otherwise, we can come back later to check this status, so
	 *       re-schedule now.
	 */
3836 	hdev->last_reset_time = jiffies;
3837 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3838 	if (hdev->reset_type != HNAE3_NONE_RESET)
3839 		hclge_reset(hdev);
3840 
3841 	/* check if we got any *new* reset requests to be honored */
3842 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3843 	if (hdev->reset_type != HNAE3_NONE_RESET)
3844 		hclge_do_reset(hdev);
3845 
3846 	hdev->reset_type = HNAE3_NONE_RESET;
3847 }
3848 
3849 static void hclge_reset_service_task(struct work_struct *work)
3850 {
3851 	struct hclge_dev *hdev =
3852 		container_of(work, struct hclge_dev, rst_service_task);
3853 
3854 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3855 		return;
3856 
3857 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3858 
3859 	hclge_reset_subtask(hdev);
3860 
3861 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3862 }
3863 
3864 static void hclge_mailbox_service_task(struct work_struct *work)
3865 {
3866 	struct hclge_dev *hdev =
3867 		container_of(work, struct hclge_dev, mbx_service_task);
3868 
3869 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3870 		return;
3871 
3872 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3873 
3874 	hclge_mbx_handler(hdev);
3875 
3876 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3877 }
3878 
3879 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3880 {
3881 	int i;
3882 
	/* start from vport 1, because the PF (vport 0) is always alive */
3884 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3885 		struct hclge_vport *vport = &hdev->vport[i];
3886 
3887 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3888 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3889 
		/* If the VF is not alive, restore the default MPS value */
3891 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3892 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3893 	}
3894 }
3895 
3896 static void hclge_service_task(struct work_struct *work)
3897 {
3898 	struct hclge_dev *hdev =
3899 		container_of(work, struct hclge_dev, service_task.work);
3900 
3901 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3902 
3903 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3904 		hclge_update_stats_for_all(hdev);
3905 		hdev->hw_stats.stats_timer = 0;
3906 	}
3907 
3908 	hclge_update_port_info(hdev);
3909 	hclge_update_link_status(hdev);
3910 	hclge_update_vport_alive(hdev);
3911 	hclge_sync_vlan_filter(hdev);
3912 	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3913 		hclge_rfs_filter_expire(hdev);
3914 		hdev->fd_arfs_expire_timer = 0;
3915 	}
3916 
3917 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3918 }
3919 
3920 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3921 {
3922 	/* VF handle has no client */
3923 	if (!handle->client)
3924 		return container_of(handle, struct hclge_vport, nic);
3925 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3926 		return container_of(handle, struct hclge_vport, roce);
3927 	else
3928 		return container_of(handle, struct hclge_vport, nic);
3929 }
3930 
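/* MSI-X vector 0 is reserved (it is used for the misc interrupt elsewhere in
 * this driver), so ring vectors are allocated starting from index 1. The
 * doorbell address handed back to the client for each vector is computed as
 * in the loop below:
 *
 *	io_addr = hw.io_base + HCLGE_VECTOR_REG_BASE +
 *		  (i - 1) * HCLGE_VECTOR_REG_OFFSET +
 *		  vport_id * HCLGE_VECTOR_VF_OFFSET;
 *
 * e.g. vector index 1 of vport 0 maps to io_base + HCLGE_VECTOR_REG_BASE.
 */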
3931 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3932 			    struct hnae3_vector_info *vector_info)
3933 {
3934 	struct hclge_vport *vport = hclge_get_vport(handle);
3935 	struct hnae3_vector_info *vector = vector_info;
3936 	struct hclge_dev *hdev = vport->back;
3937 	int alloc = 0;
3938 	int i, j;
3939 
3940 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
3941 	vector_num = min(hdev->num_msi_left, vector_num);
3942 
3943 	for (j = 0; j < vector_num; j++) {
3944 		for (i = 1; i < hdev->num_msi; i++) {
3945 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3946 				vector->vector = pci_irq_vector(hdev->pdev, i);
3947 				vector->io_addr = hdev->hw.io_base +
3948 					HCLGE_VECTOR_REG_BASE +
3949 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3950 					vport->vport_id *
3951 					HCLGE_VECTOR_VF_OFFSET;
3952 				hdev->vector_status[i] = vport->vport_id;
3953 				hdev->vector_irq[i] = vector->vector;
3954 
3955 				vector++;
3956 				alloc++;
3957 
3958 				break;
3959 			}
3960 		}
3961 	}
3962 	hdev->num_msi_left -= alloc;
3963 	hdev->num_msi_used += alloc;
3964 
3965 	return alloc;
3966 }
3967 
3968 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3969 {
3970 	int i;
3971 
3972 	for (i = 0; i < hdev->num_msi; i++)
3973 		if (vector == hdev->vector_irq[i])
3974 			return i;
3975 
3976 	return -EINVAL;
3977 }
3978 
3979 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3980 {
3981 	struct hclge_vport *vport = hclge_get_vport(handle);
3982 	struct hclge_dev *hdev = vport->back;
3983 	int vector_id;
3984 
3985 	vector_id = hclge_get_vector_index(hdev, vector);
3986 	if (vector_id < 0) {
3987 		dev_err(&hdev->pdev->dev,
3988 			"Get vector index fail. vector_id =%d\n", vector_id);
3989 		return vector_id;
3990 	}
3991 
3992 	hclge_free_vector(hdev, vector_id);
3993 
3994 	return 0;
3995 }
3996 
3997 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3998 {
3999 	return HCLGE_RSS_KEY_SIZE;
4000 }
4001 
4002 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4003 {
4004 	return HCLGE_RSS_IND_TBL_SIZE;
4005 }
4006 
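/* The RSS hash key is programmed in chunks: each command descriptor carries
 * at most HCLGE_RSS_HASH_KEY_NUM bytes of the key, and hash_config encodes
 * both the hash algorithm and the chunk index (key_offset). A sketch,
 * assuming for illustration a 40-byte key split into 16-byte chunks:
 *
 *	chunk 0: key[0..15]  -> key_offset 0
 *	chunk 1: key[16..31] -> key_offset 1
 *	chunk 2: key[32..39] -> key_offset 2 (partial chunk)
 */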
4007 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4008 				  const u8 hfunc, const u8 *key)
4009 {
4010 	struct hclge_rss_config_cmd *req;
4011 	unsigned int key_offset = 0;
4012 	struct hclge_desc desc;
4013 	int key_counts;
4014 	int key_size;
4015 	int ret;
4016 
4017 	key_counts = HCLGE_RSS_KEY_SIZE;
4018 	req = (struct hclge_rss_config_cmd *)desc.data;
4019 
4020 	while (key_counts) {
4021 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4022 					   false);
4023 
4024 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4025 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4026 
4027 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4028 		memcpy(req->hash_key,
4029 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4030 
4031 		key_counts -= key_size;
4032 		key_offset++;
4033 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4034 		if (ret) {
4035 			dev_err(&hdev->pdev->dev,
4036 				"Configure RSS config fail, status = %d\n",
4037 				ret);
4038 			return ret;
4039 		}
4040 	}
4041 	return 0;
4042 }
4043 
4044 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4045 {
4046 	struct hclge_rss_indirection_table_cmd *req;
4047 	struct hclge_desc desc;
4048 	int i, j;
4049 	int ret;
4050 
4051 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4052 
4053 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4054 		hclge_cmd_setup_basic_desc
4055 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4056 
4057 		req->start_table_index =
4058 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4059 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4060 
4061 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4062 			req->rss_result[j] =
4063 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4064 
4065 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4066 		if (ret) {
4067 			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
4069 				ret);
4070 			return ret;
4071 		}
4072 	}
4073 	return 0;
4074 }
4075 
4076 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4077 				 u16 *tc_size, u16 *tc_offset)
4078 {
4079 	struct hclge_rss_tc_mode_cmd *req;
4080 	struct hclge_desc desc;
4081 	int ret;
4082 	int i;
4083 
4084 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4085 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4086 
4087 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4088 		u16 mode = 0;
4089 
4090 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4091 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4092 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4093 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4094 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4095 
4096 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4097 	}
4098 
4099 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4100 	if (ret)
4101 		dev_err(&hdev->pdev->dev,
4102 			"Configure rss tc mode fail, status = %d\n", ret);
4103 
4104 	return ret;
4105 }
4106 
4107 static void hclge_get_rss_type(struct hclge_vport *vport)
4108 {
4109 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4110 	    vport->rss_tuple_sets.ipv4_udp_en ||
4111 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4112 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4113 	    vport->rss_tuple_sets.ipv6_udp_en ||
4114 	    vport->rss_tuple_sets.ipv6_sctp_en)
4115 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4116 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4117 		 vport->rss_tuple_sets.ipv6_fragment_en)
4118 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4119 	else
4120 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4121 }
4122 
4123 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4124 {
4125 	struct hclge_rss_input_tuple_cmd *req;
4126 	struct hclge_desc desc;
4127 	int ret;
4128 
4129 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4130 
4131 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4132 
4133 	/* Get the tuple cfg from pf */
4134 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4135 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4136 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4137 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4138 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4139 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4140 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4141 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4142 	hclge_get_rss_type(&hdev->vport[0]);
4143 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4144 	if (ret)
4145 		dev_err(&hdev->pdev->dev,
4146 			"Configure rss input fail, status = %d\n", ret);
4147 	return ret;
4148 }
4149 
4150 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4151 			 u8 *key, u8 *hfunc)
4152 {
4153 	struct hclge_vport *vport = hclge_get_vport(handle);
4154 	int i;
4155 
4156 	/* Get hash algorithm */
4157 	if (hfunc) {
4158 		switch (vport->rss_algo) {
4159 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4160 			*hfunc = ETH_RSS_HASH_TOP;
4161 			break;
4162 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4163 			*hfunc = ETH_RSS_HASH_XOR;
4164 			break;
4165 		default:
4166 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4167 			break;
4168 		}
4169 	}
4170 
4171 	/* Get the RSS Key required by the user */
4172 	if (key)
4173 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4174 
4175 	/* Get indirect table */
4176 	if (indir)
4177 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4178 			indir[i] =  vport->rss_indirection_tbl[i];
4179 
4180 	return 0;
4181 }
4182 
4183 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4184 			 const  u8 *key, const  u8 hfunc)
4185 {
4186 	struct hclge_vport *vport = hclge_get_vport(handle);
4187 	struct hclge_dev *hdev = vport->back;
4188 	u8 hash_algo;
4189 	int ret, i;
4190 
	/* Set the RSS Hash Key if specified by the user */
4192 	if (key) {
4193 		switch (hfunc) {
4194 		case ETH_RSS_HASH_TOP:
4195 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4196 			break;
4197 		case ETH_RSS_HASH_XOR:
4198 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4199 			break;
4200 		case ETH_RSS_HASH_NO_CHANGE:
4201 			hash_algo = vport->rss_algo;
4202 			break;
4203 		default:
4204 			return -EINVAL;
4205 		}
4206 
4207 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4208 		if (ret)
4209 			return ret;
4210 
		/* Update the shadow RSS key with the user-specified key */
4212 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4213 		vport->rss_algo = hash_algo;
4214 	}
4215 
4216 	/* Update the shadow RSS table with user specified qids */
4217 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4218 		vport->rss_indirection_tbl[i] = indir[i];
4219 
4220 	/* Update the hardware */
4221 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4222 }
4223 
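/* Translate the ethtool RXH_* flags in nfc->data into the hardware tuple
 * bits. For example, requesting RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3 yields HCLGE_S_IP_BIT | HCLGE_D_IP_BIT | HCLGE_S_PORT_BIT |
 * HCLGE_D_PORT_BIT, i.e. hashing on the full 4-tuple; SCTP flows
 * additionally get HCLGE_V_TAG_BIT set.
 */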
4224 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4225 {
4226 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4227 
4228 	if (nfc->data & RXH_L4_B_2_3)
4229 		hash_sets |= HCLGE_D_PORT_BIT;
4230 	else
4231 		hash_sets &= ~HCLGE_D_PORT_BIT;
4232 
4233 	if (nfc->data & RXH_IP_SRC)
4234 		hash_sets |= HCLGE_S_IP_BIT;
4235 	else
4236 		hash_sets &= ~HCLGE_S_IP_BIT;
4237 
4238 	if (nfc->data & RXH_IP_DST)
4239 		hash_sets |= HCLGE_D_IP_BIT;
4240 	else
4241 		hash_sets &= ~HCLGE_D_IP_BIT;
4242 
4243 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4244 		hash_sets |= HCLGE_V_TAG_BIT;
4245 
4246 	return hash_sets;
4247 }
4248 
4249 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4250 			       struct ethtool_rxnfc *nfc)
4251 {
4252 	struct hclge_vport *vport = hclge_get_vport(handle);
4253 	struct hclge_dev *hdev = vport->back;
4254 	struct hclge_rss_input_tuple_cmd *req;
4255 	struct hclge_desc desc;
4256 	u8 tuple_sets;
4257 	int ret;
4258 
4259 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4260 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4261 		return -EINVAL;
4262 
4263 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4264 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4265 
4266 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4267 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4268 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4269 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4270 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4271 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4272 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4273 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4274 
4275 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4276 	switch (nfc->flow_type) {
4277 	case TCP_V4_FLOW:
4278 		req->ipv4_tcp_en = tuple_sets;
4279 		break;
4280 	case TCP_V6_FLOW:
4281 		req->ipv6_tcp_en = tuple_sets;
4282 		break;
4283 	case UDP_V4_FLOW:
4284 		req->ipv4_udp_en = tuple_sets;
4285 		break;
4286 	case UDP_V6_FLOW:
4287 		req->ipv6_udp_en = tuple_sets;
4288 		break;
4289 	case SCTP_V4_FLOW:
4290 		req->ipv4_sctp_en = tuple_sets;
4291 		break;
4292 	case SCTP_V6_FLOW:
4293 		if ((nfc->data & RXH_L4_B_0_1) ||
4294 		    (nfc->data & RXH_L4_B_2_3))
4295 			return -EINVAL;
4296 
4297 		req->ipv6_sctp_en = tuple_sets;
4298 		break;
4299 	case IPV4_FLOW:
4300 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4301 		break;
4302 	case IPV6_FLOW:
4303 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4304 		break;
4305 	default:
4306 		return -EINVAL;
4307 	}
4308 
4309 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4310 	if (ret) {
4311 		dev_err(&hdev->pdev->dev,
4312 			"Set rss tuple fail, status = %d\n", ret);
4313 		return ret;
4314 	}
4315 
4316 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4317 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4318 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4319 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4320 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4321 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4322 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4323 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4324 	hclge_get_rss_type(vport);
4325 	return 0;
4326 }
4327 
4328 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4329 			       struct ethtool_rxnfc *nfc)
4330 {
4331 	struct hclge_vport *vport = hclge_get_vport(handle);
4332 	u8 tuple_sets;
4333 
4334 	nfc->data = 0;
4335 
4336 	switch (nfc->flow_type) {
4337 	case TCP_V4_FLOW:
4338 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4339 		break;
4340 	case UDP_V4_FLOW:
4341 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4342 		break;
4343 	case TCP_V6_FLOW:
4344 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4345 		break;
4346 	case UDP_V6_FLOW:
4347 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4348 		break;
4349 	case SCTP_V4_FLOW:
4350 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4351 		break;
4352 	case SCTP_V6_FLOW:
4353 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4354 		break;
4355 	case IPV4_FLOW:
4356 	case IPV6_FLOW:
4357 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4358 		break;
4359 	default:
4360 		return -EINVAL;
4361 	}
4362 
4363 	if (!tuple_sets)
4364 		return 0;
4365 
4366 	if (tuple_sets & HCLGE_D_PORT_BIT)
4367 		nfc->data |= RXH_L4_B_2_3;
4368 	if (tuple_sets & HCLGE_S_PORT_BIT)
4369 		nfc->data |= RXH_L4_B_0_1;
4370 	if (tuple_sets & HCLGE_D_IP_BIT)
4371 		nfc->data |= RXH_IP_DST;
4372 	if (tuple_sets & HCLGE_S_IP_BIT)
4373 		nfc->data |= RXH_IP_SRC;
4374 
4375 	return 0;
4376 }
4377 
4378 static int hclge_get_tc_size(struct hnae3_handle *handle)
4379 {
4380 	struct hclge_vport *vport = hclge_get_vport(handle);
4381 	struct hclge_dev *hdev = vport->back;
4382 
4383 	return hdev->rss_size_max;
4384 }
4385 
4386 int hclge_rss_init_hw(struct hclge_dev *hdev)
4387 {
4388 	struct hclge_vport *vport = hdev->vport;
4389 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4390 	u16 rss_size = vport[0].alloc_rss_size;
4391 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4392 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4393 	u8 *key = vport[0].rss_hash_key;
4394 	u8 hfunc = vport[0].rss_algo;
4395 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4396 	u16 roundup_size;
4397 	unsigned int i;
4398 	int ret;
4399 
4400 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4401 	if (ret)
4402 		return ret;
4403 
4404 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4405 	if (ret)
4406 		return ret;
4407 
4408 	ret = hclge_set_rss_input_tuple(hdev);
4409 	if (ret)
4410 		return ret;
4411 
	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
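	/* For example (illustrative values): with rss_size = 10,
	 * roundup_pow_of_two(10) = 16 and ilog2(16) = 4, so tc_size[i] = 4
	 * is written for every enabled TC, while tc_offset[i] = 10 * i.
	 */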
4416 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4417 		dev_err(&hdev->pdev->dev,
4418 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
4419 			rss_size);
4420 		return -EINVAL;
4421 	}
4422 
4423 	roundup_size = roundup_pow_of_two(rss_size);
4424 	roundup_size = ilog2(roundup_size);
4425 
4426 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4427 		tc_valid[i] = 0;
4428 
4429 		if (!(hdev->hw_tc_map & BIT(i)))
4430 			continue;
4431 
4432 		tc_valid[i] = 1;
4433 		tc_size[i] = roundup_size;
4434 		tc_offset[i] = rss_size * i;
4435 	}
4436 
4437 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4438 }
4439 
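/* Fill each vport's RSS indirection table with a simple round-robin default.
 * For example, with alloc_rss_size = 4 the table becomes
 * 0, 1, 2, 3, 0, 1, 2, 3, ... across all HCLGE_RSS_IND_TBL_SIZE entries.
 */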
4440 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4441 {
4442 	struct hclge_vport *vport = hdev->vport;
4443 	int i, j;
4444 
4445 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4446 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4447 			vport[j].rss_indirection_tbl[i] =
4448 				i % vport[j].alloc_rss_size;
4449 	}
4450 }
4451 
4452 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4453 {
4454 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4455 	struct hclge_vport *vport = hdev->vport;
4456 
4457 	if (hdev->pdev->revision >= 0x21)
4458 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4459 
4460 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4461 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4462 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4463 		vport[i].rss_tuple_sets.ipv4_udp_en =
4464 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4465 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4466 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4467 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4468 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4469 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4470 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4471 		vport[i].rss_tuple_sets.ipv6_udp_en =
4472 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4473 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4474 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4475 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4476 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4477 
4478 		vport[i].rss_algo = rss_algo;
4479 
4480 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4481 		       HCLGE_RSS_KEY_SIZE);
4482 	}
4483 
4484 	hclge_rss_indir_init_cfg(hdev);
4485 }
4486 
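/* Ring-to-vector mappings are sent to the firmware in batches: each command
 * descriptor holds up to HCLGE_VECTOR_ELEMENTS_PER_CMD tqp_type_and_id
 * entries. When the ring chain is longer than that, the loop below flushes
 * a full descriptor and starts a new one; any remaining entries are sent
 * after the loop.
 */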
4487 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4488 				int vector_id, bool en,
4489 				struct hnae3_ring_chain_node *ring_chain)
4490 {
4491 	struct hclge_dev *hdev = vport->back;
4492 	struct hnae3_ring_chain_node *node;
4493 	struct hclge_desc desc;
4494 	struct hclge_ctrl_vector_chain_cmd *req =
4495 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4496 	enum hclge_cmd_status status;
4497 	enum hclge_opcode_type op;
4498 	u16 tqp_type_and_id;
4499 	int i;
4500 
4501 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4502 	hclge_cmd_setup_basic_desc(&desc, op, false);
4503 	req->int_vector_id = vector_id;
4504 
4505 	i = 0;
4506 	for (node = ring_chain; node; node = node->next) {
4507 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4508 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4509 				HCLGE_INT_TYPE_S,
4510 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4511 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4512 				HCLGE_TQP_ID_S, node->tqp_index);
4513 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4514 				HCLGE_INT_GL_IDX_S,
4515 				hnae3_get_field(node->int_gl_idx,
4516 						HNAE3_RING_GL_IDX_M,
4517 						HNAE3_RING_GL_IDX_S));
4518 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4519 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4520 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4521 			req->vfid = vport->vport_id;
4522 
4523 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4524 			if (status) {
4525 				dev_err(&hdev->pdev->dev,
4526 					"Map TQP fail, status is %d.\n",
4527 					status);
4528 				return -EIO;
4529 			}
4530 			i = 0;
4531 
4532 			hclge_cmd_setup_basic_desc(&desc,
4533 						   op,
4534 						   false);
4535 			req->int_vector_id = vector_id;
4536 		}
4537 	}
4538 
4539 	if (i > 0) {
4540 		req->int_cause_num = i;
4541 		req->vfid = vport->vport_id;
4542 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4543 		if (status) {
4544 			dev_err(&hdev->pdev->dev,
4545 				"Map TQP fail, status is %d.\n", status);
4546 			return -EIO;
4547 		}
4548 	}
4549 
4550 	return 0;
4551 }
4552 
4553 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4554 				    struct hnae3_ring_chain_node *ring_chain)
4555 {
4556 	struct hclge_vport *vport = hclge_get_vport(handle);
4557 	struct hclge_dev *hdev = vport->back;
4558 	int vector_id;
4559 
4560 	vector_id = hclge_get_vector_index(hdev, vector);
4561 	if (vector_id < 0) {
4562 		dev_err(&hdev->pdev->dev,
4563 			"Get vector index fail. vector_id =%d\n", vector_id);
4564 		return vector_id;
4565 	}
4566 
4567 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4568 }
4569 
4570 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4571 				       struct hnae3_ring_chain_node *ring_chain)
4572 {
4573 	struct hclge_vport *vport = hclge_get_vport(handle);
4574 	struct hclge_dev *hdev = vport->back;
4575 	int vector_id, ret;
4576 
4577 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4578 		return 0;
4579 
4580 	vector_id = hclge_get_vector_index(hdev, vector);
4581 	if (vector_id < 0) {
4582 		dev_err(&handle->pdev->dev,
4583 			"Get vector index fail. ret =%d\n", vector_id);
4584 		return vector_id;
4585 	}
4586 
4587 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4588 	if (ret)
4589 		dev_err(&handle->pdev->dev,
4590 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4591 			vector_id, ret);
4592 
4593 	return ret;
4594 }
4595 
4596 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4597 			       struct hclge_promisc_param *param)
4598 {
4599 	struct hclge_promisc_cfg_cmd *req;
4600 	struct hclge_desc desc;
4601 	int ret;
4602 
4603 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4604 
4605 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4606 	req->vf_id = param->vf_id;
4607 
	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision 0x20; newer revisions support them. Setting these two
	 * fields does not cause an error when the driver sends the command to
	 * the firmware on revision 0x20.
	 */
4613 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4614 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4615 
4616 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4617 	if (ret)
4618 		dev_err(&hdev->pdev->dev,
4619 			"Set promisc mode fail, status is %d.\n", ret);
4620 
4621 	return ret;
4622 }
4623 
4624 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4625 			      bool en_mc, bool en_bc, int vport_id)
4626 {
4627 	if (!param)
4628 		return;
4629 
4630 	memset(param, 0, sizeof(struct hclge_promisc_param));
4631 	if (en_uc)
4632 		param->enable = HCLGE_PROMISC_EN_UC;
4633 	if (en_mc)
4634 		param->enable |= HCLGE_PROMISC_EN_MC;
4635 	if (en_bc)
4636 		param->enable |= HCLGE_PROMISC_EN_BC;
4637 	param->vf_id = vport_id;
4638 }
4639 
4640 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4641 				  bool en_mc_pmc)
4642 {
4643 	struct hclge_vport *vport = hclge_get_vport(handle);
4644 	struct hclge_dev *hdev = vport->back;
4645 	struct hclge_promisc_param param;
4646 	bool en_bc_pmc = true;
4647 
	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
4652 	if (handle->pdev->revision == 0x20)
4653 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4654 
4655 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4656 				 vport->vport_id);
4657 	return hclge_cmd_set_promisc_mode(hdev, &param);
4658 }
4659 
4660 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4661 {
4662 	struct hclge_get_fd_mode_cmd *req;
4663 	struct hclge_desc desc;
4664 	int ret;
4665 
4666 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4667 
4668 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4669 
4670 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4671 	if (ret) {
4672 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4673 		return ret;
4674 	}
4675 
4676 	*fd_mode = req->mode;
4677 
4678 	return ret;
4679 }
4680 
4681 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4682 				   u32 *stage1_entry_num,
4683 				   u32 *stage2_entry_num,
4684 				   u16 *stage1_counter_num,
4685 				   u16 *stage2_counter_num)
4686 {
4687 	struct hclge_get_fd_allocation_cmd *req;
4688 	struct hclge_desc desc;
4689 	int ret;
4690 
4691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4692 
4693 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4694 
4695 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4696 	if (ret) {
4697 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4698 			ret);
4699 		return ret;
4700 	}
4701 
4702 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4703 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4704 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4705 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4706 
4707 	return ret;
4708 }
4709 
4710 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4711 {
4712 	struct hclge_set_fd_key_config_cmd *req;
4713 	struct hclge_fd_key_cfg *stage;
4714 	struct hclge_desc desc;
4715 	int ret;
4716 
4717 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4718 
4719 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4720 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4721 	req->stage = stage_num;
4722 	req->key_select = stage->key_sel;
4723 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4724 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4725 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4726 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4727 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4728 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4729 
4730 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4731 	if (ret)
4732 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4733 
4734 	return ret;
4735 }
4736 
4737 static int hclge_init_fd_config(struct hclge_dev *hdev)
4738 {
4739 #define LOW_2_WORDS		0x03
4740 	struct hclge_fd_key_cfg *key_cfg;
4741 	int ret;
4742 
4743 	if (!hnae3_dev_fd_supported(hdev))
4744 		return 0;
4745 
4746 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4747 	if (ret)
4748 		return ret;
4749 
4750 	switch (hdev->fd_cfg.fd_mode) {
4751 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4752 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4753 		break;
4754 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4755 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4756 		break;
4757 	default:
4758 		dev_err(&hdev->pdev->dev,
4759 			"Unsupported flow director mode %d\n",
4760 			hdev->fd_cfg.fd_mode);
4761 		return -EOPNOTSUPP;
4762 	}
4763 
4764 	hdev->fd_cfg.proto_support =
4765 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4766 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4767 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4769 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4770 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4771 	key_cfg->outer_sipv6_word_en = 0;
4772 	key_cfg->outer_dipv6_word_en = 0;
4773 
4774 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4775 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4776 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4777 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4778 
	/* If using the max 400-bit key, we can support tuples for ether type */
4780 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4781 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4782 		key_cfg->tuple_active |=
4783 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4784 	}
4785 
4786 	/* roce_type is used to filter roce frames
4787 	 * dst_vport is used to specify the rule
4788 	 */
4789 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4790 
4791 	ret = hclge_get_fd_allocation(hdev,
4792 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4793 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4794 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4795 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4796 	if (ret)
4797 		return ret;
4798 
4799 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4800 }
4801 
4802 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4803 				int loc, u8 *key, bool is_add)
4804 {
4805 	struct hclge_fd_tcam_config_1_cmd *req1;
4806 	struct hclge_fd_tcam_config_2_cmd *req2;
4807 	struct hclge_fd_tcam_config_3_cmd *req3;
4808 	struct hclge_desc desc[3];
4809 	int ret;
4810 
4811 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4812 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4813 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4814 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4815 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4816 
4817 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4818 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4819 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4820 
4821 	req1->stage = stage;
4822 	req1->xy_sel = sel_x ? 1 : 0;
4823 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4824 	req1->index = cpu_to_le32(loc);
4825 	req1->entry_vld = sel_x ? is_add : 0;
4826 
4827 	if (key) {
4828 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4829 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4830 		       sizeof(req2->tcam_data));
4831 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4832 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4833 	}
4834 
4835 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4836 	if (ret)
4837 		dev_err(&hdev->pdev->dev,
4838 			"config tcam key fail, ret=%d\n",
4839 			ret);
4840 
4841 	return ret;
4842 }
4843 
4844 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4845 			      struct hclge_fd_ad_data *action)
4846 {
4847 	struct hclge_fd_ad_config_cmd *req;
4848 	struct hclge_desc desc;
4849 	u64 ad_data = 0;
4850 	int ret;
4851 
4852 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4853 
4854 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4855 	req->index = cpu_to_le32(loc);
4856 	req->stage = stage;
4857 
4858 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4859 		      action->write_rule_id_to_bd);
4860 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4861 			action->rule_id);
4862 	ad_data <<= 32;
4863 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4864 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4865 		      action->forward_to_direct_queue);
4866 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4867 			action->queue_id);
4868 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4869 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4870 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4871 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4872 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4873 			action->counter_id);
4874 
4875 	req->ad_data = cpu_to_le64(ad_data);
4876 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4877 	if (ret)
4878 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4879 
4880 	return ret;
4881 }
4882 
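/* Convert one tuple of a flow director rule into its TCAM key_x/key_y
 * representation. Tuples the rule marks as unused are reported as present
 * but their key bytes are left as zero; for used tuples the calc_x()/calc_y()
 * helpers combine each value with its mask so the TCAM can treat masked-out
 * bits as don't-care. Returns false for tuple bits that are not active in the
 * key config, so the caller does not advance the key cursor for them.
 */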
4883 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4884 				   struct hclge_fd_rule *rule)
4885 {
4886 	u16 tmp_x_s, tmp_y_s;
4887 	u32 tmp_x_l, tmp_y_l;
4888 	int i;
4889 
4890 	if (rule->unused_tuple & tuple_bit)
4891 		return true;
4892 
4893 	switch (tuple_bit) {
4894 	case 0:
4895 		return false;
4896 	case BIT(INNER_DST_MAC):
4897 		for (i = 0; i < ETH_ALEN; i++) {
4898 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4899 			       rule->tuples_mask.dst_mac[i]);
4900 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4901 			       rule->tuples_mask.dst_mac[i]);
4902 		}
4903 
4904 		return true;
4905 	case BIT(INNER_SRC_MAC):
4906 		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
4911 		}
4912 
4913 		return true;
4914 	case BIT(INNER_VLAN_TAG_FST):
4915 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4916 		       rule->tuples_mask.vlan_tag1);
4917 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4918 		       rule->tuples_mask.vlan_tag1);
4919 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4920 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4921 
4922 		return true;
4923 	case BIT(INNER_ETH_TYPE):
4924 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4925 		       rule->tuples_mask.ether_proto);
4926 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4927 		       rule->tuples_mask.ether_proto);
4928 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4929 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4930 
4931 		return true;
4932 	case BIT(INNER_IP_TOS):
4933 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4934 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4935 
4936 		return true;
4937 	case BIT(INNER_IP_PROTO):
4938 		calc_x(*key_x, rule->tuples.ip_proto,
4939 		       rule->tuples_mask.ip_proto);
4940 		calc_y(*key_y, rule->tuples.ip_proto,
4941 		       rule->tuples_mask.ip_proto);
4942 
4943 		return true;
4944 	case BIT(INNER_SRC_IP):
4945 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4946 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4947 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4948 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4949 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4950 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4951 
4952 		return true;
4953 	case BIT(INNER_DST_IP):
4954 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4955 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4956 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4957 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4958 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4959 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4960 
4961 		return true;
4962 	case BIT(INNER_SRC_PORT):
4963 		calc_x(tmp_x_s, rule->tuples.src_port,
4964 		       rule->tuples_mask.src_port);
4965 		calc_y(tmp_y_s, rule->tuples.src_port,
4966 		       rule->tuples_mask.src_port);
4967 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4968 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4969 
4970 		return true;
4971 	case BIT(INNER_DST_PORT):
4972 		calc_x(tmp_x_s, rule->tuples.dst_port,
4973 		       rule->tuples_mask.dst_port);
4974 		calc_y(tmp_y_s, rule->tuples.dst_port,
4975 		       rule->tuples_mask.dst_port);
4976 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4977 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4978 
4979 		return true;
4980 	default:
4981 		return false;
4982 	}
4983 }
4984 
4985 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4986 				 u8 vf_id, u8 network_port_id)
4987 {
4988 	u32 port_number = 0;
4989 
4990 	if (port_type == HOST_PORT) {
4991 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4992 				pf_id);
4993 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4994 				vf_id);
4995 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4996 	} else {
4997 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4998 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4999 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5000 	}
5001 
5002 	return port_number;
5003 }
5004 
5005 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5006 				       __le32 *key_x, __le32 *key_y,
5007 				       struct hclge_fd_rule *rule)
5008 {
5009 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5010 	u8 cur_pos = 0, tuple_size, shift_bits;
5011 	unsigned int i;
5012 
5013 	for (i = 0; i < MAX_META_DATA; i++) {
5014 		tuple_size = meta_data_key_info[i].key_length;
5015 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5016 
5017 		switch (tuple_bit) {
5018 		case BIT(ROCE_TYPE):
5019 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5020 			cur_pos += tuple_size;
5021 			break;
5022 		case BIT(DST_VPORT):
5023 			port_number = hclge_get_port_number(HOST_PORT, 0,
5024 							    rule->vf_id, 0);
5025 			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size - 1, cur_pos),
5027 					cur_pos, port_number);
5028 			cur_pos += tuple_size;
5029 			break;
5030 		default:
5031 			break;
5032 		}
5033 	}
5034 
5035 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5036 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5037 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5038 
5039 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5040 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5041 }
5042 
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region and the tuple key in the
 * LSB region; unused bits are filled with 0.
 */
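/* Rough layout of the key_x/key_y buffers built below (MAX_KEY_BYTES each):
 *
 *	[ tuple key, packed from offset 0 ][ zero padding ][ meta data ]
 *	                                                   ^
 *	              meta_data_region = max_key_length / 8 -
 *	                                 MAX_META_DATA_LENGTH / 8
 */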
5047 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5048 			    struct hclge_fd_rule *rule)
5049 {
5050 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5051 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5052 	u8 *cur_key_x, *cur_key_y;
5053 	unsigned int i;
5054 	int ret, tuple_size;
5055 	u8 meta_data_region;
5056 
5057 	memset(key_x, 0, sizeof(key_x));
5058 	memset(key_y, 0, sizeof(key_y));
5059 	cur_key_x = key_x;
5060 	cur_key_y = key_y;
5061 
	for (i = 0; i < MAX_TUPLE; i++) {
5063 		bool tuple_valid;
5064 		u32 check_tuple;
5065 
5066 		tuple_size = tuple_key_info[i].key_length / 8;
5067 		check_tuple = key_cfg->tuple_active & BIT(i);
5068 
5069 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5070 						     cur_key_y, rule);
5071 		if (tuple_valid) {
5072 			cur_key_x += tuple_size;
5073 			cur_key_y += tuple_size;
5074 		}
5075 	}
5076 
5077 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5078 			MAX_META_DATA_LENGTH / 8;
5079 
5080 	hclge_fd_convert_meta_data(key_cfg,
5081 				   (__le32 *)(key_x + meta_data_region),
5082 				   (__le32 *)(key_y + meta_data_region),
5083 				   rule);
5084 
5085 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5086 				   true);
5087 	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
5091 		return ret;
5092 	}
5093 
5094 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5095 				   true);
5096 	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
5100 	return ret;
5101 }
5102 
5103 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5104 			       struct hclge_fd_rule *rule)
5105 {
5106 	struct hclge_fd_ad_data ad_data;
5107 
5108 	ad_data.ad_id = rule->location;
5109 
5110 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5111 		ad_data.drop_packet = true;
5112 		ad_data.forward_to_direct_queue = false;
5113 		ad_data.queue_id = 0;
5114 	} else {
5115 		ad_data.drop_packet = false;
5116 		ad_data.forward_to_direct_queue = true;
5117 		ad_data.queue_id = rule->queue_id;
5118 	}
5119 
5120 	ad_data.use_counter = false;
5121 	ad_data.counter_id = 0;
5122 
5123 	ad_data.use_next_stage = false;
5124 	ad_data.next_input_key = 0;
5125 
5126 	ad_data.write_rule_id_to_bd = true;
5127 	ad_data.rule_id = rule->location;
5128 
5129 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5130 }
5131 
5132 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5133 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5134 {
5135 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5136 	struct ethtool_usrip4_spec *usr_ip4_spec;
5137 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5138 	struct ethtool_usrip6_spec *usr_ip6_spec;
5139 	struct ethhdr *ether_spec;
5140 
5141 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5142 		return -EINVAL;
5143 
5144 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5145 		return -EOPNOTSUPP;
5146 
5147 	if ((fs->flow_type & FLOW_EXT) &&
5148 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5149 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5150 		return -EOPNOTSUPP;
5151 	}
5152 
5153 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5154 	case SCTP_V4_FLOW:
5155 	case TCP_V4_FLOW:
5156 	case UDP_V4_FLOW:
5157 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5158 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5159 
5160 		if (!tcp_ip4_spec->ip4src)
5161 			*unused |= BIT(INNER_SRC_IP);
5162 
5163 		if (!tcp_ip4_spec->ip4dst)
5164 			*unused |= BIT(INNER_DST_IP);
5165 
5166 		if (!tcp_ip4_spec->psrc)
5167 			*unused |= BIT(INNER_SRC_PORT);
5168 
5169 		if (!tcp_ip4_spec->pdst)
5170 			*unused |= BIT(INNER_DST_PORT);
5171 
5172 		if (!tcp_ip4_spec->tos)
5173 			*unused |= BIT(INNER_IP_TOS);
5174 
5175 		break;
5176 	case IP_USER_FLOW:
5177 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5178 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5179 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5180 
5181 		if (!usr_ip4_spec->ip4src)
5182 			*unused |= BIT(INNER_SRC_IP);
5183 
5184 		if (!usr_ip4_spec->ip4dst)
5185 			*unused |= BIT(INNER_DST_IP);
5186 
5187 		if (!usr_ip4_spec->tos)
5188 			*unused |= BIT(INNER_IP_TOS);
5189 
5190 		if (!usr_ip4_spec->proto)
5191 			*unused |= BIT(INNER_IP_PROTO);
5192 
5193 		if (usr_ip4_spec->l4_4_bytes)
5194 			return -EOPNOTSUPP;
5195 
5196 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5197 			return -EOPNOTSUPP;
5198 
5199 		break;
5200 	case SCTP_V6_FLOW:
5201 	case TCP_V6_FLOW:
5202 	case UDP_V6_FLOW:
5203 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5204 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5205 			BIT(INNER_IP_TOS);
5206 
		/* check whether the src/dst IP addresses are used */
5208 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5209 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5210 			*unused |= BIT(INNER_SRC_IP);
5211 
5212 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5213 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5214 			*unused |= BIT(INNER_DST_IP);
5215 
5216 		if (!tcp_ip6_spec->psrc)
5217 			*unused |= BIT(INNER_SRC_PORT);
5218 
5219 		if (!tcp_ip6_spec->pdst)
5220 			*unused |= BIT(INNER_DST_PORT);
5221 
5222 		if (tcp_ip6_spec->tclass)
5223 			return -EOPNOTSUPP;
5224 
5225 		break;
5226 	case IPV6_USER_FLOW:
5227 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5228 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5229 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5230 			BIT(INNER_DST_PORT);
5231 
		/* check whether the src/dst IP addresses are used */
5233 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5234 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5235 			*unused |= BIT(INNER_SRC_IP);
5236 
5237 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5238 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5239 			*unused |= BIT(INNER_DST_IP);
5240 
5241 		if (!usr_ip6_spec->l4_proto)
5242 			*unused |= BIT(INNER_IP_PROTO);
5243 
5244 		if (usr_ip6_spec->tclass)
5245 			return -EOPNOTSUPP;
5246 
5247 		if (usr_ip6_spec->l4_4_bytes)
5248 			return -EOPNOTSUPP;
5249 
5250 		break;
5251 	case ETHER_FLOW:
5252 		ether_spec = &fs->h_u.ether_spec;
5253 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5254 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5255 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5256 
5257 		if (is_zero_ether_addr(ether_spec->h_source))
5258 			*unused |= BIT(INNER_SRC_MAC);
5259 
5260 		if (is_zero_ether_addr(ether_spec->h_dest))
5261 			*unused |= BIT(INNER_DST_MAC);
5262 
5263 		if (!ether_spec->h_proto)
5264 			*unused |= BIT(INNER_ETH_TYPE);
5265 
5266 		break;
5267 	default:
5268 		return -EOPNOTSUPP;
5269 	}
5270 
5271 	if ((fs->flow_type & FLOW_EXT)) {
5272 		if (fs->h_ext.vlan_etype)
5273 			return -EOPNOTSUPP;
5274 		if (!fs->h_ext.vlan_tci)
5275 			*unused |= BIT(INNER_VLAN_TAG_FST);
5276 
5277 		if (fs->m_ext.vlan_tci) {
5278 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5279 				return -EINVAL;
5280 		}
5281 	} else {
5282 		*unused |= BIT(INNER_VLAN_TAG_FST);
5283 	}
5284 
5285 	if (fs->flow_type & FLOW_MAC_EXT) {
5286 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5287 			return -EOPNOTSUPP;
5288 
5289 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5290 			*unused |= BIT(INNER_DST_MAC);
5291 		else
5292 			*unused &= ~(BIT(INNER_DST_MAC));
5293 	}
5294 
5295 	return 0;
5296 }
5297 
5298 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5299 {
5300 	struct hclge_fd_rule *rule = NULL;
5301 	struct hlist_node *node2;
5302 
5303 	spin_lock_bh(&hdev->fd_rule_lock);
5304 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5305 		if (rule->location >= location)
5306 			break;
5307 	}
5308 
5309 	spin_unlock_bh(&hdev->fd_rule_lock);
5310 
5311 	return  rule && rule->location == location;
5312 }
5313 
/* The caller must hold fd_rule_lock before calling this function */
5315 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5316 				     struct hclge_fd_rule *new_rule,
5317 				     u16 location,
5318 				     bool is_add)
5319 {
5320 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5321 	struct hlist_node *node2;
5322 
5323 	if (is_add && !new_rule)
5324 		return -EINVAL;
5325 
5326 	hlist_for_each_entry_safe(rule, node2,
5327 				  &hdev->fd_rule_list, rule_node) {
5328 		if (rule->location >= location)
5329 			break;
5330 		parent = rule;
5331 	}
5332 
5333 	if (rule && rule->location == location) {
5334 		hlist_del(&rule->rule_node);
5335 		kfree(rule);
5336 		hdev->hclge_fd_rule_num--;
5337 
5338 		if (!is_add) {
5339 			if (!hdev->hclge_fd_rule_num)
5340 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5341 			clear_bit(location, hdev->fd_bmap);
5342 
5343 			return 0;
5344 		}
5345 	} else if (!is_add) {
5346 		dev_err(&hdev->pdev->dev,
5347 			"delete fail, rule %d is inexistent\n",
5348 			location);
5349 		return -EINVAL;
5350 	}
5351 
5352 	INIT_HLIST_NODE(&new_rule->rule_node);
5353 
5354 	if (parent)
5355 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5356 	else
5357 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5358 
5359 	set_bit(location, hdev->fd_bmap);
5360 	hdev->hclge_fd_rule_num++;
5361 	hdev->fd_active_type = new_rule->rule_type;
5362 
5363 	return 0;
5364 }
5365 
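/* convert the ethtool flow spec into the tuple and mask values used by
 * the flow director hardware, according to the flow type
 */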
5366 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5367 			      struct ethtool_rx_flow_spec *fs,
5368 			      struct hclge_fd_rule *rule)
5369 {
5370 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5371 
5372 	switch (flow_type) {
5373 	case SCTP_V4_FLOW:
5374 	case TCP_V4_FLOW:
5375 	case UDP_V4_FLOW:
5376 		rule->tuples.src_ip[IPV4_INDEX] =
5377 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5378 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5379 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5380 
5381 		rule->tuples.dst_ip[IPV4_INDEX] =
5382 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5383 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5384 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5385 
5386 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5387 		rule->tuples_mask.src_port =
5388 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5389 
5390 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5391 		rule->tuples_mask.dst_port =
5392 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5393 
5394 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5395 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5396 
5397 		rule->tuples.ether_proto = ETH_P_IP;
5398 		rule->tuples_mask.ether_proto = 0xFFFF;
5399 
5400 		break;
5401 	case IP_USER_FLOW:
5402 		rule->tuples.src_ip[IPV4_INDEX] =
5403 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5404 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5405 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5406 
5407 		rule->tuples.dst_ip[IPV4_INDEX] =
5408 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5409 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5410 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5411 
5412 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5413 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5414 
5415 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5416 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5417 
5418 		rule->tuples.ether_proto = ETH_P_IP;
5419 		rule->tuples_mask.ether_proto = 0xFFFF;
5420 
5421 		break;
5422 	case SCTP_V6_FLOW:
5423 	case TCP_V6_FLOW:
5424 	case UDP_V6_FLOW:
5425 		be32_to_cpu_array(rule->tuples.src_ip,
5426 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5427 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5428 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5429 
5430 		be32_to_cpu_array(rule->tuples.dst_ip,
5431 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5432 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5433 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5434 
5435 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5436 		rule->tuples_mask.src_port =
5437 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5438 
5439 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5440 		rule->tuples_mask.dst_port =
5441 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5442 
5443 		rule->tuples.ether_proto = ETH_P_IPV6;
5444 		rule->tuples_mask.ether_proto = 0xFFFF;
5445 
5446 		break;
5447 	case IPV6_USER_FLOW:
5448 		be32_to_cpu_array(rule->tuples.src_ip,
5449 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5450 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5451 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5452 
5453 		be32_to_cpu_array(rule->tuples.dst_ip,
5454 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5455 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5456 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5457 
5458 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5459 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5460 
5461 		rule->tuples.ether_proto = ETH_P_IPV6;
5462 		rule->tuples_mask.ether_proto = 0xFFFF;
5463 
5464 		break;
5465 	case ETHER_FLOW:
5466 		ether_addr_copy(rule->tuples.src_mac,
5467 				fs->h_u.ether_spec.h_source);
5468 		ether_addr_copy(rule->tuples_mask.src_mac,
5469 				fs->m_u.ether_spec.h_source);
5470 
5471 		ether_addr_copy(rule->tuples.dst_mac,
5472 				fs->h_u.ether_spec.h_dest);
5473 		ether_addr_copy(rule->tuples_mask.dst_mac,
5474 				fs->m_u.ether_spec.h_dest);
5475 
5476 		rule->tuples.ether_proto =
5477 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5478 		rule->tuples_mask.ether_proto =
5479 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5480 
5481 		break;
5482 	default:
5483 		return -EOPNOTSUPP;
5484 	}
5485 
5486 	switch (flow_type) {
5487 	case SCTP_V4_FLOW:
5488 	case SCTP_V6_FLOW:
5489 		rule->tuples.ip_proto = IPPROTO_SCTP;
5490 		rule->tuples_mask.ip_proto = 0xFF;
5491 		break;
5492 	case TCP_V4_FLOW:
5493 	case TCP_V6_FLOW:
5494 		rule->tuples.ip_proto = IPPROTO_TCP;
5495 		rule->tuples_mask.ip_proto = 0xFF;
5496 		break;
5497 	case UDP_V4_FLOW:
5498 	case UDP_V6_FLOW:
5499 		rule->tuples.ip_proto = IPPROTO_UDP;
5500 		rule->tuples_mask.ip_proto = 0xFF;
5501 		break;
5502 	default:
5503 		break;
5504 	}
5505 
5506 	if ((fs->flow_type & FLOW_EXT)) {
5507 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5508 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5509 	}
5510 
5511 	if (fs->flow_type & FLOW_MAC_EXT) {
5512 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5513 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5514 	}
5515 
5516 	return 0;
5517 }
5518 
5519 /* make sure this function is called with fd_rule_lock held */
5520 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5521 				struct hclge_fd_rule *rule)
5522 {
5523 	int ret;
5524 
5525 	if (!rule) {
5526 		dev_err(&hdev->pdev->dev,
5527 			"The flow director rule is NULL\n");
5528 		return -EINVAL;
5529 	}
5530 
5531 	/* it never fails here, so no need to check the return value */
5532 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5533 
5534 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5535 	if (ret)
5536 		goto clear_rule;
5537 
5538 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5539 	if (ret)
5540 		goto clear_rule;
5541 
5542 	return 0;
5543 
5544 clear_rule:
5545 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5546 	return ret;
5547 }
5548 
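/* handle the ethtool request to add a flow director rule */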
5549 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5550 			      struct ethtool_rxnfc *cmd)
5551 {
5552 	struct hclge_vport *vport = hclge_get_vport(handle);
5553 	struct hclge_dev *hdev = vport->back;
5554 	u16 dst_vport_id = 0, q_index = 0;
5555 	struct ethtool_rx_flow_spec *fs;
5556 	struct hclge_fd_rule *rule;
5557 	u32 unused = 0;
5558 	u8 action;
5559 	int ret;
5560 
5561 	if (!hnae3_dev_fd_supported(hdev))
5562 		return -EOPNOTSUPP;
5563 
5564 	if (!hdev->fd_en) {
5565 		dev_warn(&hdev->pdev->dev,
5566 			 "Please enable flow director first\n");
5567 		return -EOPNOTSUPP;
5568 	}
5569 
5570 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5571 
5572 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5573 	if (ret) {
5574 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5575 		return ret;
5576 	}
5577 
5578 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5579 		action = HCLGE_FD_ACTION_DROP_PACKET;
5580 	} else {
5581 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5582 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5583 		u16 tqps;
5584 
5585 		if (vf > hdev->num_req_vfs) {
5586 			dev_err(&hdev->pdev->dev,
5587 				"Error: vf id (%d) > max vf num (%d)\n",
5588 				vf, hdev->num_req_vfs);
5589 			return -EINVAL;
5590 		}
5591 
5592 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5593 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5594 
5595 		if (ring >= tqps) {
5596 			dev_err(&hdev->pdev->dev,
5597 				"Error: queue id (%d) > max tqp num (%d)\n",
5598 				ring, tqps - 1);
5599 			return -EINVAL;
5600 		}
5601 
5602 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5603 		q_index = ring;
5604 	}
5605 
5606 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5607 	if (!rule)
5608 		return -ENOMEM;
5609 
5610 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5611 	if (ret) {
5612 		kfree(rule);
5613 		return ret;
5614 	}
5615 
5616 	rule->flow_type = fs->flow_type;
5617 
5618 	rule->location = fs->location;
5619 	rule->unused_tuple = unused;
5620 	rule->vf_id = dst_vport_id;
5621 	rule->queue_id = q_index;
5622 	rule->action = action;
5623 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5624 
5625 	/* to avoid rule conflicts, clear all arfs rules before applying
5626 	 * a rule configured by the user via ethtool
5627 	 */
5628 	hclge_clear_arfs_rules(handle);
5629 
5630 	spin_lock_bh(&hdev->fd_rule_lock);
5631 	ret = hclge_fd_config_rule(hdev, rule);
5632 
5633 	spin_unlock_bh(&hdev->fd_rule_lock);
5634 
5635 	return ret;
5636 }
5637 
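/* handle the ethtool request to delete the flow director rule at
 * fs->location
 */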
5638 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5639 			      struct ethtool_rxnfc *cmd)
5640 {
5641 	struct hclge_vport *vport = hclge_get_vport(handle);
5642 	struct hclge_dev *hdev = vport->back;
5643 	struct ethtool_rx_flow_spec *fs;
5644 	int ret;
5645 
5646 	if (!hnae3_dev_fd_supported(hdev))
5647 		return -EOPNOTSUPP;
5648 
5649 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5650 
5651 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5652 		return -EINVAL;
5653 
5654 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5655 		dev_err(&hdev->pdev->dev,
5656 			"Delete fail, rule %d is inexistent\n", fs->location);
5657 		return -ENOENT;
5658 	}
5659 
5660 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5661 				   NULL, false);
5662 	if (ret)
5663 		return ret;
5664 
5665 	spin_lock_bh(&hdev->fd_rule_lock);
5666 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5667 
5668 	spin_unlock_bh(&hdev->fd_rule_lock);
5669 
5670 	return ret;
5671 }
5672 
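/* remove all flow director rules from hardware; also free the software
 * rule list and bookkeeping when clear_list is true
 */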
5673 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5674 				     bool clear_list)
5675 {
5676 	struct hclge_vport *vport = hclge_get_vport(handle);
5677 	struct hclge_dev *hdev = vport->back;
5678 	struct hclge_fd_rule *rule;
5679 	struct hlist_node *node;
5680 	u16 location;
5681 
5682 	if (!hnae3_dev_fd_supported(hdev))
5683 		return;
5684 
5685 	spin_lock_bh(&hdev->fd_rule_lock);
5686 	for_each_set_bit(location, hdev->fd_bmap,
5687 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5688 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5689 				     NULL, false);
5690 
5691 	if (clear_list) {
5692 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5693 					  rule_node) {
5694 			hlist_del(&rule->rule_node);
5695 			kfree(rule);
5696 		}
5697 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5698 		hdev->hclge_fd_rule_num = 0;
5699 		bitmap_zero(hdev->fd_bmap,
5700 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5701 	}
5702 
5703 	spin_unlock_bh(&hdev->fd_rule_lock);
5704 }
5705 
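/* re-program the flow director rules kept in the software list into
 * hardware, used when recovering from a reset
 */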
5706 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5707 {
5708 	struct hclge_vport *vport = hclge_get_vport(handle);
5709 	struct hclge_dev *hdev = vport->back;
5710 	struct hclge_fd_rule *rule;
5711 	struct hlist_node *node;
5712 	int ret;
5713 
5714 	/* Return 0 here, because the reset error handling checks this
5715 	 * return value. If an error were returned here, the reset process
5716 	 * would fail.
5717 	 */
5718 	if (!hnae3_dev_fd_supported(hdev))
5719 		return 0;
5720 
5721 	/* if fd is disabled, the rules should not be restored during reset */
5722 	if (!hdev->fd_en)
5723 		return 0;
5724 
5725 	spin_lock_bh(&hdev->fd_rule_lock);
5726 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5727 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5728 		if (!ret)
5729 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5730 
5731 		if (ret) {
5732 			dev_warn(&hdev->pdev->dev,
5733 				 "Restore rule %d failed, remove it\n",
5734 				 rule->location);
5735 			clear_bit(rule->location, hdev->fd_bmap);
5736 			hlist_del(&rule->rule_node);
5737 			kfree(rule);
5738 			hdev->hclge_fd_rule_num--;
5739 		}
5740 	}
5741 
5742 	if (hdev->hclge_fd_rule_num)
5743 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5744 
5745 	spin_unlock_bh(&hdev->fd_rule_lock);
5746 
5747 	return 0;
5748 }
5749 
5750 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5751 				 struct ethtool_rxnfc *cmd)
5752 {
5753 	struct hclge_vport *vport = hclge_get_vport(handle);
5754 	struct hclge_dev *hdev = vport->back;
5755 
5756 	if (!hnae3_dev_fd_supported(hdev))
5757 		return -EOPNOTSUPP;
5758 
5759 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5760 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5761 
5762 	return 0;
5763 }
5764 
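/* fill an ethtool flow spec from the software copy of the rule at
 * fs->location
 */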
5765 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5766 				  struct ethtool_rxnfc *cmd)
5767 {
5768 	struct hclge_vport *vport = hclge_get_vport(handle);
5769 	struct hclge_fd_rule *rule = NULL;
5770 	struct hclge_dev *hdev = vport->back;
5771 	struct ethtool_rx_flow_spec *fs;
5772 	struct hlist_node *node2;
5773 
5774 	if (!hnae3_dev_fd_supported(hdev))
5775 		return -EOPNOTSUPP;
5776 
5777 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5778 
5779 	spin_lock_bh(&hdev->fd_rule_lock);
5780 
5781 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5782 		if (rule->location >= fs->location)
5783 			break;
5784 	}
5785 
5786 	if (!rule || fs->location != rule->location) {
5787 		spin_unlock_bh(&hdev->fd_rule_lock);
5788 
5789 		return -ENOENT;
5790 	}
5791 
5792 	fs->flow_type = rule->flow_type;
5793 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5794 	case SCTP_V4_FLOW:
5795 	case TCP_V4_FLOW:
5796 	case UDP_V4_FLOW:
5797 		fs->h_u.tcp_ip4_spec.ip4src =
5798 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5799 		fs->m_u.tcp_ip4_spec.ip4src =
5800 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5801 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5802 
5803 		fs->h_u.tcp_ip4_spec.ip4dst =
5804 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5805 		fs->m_u.tcp_ip4_spec.ip4dst =
5806 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5807 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5808 
5809 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5810 		fs->m_u.tcp_ip4_spec.psrc =
5811 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5812 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5813 
5814 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5815 		fs->m_u.tcp_ip4_spec.pdst =
5816 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5817 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5818 
5819 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5820 		fs->m_u.tcp_ip4_spec.tos =
5821 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5822 				0 : rule->tuples_mask.ip_tos;
5823 
5824 		break;
5825 	case IP_USER_FLOW:
5826 		fs->h_u.usr_ip4_spec.ip4src =
5827 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5828 		fs->m_u.tcp_ip4_spec.ip4src =
5829 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5830 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5831 
5832 		fs->h_u.usr_ip4_spec.ip4dst =
5833 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5834 		fs->m_u.usr_ip4_spec.ip4dst =
5835 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5836 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5837 
5838 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5839 		fs->m_u.usr_ip4_spec.tos =
5840 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5841 				0 : rule->tuples_mask.ip_tos;
5842 
5843 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5844 		fs->m_u.usr_ip4_spec.proto =
5845 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5846 				0 : rule->tuples_mask.ip_proto;
5847 
5848 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5849 
5850 		break;
5851 	case SCTP_V6_FLOW:
5852 	case TCP_V6_FLOW:
5853 	case UDP_V6_FLOW:
5854 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5855 				  rule->tuples.src_ip, IPV6_SIZE);
5856 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5857 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5858 			       sizeof(int) * IPV6_SIZE);
5859 		else
5860 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5861 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5862 
5863 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5864 				  rule->tuples.dst_ip, IPV6_SIZE);
5865 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5866 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5867 			       sizeof(int) * IPV6_SIZE);
5868 		else
5869 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5870 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5871 
5872 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5873 		fs->m_u.tcp_ip6_spec.psrc =
5874 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5875 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5876 
5877 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5878 		fs->m_u.tcp_ip6_spec.pdst =
5879 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5880 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5881 
5882 		break;
5883 	case IPV6_USER_FLOW:
5884 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5885 				  rule->tuples.src_ip, IPV6_SIZE);
5886 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5887 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5888 			       sizeof(int) * IPV6_SIZE);
5889 		else
5890 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5891 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5892 
5893 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5894 				  rule->tuples.dst_ip, IPV6_SIZE);
5895 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5896 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5897 			       sizeof(int) * IPV6_SIZE);
5898 		else
5899 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5900 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5901 
5902 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5903 		fs->m_u.usr_ip6_spec.l4_proto =
5904 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5905 				0 : rule->tuples_mask.ip_proto;
5906 
5907 		break;
5908 	case ETHER_FLOW:
5909 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5910 				rule->tuples.src_mac);
5911 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5912 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5913 		else
5914 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5915 					rule->tuples_mask.src_mac);
5916 
5917 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5918 				rule->tuples.dst_mac);
5919 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5920 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5921 		else
5922 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5923 					rule->tuples_mask.dst_mac);
5924 
5925 		fs->h_u.ether_spec.h_proto =
5926 				cpu_to_be16(rule->tuples.ether_proto);
5927 		fs->m_u.ether_spec.h_proto =
5928 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5929 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5930 
5931 		break;
5932 	default:
5933 		spin_unlock_bh(&hdev->fd_rule_lock);
5934 		return -EOPNOTSUPP;
5935 	}
5936 
5937 	if (fs->flow_type & FLOW_EXT) {
5938 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5939 		fs->m_ext.vlan_tci =
5940 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5941 				cpu_to_be16(VLAN_VID_MASK) :
5942 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5943 	}
5944 
5945 	if (fs->flow_type & FLOW_MAC_EXT) {
5946 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5947 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5948 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5949 		else
5950 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5951 					rule->tuples_mask.dst_mac);
5952 	}
5953 
5954 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5955 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5956 	} else {
5957 		u64 vf_id;
5958 
5959 		fs->ring_cookie = rule->queue_id;
5960 		vf_id = rule->vf_id;
5961 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5962 		fs->ring_cookie |= vf_id;
5963 	}
5964 
5965 	spin_unlock_bh(&hdev->fd_rule_lock);
5966 
5967 	return 0;
5968 }
5969 
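/* report the locations of all configured flow director rules to ethtool */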
5970 static int hclge_get_all_rules(struct hnae3_handle *handle,
5971 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5972 {
5973 	struct hclge_vport *vport = hclge_get_vport(handle);
5974 	struct hclge_dev *hdev = vport->back;
5975 	struct hclge_fd_rule *rule;
5976 	struct hlist_node *node2;
5977 	int cnt = 0;
5978 
5979 	if (!hnae3_dev_fd_supported(hdev))
5980 		return -EOPNOTSUPP;
5981 
5982 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5983 
5984 	spin_lock_bh(&hdev->fd_rule_lock);
5985 	hlist_for_each_entry_safe(rule, node2,
5986 				  &hdev->fd_rule_list, rule_node) {
5987 		if (cnt == cmd->rule_cnt) {
5988 			spin_unlock_bh(&hdev->fd_rule_lock);
5989 			return -EMSGSIZE;
5990 		}
5991 
5992 		rule_locs[cnt] = rule->location;
5993 		cnt++;
5994 	}
5995 
5996 	spin_unlock_bh(&hdev->fd_rule_lock);
5997 
5998 	cmd->rule_cnt = cnt;
5999 
6000 	return 0;
6001 }
6002 
6003 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6004 				     struct hclge_fd_rule_tuples *tuples)
6005 {
6006 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6007 	tuples->ip_proto = fkeys->basic.ip_proto;
6008 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6009 
6010 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6011 		tuples->src_ip[IPV4_INDEX] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6012 		tuples->dst_ip[IPV4_INDEX] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6013 	} else {
6014 		memcpy(tuples->src_ip,
6015 		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6016 		       sizeof(tuples->src_ip));
6017 		memcpy(tuples->dst_ip,
6018 		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6019 		       sizeof(tuples->dst_ip));
6020 	}
6021 }
6022 
6023 /* traverse all rules, check whether an existing rule has the same tuples */
6024 static struct hclge_fd_rule *
6025 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6026 			  const struct hclge_fd_rule_tuples *tuples)
6027 {
6028 	struct hclge_fd_rule *rule = NULL;
6029 	struct hlist_node *node;
6030 
6031 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6032 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6033 			return rule;
6034 	}
6035 
6036 	return NULL;
6037 }
6038 
6039 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6040 				     struct hclge_fd_rule *rule)
6041 {
6042 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6043 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6044 			     BIT(INNER_SRC_PORT);
6045 	rule->action = 0;
6046 	rule->vf_id = 0;
6047 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6048 	if (tuples->ether_proto == ETH_P_IP) {
6049 		if (tuples->ip_proto == IPPROTO_TCP)
6050 			rule->flow_type = TCP_V4_FLOW;
6051 		else
6052 			rule->flow_type = UDP_V4_FLOW;
6053 	} else {
6054 		if (tuples->ip_proto == IPPROTO_TCP)
6055 			rule->flow_type = TCP_V6_FLOW;
6056 		else
6057 			rule->flow_type = UDP_V6_FLOW;
6058 	}
6059 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6060 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6061 }
6062 
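/* add or update a flow director rule for aRFS, returning the rule
 * location on success
 */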
6063 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6064 				      u16 flow_id, struct flow_keys *fkeys)
6065 {
6066 	struct hclge_vport *vport = hclge_get_vport(handle);
6067 	struct hclge_fd_rule_tuples new_tuples;
6068 	struct hclge_dev *hdev = vport->back;
6069 	struct hclge_fd_rule *rule;
6070 	u16 tmp_queue_id;
6071 	u16 bit_id;
6072 	int ret;
6073 
6074 	if (!hnae3_dev_fd_supported(hdev))
6075 		return -EOPNOTSUPP;
6076 
6077 	memset(&new_tuples, 0, sizeof(new_tuples));
6078 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6079 
6080 	spin_lock_bh(&hdev->fd_rule_lock);
6081 
6082 	/* when there is already an fd rule added by the user,
6083 	 * arfs should not work
6084 	 */
6085 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6086 		spin_unlock_bh(&hdev->fd_rule_lock);
6087 
6088 		return -EOPNOTSUPP;
6089 	}
6090 
6091 	/* check whether a flow director filter exists for this flow:
6092 	 * if not, create a new filter for it;
6093 	 * if a filter exists with a different queue id, modify the filter;
6094 	 * if a filter exists with the same queue id, do nothing
6095 	 */
6096 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6097 	if (!rule) {
6098 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6099 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6100 			spin_unlock_bh(&hdev->fd_rule_lock);
6101 
6102 			return -ENOSPC;
6103 		}
6104 
6105 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6106 		if (!rule) {
6107 			spin_unlock_bh(&hdev->fd_rule_lock);
6108 
6109 			return -ENOMEM;
6110 		}
6111 
6112 		set_bit(bit_id, hdev->fd_bmap);
6113 		rule->location = bit_id;
6114 		rule->flow_id = flow_id;
6115 		rule->queue_id = queue_id;
6116 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6117 		ret = hclge_fd_config_rule(hdev, rule);
6118 
6119 		spin_unlock_bh(&hdev->fd_rule_lock);
6120 
6121 		if (ret)
6122 			return ret;
6123 
6124 		return rule->location;
6125 	}
6126 
6127 	spin_unlock_bh(&hdev->fd_rule_lock);
6128 
6129 	if (rule->queue_id == queue_id)
6130 		return rule->location;
6131 
6132 	tmp_queue_id = rule->queue_id;
6133 	rule->queue_id = queue_id;
6134 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6135 	if (ret) {
6136 		rule->queue_id = tmp_queue_id;
6137 		return ret;
6138 	}
6139 
6140 	return rule->location;
6141 }
6142 
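/* remove aRFS rules whose flows rps_may_expire_flow() reports as expired */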
6143 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6144 {
6145 #ifdef CONFIG_RFS_ACCEL
6146 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6147 	struct hclge_fd_rule *rule;
6148 	struct hlist_node *node;
6149 	HLIST_HEAD(del_list);
6150 
6151 	spin_lock_bh(&hdev->fd_rule_lock);
6152 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6153 		spin_unlock_bh(&hdev->fd_rule_lock);
6154 		return;
6155 	}
6156 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6157 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6158 					rule->flow_id, rule->location)) {
6159 			hlist_del_init(&rule->rule_node);
6160 			hlist_add_head(&rule->rule_node, &del_list);
6161 			hdev->hclge_fd_rule_num--;
6162 			clear_bit(rule->location, hdev->fd_bmap);
6163 		}
6164 	}
6165 	spin_unlock_bh(&hdev->fd_rule_lock);
6166 
6167 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6168 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6169 				     rule->location, NULL, false);
6170 		kfree(rule);
6171 	}
6172 #endif
6173 }
6174 
6175 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6176 {
6177 #ifdef CONFIG_RFS_ACCEL
6178 	struct hclge_vport *vport = hclge_get_vport(handle);
6179 	struct hclge_dev *hdev = vport->back;
6180 
6181 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6182 		hclge_del_all_fd_entries(handle, true);
6183 #endif
6184 }
6185 
6186 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6187 {
6188 	struct hclge_vport *vport = hclge_get_vport(handle);
6189 	struct hclge_dev *hdev = vport->back;
6190 
6191 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6192 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6193 }
6194 
6195 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6196 {
6197 	struct hclge_vport *vport = hclge_get_vport(handle);
6198 	struct hclge_dev *hdev = vport->back;
6199 
6200 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6201 }
6202 
6203 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6204 {
6205 	struct hclge_vport *vport = hclge_get_vport(handle);
6206 	struct hclge_dev *hdev = vport->back;
6207 
6208 	return hdev->rst_stats.hw_reset_done_cnt;
6209 }
6210 
6211 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6212 {
6213 	struct hclge_vport *vport = hclge_get_vport(handle);
6214 	struct hclge_dev *hdev = vport->back;
6215 	bool clear;
6216 
6217 	hdev->fd_en = enable;
6218 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6219 	if (!enable)
6220 		hclge_del_all_fd_entries(handle, clear);
6221 	else
6222 		hclge_restore_fd_entries(handle);
6223 }
6224 
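/* enable or disable MAC TX/RX, together with padding, FCS and oversize
 * truncation handling
 */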
6225 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6226 {
6227 	struct hclge_desc desc;
6228 	struct hclge_config_mac_mode_cmd *req =
6229 		(struct hclge_config_mac_mode_cmd *)desc.data;
6230 	u32 loop_en = 0;
6231 	int ret;
6232 
6233 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6234 
6235 	if (enable) {
6236 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6237 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6238 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6239 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6240 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6241 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6242 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6243 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6244 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6245 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6246 	}
6247 
6248 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6249 
6250 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6251 	if (ret)
6252 		dev_err(&hdev->pdev->dev,
6253 			"mac enable fail, ret =%d.\n", ret);
6254 }
6255 
6256 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6257 				     u8 switch_param, u8 param_mask)
6258 {
6259 	struct hclge_mac_vlan_switch_cmd *req;
6260 	struct hclge_desc desc;
6261 	u32 func_id;
6262 	int ret;
6263 
6264 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6265 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6266 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6267 				   false);
6268 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6269 	req->func_id = cpu_to_le32(func_id);
6270 	req->switch_param = switch_param;
6271 	req->param_mask = param_mask;
6272 
6273 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6274 	if (ret)
6275 		dev_err(&hdev->pdev->dev,
6276 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6277 	return ret;
6278 }
6279 
6280 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6281 				       int link_ret)
6282 {
6283 #define HCLGE_PHY_LINK_STATUS_NUM  200
6284 
6285 	struct phy_device *phydev = hdev->hw.mac.phydev;
6286 	int i = 0;
6287 	int ret;
6288 
6289 	do {
6290 		ret = phy_read_status(phydev);
6291 		if (ret) {
6292 			dev_err(&hdev->pdev->dev,
6293 				"phy update link status fail, ret = %d\n", ret);
6294 			return;
6295 		}
6296 
6297 		if (phydev->link == link_ret)
6298 			break;
6299 
6300 		msleep(HCLGE_LINK_STATUS_MS);
6301 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6302 }
6303 
6304 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6305 {
6306 #define HCLGE_MAC_LINK_STATUS_NUM  100
6307 
6308 	int i = 0;
6309 	int ret;
6310 
6311 	do {
6312 		ret = hclge_get_mac_link_status(hdev);
6313 		if (ret < 0)
6314 			return ret;
6315 		else if (ret == link_ret)
6316 			return 0;
6317 
6318 		msleep(HCLGE_LINK_STATUS_MS);
6319 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6320 	return -EBUSY;
6321 }
6322 
6323 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6324 					  bool is_phy)
6325 {
6326 #define HCLGE_LINK_STATUS_DOWN 0
6327 #define HCLGE_LINK_STATUS_UP   1
6328 
6329 	int link_ret;
6330 
6331 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6332 
6333 	if (is_phy)
6334 		hclge_phy_link_status_wait(hdev, link_ret);
6335 
6336 	return hclge_mac_link_status_wait(hdev, link_ret);
6337 }
6338 
6339 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6340 {
6341 	struct hclge_config_mac_mode_cmd *req;
6342 	struct hclge_desc desc;
6343 	u32 loop_en;
6344 	int ret;
6345 
6346 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6347 	/* 1 Read out the MAC mode config at first */
6348 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6349 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6350 	if (ret) {
6351 		dev_err(&hdev->pdev->dev,
6352 			"mac loopback get fail, ret =%d.\n", ret);
6353 		return ret;
6354 	}
6355 
6356 	/* 2 Then setup the loopback flag */
6357 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6358 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6359 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6360 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6361 
6362 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6363 
6364 	/* 3 Config mac work mode with loopback flag
6365 	 * and its original configuration parameters
6366 	 */
6367 	hclge_cmd_reuse_desc(&desc, false);
6368 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6369 	if (ret)
6370 		dev_err(&hdev->pdev->dev,
6371 			"mac loopback set fail, ret =%d.\n", ret);
6372 	return ret;
6373 }
6374 
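/* configure serial/parallel serdes loopback and poll until the firmware
 * reports completion
 */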
6375 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6376 				     enum hnae3_loop loop_mode)
6377 {
6378 #define HCLGE_SERDES_RETRY_MS	10
6379 #define HCLGE_SERDES_RETRY_NUM	100
6380 
6381 	struct hclge_serdes_lb_cmd *req;
6382 	struct hclge_desc desc;
6383 	int ret, i = 0;
6384 	u8 loop_mode_b;
6385 
6386 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6387 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6388 
6389 	switch (loop_mode) {
6390 	case HNAE3_LOOP_SERIAL_SERDES:
6391 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6392 		break;
6393 	case HNAE3_LOOP_PARALLEL_SERDES:
6394 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6395 		break;
6396 	default:
6397 		dev_err(&hdev->pdev->dev,
6398 			"unsupported serdes loopback mode %d\n", loop_mode);
6399 		return -ENOTSUPP;
6400 	}
6401 
6402 	if (en) {
6403 		req->enable = loop_mode_b;
6404 		req->mask = loop_mode_b;
6405 	} else {
6406 		req->mask = loop_mode_b;
6407 	}
6408 
6409 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6410 	if (ret) {
6411 		dev_err(&hdev->pdev->dev,
6412 			"serdes loopback set fail, ret = %d\n", ret);
6413 		return ret;
6414 	}
6415 
6416 	do {
6417 		msleep(HCLGE_SERDES_RETRY_MS);
6418 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6419 					   true);
6420 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6421 		if (ret) {
6422 			dev_err(&hdev->pdev->dev,
6423 				"serdes loopback get, ret = %d\n", ret);
6424 			return ret;
6425 		}
6426 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6427 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6428 
6429 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6430 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6431 		return -EBUSY;
6432 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6433 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6434 		return -EIO;
6435 	}
6436 	return ret;
6437 }
6438 
6439 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6440 				     enum hnae3_loop loop_mode)
6441 {
6442 	int ret;
6443 
6444 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6445 	if (ret)
6446 		return ret;
6447 
6448 	hclge_cfg_mac_mode(hdev, en);
6449 
6450 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6451 	if (ret)
6452 		dev_err(&hdev->pdev->dev,
6453 			"serdes loopback config mac mode timeout\n");
6454 
6455 	return ret;
6456 }
6457 
6458 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6459 				     struct phy_device *phydev)
6460 {
6461 	int ret;
6462 
6463 	if (!phydev->suspended) {
6464 		ret = phy_suspend(phydev);
6465 		if (ret)
6466 			return ret;
6467 	}
6468 
6469 	ret = phy_resume(phydev);
6470 	if (ret)
6471 		return ret;
6472 
6473 	return phy_loopback(phydev, true);
6474 }
6475 
6476 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6477 				      struct phy_device *phydev)
6478 {
6479 	int ret;
6480 
6481 	ret = phy_loopback(phydev, false);
6482 	if (ret)
6483 		return ret;
6484 
6485 	return phy_suspend(phydev);
6486 }
6487 
6488 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6489 {
6490 	struct phy_device *phydev = hdev->hw.mac.phydev;
6491 	int ret;
6492 
6493 	if (!phydev)
6494 		return -ENOTSUPP;
6495 
6496 	if (en)
6497 		ret = hclge_enable_phy_loopback(hdev, phydev);
6498 	else
6499 		ret = hclge_disable_phy_loopback(hdev, phydev);
6500 	if (ret) {
6501 		dev_err(&hdev->pdev->dev,
6502 			"set phy loopback fail, ret = %d\n", ret);
6503 		return ret;
6504 	}
6505 
6506 	hclge_cfg_mac_mode(hdev, en);
6507 
6508 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6509 	if (ret)
6510 		dev_err(&hdev->pdev->dev,
6511 			"phy loopback config mac mode timeout\n");
6512 
6513 	return ret;
6514 }
6515 
6516 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6517 			    int stream_id, bool enable)
6518 {
6519 	struct hclge_desc desc;
6520 	struct hclge_cfg_com_tqp_queue_cmd *req =
6521 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6522 	int ret;
6523 
6524 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6525 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6526 	req->stream_id = cpu_to_le16(stream_id);
6527 	if (enable)
6528 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6529 
6530 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6531 	if (ret)
6532 		dev_err(&hdev->pdev->dev,
6533 			"Tqp enable fail, status =%d.\n", ret);
6534 	return ret;
6535 }
6536 
6537 static int hclge_set_loopback(struct hnae3_handle *handle,
6538 			      enum hnae3_loop loop_mode, bool en)
6539 {
6540 	struct hclge_vport *vport = hclge_get_vport(handle);
6541 	struct hnae3_knic_private_info *kinfo;
6542 	struct hclge_dev *hdev = vport->back;
6543 	int i, ret;
6544 
6545 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6546 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6547 	 * the same, the packets are looped back in the SSU. If SSU loopback
6548 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6549 	 */
6550 	if (hdev->pdev->revision >= 0x21) {
6551 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6552 
6553 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6554 						HCLGE_SWITCH_ALW_LPBK_MASK);
6555 		if (ret)
6556 			return ret;
6557 	}
6558 
6559 	switch (loop_mode) {
6560 	case HNAE3_LOOP_APP:
6561 		ret = hclge_set_app_loopback(hdev, en);
6562 		break;
6563 	case HNAE3_LOOP_SERIAL_SERDES:
6564 	case HNAE3_LOOP_PARALLEL_SERDES:
6565 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6566 		break;
6567 	case HNAE3_LOOP_PHY:
6568 		ret = hclge_set_phy_loopback(hdev, en);
6569 		break;
6570 	default:
6571 		ret = -ENOTSUPP;
6572 		dev_err(&hdev->pdev->dev,
6573 			"loop_mode %d is not supported\n", loop_mode);
6574 		break;
6575 	}
6576 
6577 	if (ret)
6578 		return ret;
6579 
6580 	kinfo = &vport->nic.kinfo;
6581 	for (i = 0; i < kinfo->num_tqps; i++) {
6582 		ret = hclge_tqp_enable(hdev, i, 0, en);
6583 		if (ret)
6584 			return ret;
6585 	}
6586 
6587 	return 0;
6588 }
6589 
6590 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6591 {
6592 	int ret;
6593 
6594 	ret = hclge_set_app_loopback(hdev, false);
6595 	if (ret)
6596 		return ret;
6597 
6598 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6599 	if (ret)
6600 		return ret;
6601 
6602 	return hclge_cfg_serdes_loopback(hdev, false,
6603 					 HNAE3_LOOP_PARALLEL_SERDES);
6604 }
6605 
6606 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6607 {
6608 	struct hclge_vport *vport = hclge_get_vport(handle);
6609 	struct hnae3_knic_private_info *kinfo;
6610 	struct hnae3_queue *queue;
6611 	struct hclge_tqp *tqp;
6612 	int i;
6613 
6614 	kinfo = &vport->nic.kinfo;
6615 	for (i = 0; i < kinfo->num_tqps; i++) {
6616 		queue = handle->kinfo.tqp[i];
6617 		tqp = container_of(queue, struct hclge_tqp, q);
6618 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6619 	}
6620 }
6621 
6622 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6623 {
6624 	struct hclge_vport *vport = hclge_get_vport(handle);
6625 	struct hclge_dev *hdev = vport->back;
6626 
6627 	if (enable) {
6628 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6629 	} else {
6630 		/* Set the DOWN flag here to prevent the service task from
6631 		 * being scheduled again
6632 		 */
6633 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6634 		cancel_delayed_work_sync(&hdev->service_task);
6635 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6636 	}
6637 }
6638 
6639 static int hclge_ae_start(struct hnae3_handle *handle)
6640 {
6641 	struct hclge_vport *vport = hclge_get_vport(handle);
6642 	struct hclge_dev *hdev = vport->back;
6643 
6644 	/* mac enable */
6645 	hclge_cfg_mac_mode(hdev, true);
6646 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6647 	hdev->hw.mac.link = 0;
6648 
6649 	/* reset tqp stats */
6650 	hclge_reset_tqp_stats(handle);
6651 
6652 	hclge_mac_start_phy(hdev);
6653 
6654 	return 0;
6655 }
6656 
6657 static void hclge_ae_stop(struct hnae3_handle *handle)
6658 {
6659 	struct hclge_vport *vport = hclge_get_vport(handle);
6660 	struct hclge_dev *hdev = vport->back;
6661 	int i;
6662 
6663 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6664 
6665 	hclge_clear_arfs_rules(handle);
6666 
6667 	/* If it is not a PF reset, the firmware will disable the MAC,
6668 	 * so we only need to stop the phy here.
6669 	 */
6670 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6671 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6672 		hclge_mac_stop_phy(hdev);
6673 		hclge_update_link_status(hdev);
6674 		return;
6675 	}
6676 
6677 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6678 		hclge_reset_tqp(handle, i);
6679 
6680 	hclge_config_mac_tnl_int(hdev, false);
6681 
6682 	/* Mac disable */
6683 	hclge_cfg_mac_mode(hdev, false);
6684 
6685 	hclge_mac_stop_phy(hdev);
6686 
6687 	/* reset tqp stats */
6688 	hclge_reset_tqp_stats(handle);
6689 	hclge_update_link_status(hdev);
6690 }
6691 
6692 int hclge_vport_start(struct hclge_vport *vport)
6693 {
6694 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6695 	vport->last_active_jiffies = jiffies;
6696 	return 0;
6697 }
6698 
6699 void hclge_vport_stop(struct hclge_vport *vport)
6700 {
6701 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6702 }
6703 
6704 static int hclge_client_start(struct hnae3_handle *handle)
6705 {
6706 	struct hclge_vport *vport = hclge_get_vport(handle);
6707 
6708 	return hclge_vport_start(vport);
6709 }
6710 
6711 static void hclge_client_stop(struct hnae3_handle *handle)
6712 {
6713 	struct hclge_vport *vport = hclge_get_vport(handle);
6714 
6715 	hclge_vport_stop(vport);
6716 }
6717 
6718 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6719 					 u16 cmdq_resp, u8  resp_code,
6720 					 enum hclge_mac_vlan_tbl_opcode op)
6721 {
6722 	struct hclge_dev *hdev = vport->back;
6723 
6724 	if (cmdq_resp) {
6725 		dev_err(&hdev->pdev->dev,
6726 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6727 			cmdq_resp);
6728 		return -EIO;
6729 	}
6730 
6731 	if (op == HCLGE_MAC_VLAN_ADD) {
6732 		if ((!resp_code) || (resp_code == 1)) {
6733 			return 0;
6734 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6735 			dev_err(&hdev->pdev->dev,
6736 				"add mac addr failed for uc_overflow.\n");
6737 			return -ENOSPC;
6738 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6739 			dev_err(&hdev->pdev->dev,
6740 				"add mac addr failed for mc_overflow.\n");
6741 			return -ENOSPC;
6742 		}
6743 
6744 		dev_err(&hdev->pdev->dev,
6745 			"add mac addr failed for undefined, code=%u.\n",
6746 			resp_code);
6747 		return -EIO;
6748 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6749 		if (!resp_code) {
6750 			return 0;
6751 		} else if (resp_code == 1) {
6752 			dev_dbg(&hdev->pdev->dev,
6753 				"remove mac addr failed for miss.\n");
6754 			return -ENOENT;
6755 		}
6756 
6757 		dev_err(&hdev->pdev->dev,
6758 			"remove mac addr failed for undefined, code=%u.\n",
6759 			resp_code);
6760 		return -EIO;
6761 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6762 		if (!resp_code) {
6763 			return 0;
6764 		} else if (resp_code == 1) {
6765 			dev_dbg(&hdev->pdev->dev,
6766 				"lookup mac addr failed for miss.\n");
6767 			return -ENOENT;
6768 		}
6769 
6770 		dev_err(&hdev->pdev->dev,
6771 			"lookup mac addr failed for undefined, code=%u.\n",
6772 			resp_code);
6773 		return -EIO;
6774 	}
6775 
6776 	dev_err(&hdev->pdev->dev,
6777 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6778 
6779 	return -EINVAL;
6780 }
6781 
6782 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6783 {
6784 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6785 
6786 	unsigned int word_num;
6787 	unsigned int bit_num;
6788 
6789 	if (vfid > 255 || vfid < 0)
6790 		return -EIO;
6791 
6792 	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6793 		word_num = vfid / 32;
6794 		bit_num  = vfid % 32;
6795 		if (clr)
6796 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6797 		else
6798 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6799 	} else {
6800 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6801 		bit_num  = vfid % 32;
6802 		if (clr)
6803 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6804 		else
6805 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6806 	}
6807 
6808 	return 0;
6809 }
6810 
6811 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6812 {
6813 #define HCLGE_DESC_NUMBER 3
6814 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6815 	int i, j;
6816 
6817 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6818 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6819 			if (desc[i].data[j])
6820 				return false;
6821 
6822 	return true;
6823 }
6824 
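/* pack a MAC address into the mac_vlan table entry layout used by the
 * command queue
 */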
6825 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6826 				   const u8 *addr, bool is_mc)
6827 {
6828 	const unsigned char *mac_addr = addr;
6829 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6830 		       (mac_addr[0]) | (mac_addr[1] << 8);
6831 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6832 
6833 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6834 	if (is_mc) {
6835 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6836 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6837 	}
6838 
6839 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6840 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6841 }
6842 
6843 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6844 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6845 {
6846 	struct hclge_dev *hdev = vport->back;
6847 	struct hclge_desc desc;
6848 	u8 resp_code;
6849 	u16 retval;
6850 	int ret;
6851 
6852 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6853 
6854 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6855 
6856 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6857 	if (ret) {
6858 		dev_err(&hdev->pdev->dev,
6859 			"del mac addr failed for cmd_send, ret =%d.\n",
6860 			ret);
6861 		return ret;
6862 	}
6863 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6864 	retval = le16_to_cpu(desc.retval);
6865 
6866 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6867 					     HCLGE_MAC_VLAN_REMOVE);
6868 }
6869 
6870 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6871 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
6872 				     struct hclge_desc *desc,
6873 				     bool is_mc)
6874 {
6875 	struct hclge_dev *hdev = vport->back;
6876 	u8 resp_code;
6877 	u16 retval;
6878 	int ret;
6879 
6880 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6881 	if (is_mc) {
6882 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6883 		memcpy(desc[0].data,
6884 		       req,
6885 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6886 		hclge_cmd_setup_basic_desc(&desc[1],
6887 					   HCLGE_OPC_MAC_VLAN_ADD,
6888 					   true);
6889 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6890 		hclge_cmd_setup_basic_desc(&desc[2],
6891 					   HCLGE_OPC_MAC_VLAN_ADD,
6892 					   true);
6893 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
6894 	} else {
6895 		memcpy(desc[0].data,
6896 		       req,
6897 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6898 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
6899 	}
6900 	if (ret) {
6901 		dev_err(&hdev->pdev->dev,
6902 			"lookup mac addr failed for cmd_send, ret =%d.\n",
6903 			ret);
6904 		return ret;
6905 	}
6906 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6907 	retval = le16_to_cpu(desc[0].retval);
6908 
6909 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6910 					     HCLGE_MAC_VLAN_LKUP);
6911 }
6912 
6913 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6914 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
6915 				  struct hclge_desc *mc_desc)
6916 {
6917 	struct hclge_dev *hdev = vport->back;
6918 	int cfg_status;
6919 	u8 resp_code;
6920 	u16 retval;
6921 	int ret;
6922 
6923 	if (!mc_desc) {
6924 		struct hclge_desc desc;
6925 
6926 		hclge_cmd_setup_basic_desc(&desc,
6927 					   HCLGE_OPC_MAC_VLAN_ADD,
6928 					   false);
6929 		memcpy(desc.data, req,
6930 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6931 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6932 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6933 		retval = le16_to_cpu(desc.retval);
6934 
6935 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6936 							   resp_code,
6937 							   HCLGE_MAC_VLAN_ADD);
6938 	} else {
6939 		hclge_cmd_reuse_desc(&mc_desc[0], false);
6940 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6941 		hclge_cmd_reuse_desc(&mc_desc[1], false);
6942 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6943 		hclge_cmd_reuse_desc(&mc_desc[2], false);
6944 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6945 		memcpy(mc_desc[0].data, req,
6946 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6947 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6948 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6949 		retval = le16_to_cpu(mc_desc[0].retval);
6950 
6951 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6952 							   resp_code,
6953 							   HCLGE_MAC_VLAN_ADD);
6954 	}
6955 
6956 	if (ret) {
6957 		dev_err(&hdev->pdev->dev,
6958 			"add mac addr failed for cmd_send, ret =%d.\n",
6959 			ret);
6960 		return ret;
6961 	}
6962 
6963 	return cfg_status;
6964 }
6965 
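/* allocate unicast mac vlan (umv) table space and split it into a
 * private quota per function plus a shared pool
 */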
6966 static int hclge_init_umv_space(struct hclge_dev *hdev)
6967 {
6968 	u16 allocated_size = 0;
6969 	int ret;
6970 
6971 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6972 				  true);
6973 	if (ret)
6974 		return ret;
6975 
6976 	if (allocated_size < hdev->wanted_umv_size)
6977 		dev_warn(&hdev->pdev->dev,
6978 			 "Alloc umv space failed, want %d, get %d\n",
6979 			 hdev->wanted_umv_size, allocated_size);
6980 
6981 	mutex_init(&hdev->umv_mutex);
6982 	hdev->max_umv_size = allocated_size;
6983 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6984 	 * reserve some unicast mac vlan table entries shared by the pf
6985 	 * and its vfs.
6986 	 */
6987 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6988 	hdev->share_umv_size = hdev->priv_umv_size +
6989 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6990 
6991 	return 0;
6992 }
6993 
6994 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6995 {
6996 	int ret;
6997 
6998 	if (hdev->max_umv_size > 0) {
6999 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7000 					  false);
7001 		if (ret)
7002 			return ret;
7003 		hdev->max_umv_size = 0;
7004 	}
7005 	mutex_destroy(&hdev->umv_mutex);
7006 
7007 	return 0;
7008 }
7009 
7010 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7011 			       u16 *allocated_size, bool is_alloc)
7012 {
7013 	struct hclge_umv_spc_alc_cmd *req;
7014 	struct hclge_desc desc;
7015 	int ret;
7016 
7017 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7018 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7019 	if (!is_alloc)
7020 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7021 
7022 	req->space_size = cpu_to_le32(space_size);
7023 
7024 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7025 	if (ret) {
7026 		dev_err(&hdev->pdev->dev,
7027 			"%s umv space failed for cmd_send, ret =%d\n",
7028 			is_alloc ? "allocate" : "free", ret);
7029 		return ret;
7030 	}
7031 
7032 	if (is_alloc && allocated_size)
7033 		*allocated_size = le32_to_cpu(desc.data[1]);
7034 
7035 	return 0;
7036 }
7037 
7038 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7039 {
7040 	struct hclge_vport *vport;
7041 	int i;
7042 
7043 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7044 		vport = &hdev->vport[i];
7045 		vport->used_umv_num = 0;
7046 	}
7047 
7048 	mutex_lock(&hdev->umv_mutex);
7049 	hdev->share_umv_size = hdev->priv_umv_size +
7050 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7051 	mutex_unlock(&hdev->umv_mutex);
7052 }
7053 
7054 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7055 {
7056 	struct hclge_dev *hdev = vport->back;
7057 	bool is_full;
7058 
7059 	mutex_lock(&hdev->umv_mutex);
7060 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7061 		   hdev->share_umv_size == 0);
7062 	mutex_unlock(&hdev->umv_mutex);
7063 
7064 	return is_full;
7065 }
7066 
7067 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7068 {
7069 	struct hclge_dev *hdev = vport->back;
7070 
7071 	mutex_lock(&hdev->umv_mutex);
7072 	if (is_free) {
7073 		if (vport->used_umv_num > hdev->priv_umv_size)
7074 			hdev->share_umv_size++;
7075 
7076 		if (vport->used_umv_num > 0)
7077 			vport->used_umv_num--;
7078 	} else {
7079 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7080 		    hdev->share_umv_size > 0)
7081 			hdev->share_umv_size--;
7082 		vport->used_umv_num++;
7083 	}
7084 	mutex_unlock(&hdev->umv_mutex);
7085 }
7086 
7087 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7088 			     const unsigned char *addr)
7089 {
7090 	struct hclge_vport *vport = hclge_get_vport(handle);
7091 
7092 	return hclge_add_uc_addr_common(vport, addr);
7093 }
7094 
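/* add a unicast MAC address for the vport, respecting the umv space quota */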
7095 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7096 			     const unsigned char *addr)
7097 {
7098 	struct hclge_dev *hdev = vport->back;
7099 	struct hclge_mac_vlan_tbl_entry_cmd req;
7100 	struct hclge_desc desc;
7101 	u16 egress_port = 0;
7102 	int ret;
7103 
7104 	/* mac addr check */
7105 	if (is_zero_ether_addr(addr) ||
7106 	    is_broadcast_ether_addr(addr) ||
7107 	    is_multicast_ether_addr(addr)) {
7108 		dev_err(&hdev->pdev->dev,
7109 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7110 			 addr, is_zero_ether_addr(addr),
7111 			 is_broadcast_ether_addr(addr),
7112 			 is_multicast_ether_addr(addr));
7113 		return -EINVAL;
7114 	}
7115 
7116 	memset(&req, 0, sizeof(req));
7117 
7118 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7119 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7120 
7121 	req.egress_port = cpu_to_le16(egress_port);
7122 
7123 	hclge_prepare_mac_addr(&req, addr, false);
7124 
7125 	/* Look up the mac address in the mac_vlan table, and add
7126 	 * it if the entry does not exist. Duplicate unicast entries
7127 	 * are not allowed in the mac vlan table.
7128 	 */
7129 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7130 	if (ret == -ENOENT) {
7131 		if (!hclge_is_umv_space_full(vport)) {
7132 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7133 			if (!ret)
7134 				hclge_update_umv_space(vport, false);
7135 			return ret;
7136 		}
7137 
7138 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7139 			hdev->priv_umv_size);
7140 
7141 		return -ENOSPC;
7142 	}
7143 
7144 	/* check if we just hit the duplicate */
7145 	if (!ret) {
7146 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
7147 			 vport->vport_id, addr);
7148 		return 0;
7149 	}
7150 
7151 	dev_err(&hdev->pdev->dev,
7152 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7153 		addr);
7154 
7155 	return ret;
7156 }
7157 
7158 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7159 			    const unsigned char *addr)
7160 {
7161 	struct hclge_vport *vport = hclge_get_vport(handle);
7162 
7163 	return hclge_rm_uc_addr_common(vport, addr);
7164 }
7165 
7166 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7167 			    const unsigned char *addr)
7168 {
7169 	struct hclge_dev *hdev = vport->back;
7170 	struct hclge_mac_vlan_tbl_entry_cmd req;
7171 	int ret;
7172 
7173 	/* mac addr check */
7174 	if (is_zero_ether_addr(addr) ||
7175 	    is_broadcast_ether_addr(addr) ||
7176 	    is_multicast_ether_addr(addr)) {
7177 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7178 			addr);
7179 		return -EINVAL;
7180 	}
7181 
7182 	memset(&req, 0, sizeof(req));
7183 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7184 	hclge_prepare_mac_addr(&req, addr, false);
7185 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7186 	if (!ret)
7187 		hclge_update_umv_space(vport, true);
7188 
7189 	return ret;
7190 }
7191 
7192 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7193 			     const unsigned char *addr)
7194 {
7195 	struct hclge_vport *vport = hclge_get_vport(handle);
7196 
7197 	return hclge_add_mc_addr_common(vport, addr);
7198 }
7199 
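/* Add a multicast address for the vport: look the entry up first, build a
 * new one if it does not exist, then set this vport's bit in the entry's
 * function bitmap and write the entry back to hardware.
 */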
7200 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7201 			     const unsigned char *addr)
7202 {
7203 	struct hclge_dev *hdev = vport->back;
7204 	struct hclge_mac_vlan_tbl_entry_cmd req;
7205 	struct hclge_desc desc[3];
7206 	int status;
7207 
7208 	/* mac addr check */
7209 	if (!is_multicast_ether_addr(addr)) {
7210 		dev_err(&hdev->pdev->dev,
7211 			"Add mc mac err! invalid mac:%pM.\n",
7212 			 addr);
7213 		return -EINVAL;
7214 	}
7215 	memset(&req, 0, sizeof(req));
7216 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7217 	hclge_prepare_mac_addr(&req, addr, true);
7218 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7219 	if (status) {
		/* This mac addr does not exist, add a new entry for it */
7221 		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
7224 	}
7225 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7226 	if (status)
7227 		return status;
7228 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7229 
7230 	if (status == -ENOSPC)
7231 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7232 
7233 	return status;
7234 }
7235 
7236 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7237 			    const unsigned char *addr)
7238 {
7239 	struct hclge_vport *vport = hclge_get_vport(handle);
7240 
7241 	return hclge_rm_mc_addr_common(vport, addr);
7242 }
7243 
7244 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7245 			    const unsigned char *addr)
7246 {
7247 	struct hclge_dev *hdev = vport->back;
7248 	struct hclge_mac_vlan_tbl_entry_cmd req;
7249 	enum hclge_cmd_status status;
7250 	struct hclge_desc desc[3];
7251 
7252 	/* mac addr check */
7253 	if (!is_multicast_ether_addr(addr)) {
7254 		dev_dbg(&hdev->pdev->dev,
7255 			"Remove mc mac err! invalid mac:%pM.\n",
7256 			 addr);
7257 		return -EINVAL;
7258 	}
7259 
7260 	memset(&req, 0, sizeof(req));
7261 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7262 	hclge_prepare_mac_addr(&req, addr, true);
7263 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7264 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
7266 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7267 		if (status)
7268 			return status;
7269 
7270 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
7272 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7273 		else
			/* Not all the vfids are zero, so update the entry */
7275 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7276 
7277 	} else {
		/* This mac address may be in the mta table, but it cannot be
		 * deleted here because an mta entry represents an address
		 * range rather than a specific address. The delete action for
		 * all entries will take effect in update_mta_status, called by
		 * hns3_nic_set_rx_mode.
		 */
7284 		status = 0;
7285 	}
7286 
7287 	return status;
7288 }
7289 
7290 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7291 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7292 {
7293 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7294 	struct list_head *list;
7295 
7296 	if (!vport->vport_id)
7297 		return;
7298 
7299 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7300 	if (!mac_cfg)
7301 		return;
7302 
7303 	mac_cfg->hd_tbl_status = true;
7304 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7305 
7306 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7307 	       &vport->uc_mac_list : &vport->mc_mac_list;
7308 
7309 	list_add_tail(&mac_cfg->node, list);
7310 }
7311 
7312 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7313 			      bool is_write_tbl,
7314 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7315 {
7316 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7317 	struct list_head *list;
7318 	bool uc_flag, mc_flag;
7319 
7320 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7321 	       &vport->uc_mac_list : &vport->mc_mac_list;
7322 
7323 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7324 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7325 
7326 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7327 		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
7328 			if (uc_flag && mac_cfg->hd_tbl_status)
7329 				hclge_rm_uc_addr_common(vport, mac_addr);
7330 
7331 			if (mc_flag && mac_cfg->hd_tbl_status)
7332 				hclge_rm_mc_addr_common(vport, mac_addr);
7333 
7334 			list_del(&mac_cfg->node);
7335 			kfree(mac_cfg);
7336 			break;
7337 		}
7338 	}
7339 }
7340 
7341 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7342 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7343 {
7344 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7345 	struct list_head *list;
7346 
7347 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7348 	       &vport->uc_mac_list : &vport->mc_mac_list;
7349 
7350 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7351 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7352 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7353 
7354 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7355 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7356 
7357 		mac_cfg->hd_tbl_status = false;
7358 		if (is_del_list) {
7359 			list_del(&mac_cfg->node);
7360 			kfree(mac_cfg);
7361 		}
7362 	}
7363 }
7364 
7365 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7366 {
7367 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7368 	struct hclge_vport *vport;
7369 	int i;
7370 
7371 	mutex_lock(&hdev->vport_cfg_mutex);
7372 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7373 		vport = &hdev->vport[i];
7374 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7375 			list_del(&mac->node);
7376 			kfree(mac);
7377 		}
7378 
7379 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7380 			list_del(&mac->node);
7381 			kfree(mac);
7382 		}
7383 	}
7384 	mutex_unlock(&hdev->vport_cfg_mutex);
7385 }
7386 
7387 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7388 					      u16 cmdq_resp, u8 resp_code)
7389 {
7390 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7391 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7392 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7393 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7394 
7395 	int return_status;
7396 
7397 	if (cmdq_resp) {
7398 		dev_err(&hdev->pdev->dev,
7399 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7400 			cmdq_resp);
7401 		return -EIO;
7402 	}
7403 
7404 	switch (resp_code) {
7405 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7406 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7407 		return_status = 0;
7408 		break;
7409 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7410 		dev_err(&hdev->pdev->dev,
7411 			"add mac ethertype failed for manager table overflow.\n");
7412 		return_status = -EIO;
7413 		break;
7414 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7415 		dev_err(&hdev->pdev->dev,
7416 			"add mac ethertype failed for key conflict.\n");
7417 		return_status = -EIO;
7418 		break;
7419 	default:
7420 		dev_err(&hdev->pdev->dev,
7421 			"add mac ethertype failed for undefined, code=%d.\n",
7422 			resp_code);
7423 		return_status = -EIO;
7424 	}
7425 
7426 	return return_status;
7427 }
7428 
7429 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7430 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7431 {
7432 	struct hclge_desc desc;
7433 	u8 resp_code;
7434 	u16 retval;
7435 	int ret;
7436 
7437 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7438 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7439 
7440 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7441 	if (ret) {
7442 		dev_err(&hdev->pdev->dev,
7443 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7444 			ret);
7445 		return ret;
7446 	}
7447 
7448 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7449 	retval = le16_to_cpu(desc.retval);
7450 
7451 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7452 }
7453 
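/* Download each entry of the static MAC manager table (hclge_mgr_table)
 * so that hardware matches the configured management ethertypes.
 */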
7454 static int init_mgr_tbl(struct hclge_dev *hdev)
7455 {
7456 	int ret;
7457 	int i;
7458 
7459 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7460 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7461 		if (ret) {
7462 			dev_err(&hdev->pdev->dev,
7463 				"add mac ethertype failed, ret =%d.\n",
7464 				ret);
7465 			return ret;
7466 		}
7467 	}
7468 
7469 	return 0;
7470 }
7471 
7472 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7473 {
7474 	struct hclge_vport *vport = hclge_get_vport(handle);
7475 	struct hclge_dev *hdev = vport->back;
7476 
7477 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7478 }
7479 
7480 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7481 			      bool is_first)
7482 {
7483 	const unsigned char *new_addr = (const unsigned char *)p;
7484 	struct hclge_vport *vport = hclge_get_vport(handle);
7485 	struct hclge_dev *hdev = vport->back;
7486 	int ret;
7487 
7488 	/* mac addr check */
7489 	if (is_zero_ether_addr(new_addr) ||
7490 	    is_broadcast_ether_addr(new_addr) ||
7491 	    is_multicast_ether_addr(new_addr)) {
7492 		dev_err(&hdev->pdev->dev,
7493 			"Change uc mac err! invalid mac:%pM.\n",
7494 			 new_addr);
7495 		return -EINVAL;
7496 	}
7497 
7498 	if ((!is_first || is_kdump_kernel()) &&
7499 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7500 		dev_warn(&hdev->pdev->dev,
7501 			 "remove old uc mac address fail.\n");
7502 
7503 	ret = hclge_add_uc_addr(handle, new_addr);
7504 	if (ret) {
7505 		dev_err(&hdev->pdev->dev,
7506 			"add uc mac address fail, ret =%d.\n",
7507 			ret);
7508 
7509 		if (!is_first &&
7510 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7511 			dev_err(&hdev->pdev->dev,
7512 				"restore uc mac address fail.\n");
7513 
7514 		return -EIO;
7515 	}
7516 
7517 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7518 	if (ret) {
7519 		dev_err(&hdev->pdev->dev,
7520 			"configure mac pause address fail, ret =%d.\n",
7521 			ret);
7522 		return -EIO;
7523 	}
7524 
7525 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7526 
7527 	return 0;
7528 }
7529 
7530 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7531 			  int cmd)
7532 {
7533 	struct hclge_vport *vport = hclge_get_vport(handle);
7534 	struct hclge_dev *hdev = vport->back;
7535 
7536 	if (!hdev->hw.mac.phydev)
7537 		return -EOPNOTSUPP;
7538 
7539 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7540 }
7541 
7542 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7543 				      u8 fe_type, bool filter_en, u8 vf_id)
7544 {
7545 	struct hclge_vlan_filter_ctrl_cmd *req;
7546 	struct hclge_desc desc;
7547 	int ret;
7548 
7549 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7550 
7551 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7552 	req->vlan_type = vlan_type;
7553 	req->vlan_fe = filter_en ? fe_type : 0;
7554 	req->vf_id = vf_id;
7555 
7556 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7557 	if (ret)
7558 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7559 			ret);
7560 
7561 	return ret;
7562 }
7563 
7564 #define HCLGE_FILTER_TYPE_VF		0
7565 #define HCLGE_FILTER_TYPE_PORT		1
7566 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7567 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7568 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7569 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7570 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7571 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7572 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7573 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7574 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7575 
7576 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7577 {
7578 	struct hclge_vport *vport = hclge_get_vport(handle);
7579 	struct hclge_dev *hdev = vport->back;
7580 
7581 	if (hdev->pdev->revision >= 0x21) {
7582 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7583 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7584 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7585 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7586 	} else {
7587 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7588 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7589 					   0);
7590 	}
7591 	if (enable)
7592 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7593 	else
7594 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7595 }
7596 
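/* Program the per-function VF vlan filter. The command pair carries the
 * vlan id, an add/kill flag and a function bitmap spanning two
 * descriptors; only the bit for vfid is set, so the change applies to
 * that function alone.
 */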
7597 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7598 				    bool is_kill, u16 vlan,
7599 				    __be16 proto)
7600 {
7601 #define HCLGE_MAX_VF_BYTES  16
7602 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7603 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7604 	struct hclge_desc desc[2];
7605 	u8 vf_byte_val;
7606 	u8 vf_byte_off;
7607 	int ret;
7608 
	/* If the vf vlan table is full, firmware will disable the vf vlan
	 * filter; it is then impossible (and unnecessary) to add a new vlan
	 * id to the vf vlan filter.
	 */
7612 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7613 		return 0;
7614 
7615 	hclge_cmd_setup_basic_desc(&desc[0],
7616 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7617 	hclge_cmd_setup_basic_desc(&desc[1],
7618 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7619 
7620 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7621 
7622 	vf_byte_off = vfid / 8;
7623 	vf_byte_val = 1 << (vfid % 8);
7624 
7625 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7626 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7627 
7628 	req0->vlan_id  = cpu_to_le16(vlan);
7629 	req0->vlan_cfg = is_kill;
7630 
7631 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7632 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7633 	else
7634 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7635 
7636 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7637 	if (ret) {
7638 		dev_err(&hdev->pdev->dev,
7639 			"Send vf vlan command fail, ret =%d.\n",
7640 			ret);
7641 		return ret;
7642 	}
7643 
7644 	if (!is_kill) {
7645 #define HCLGE_VF_VLAN_NO_ENTRY	2
7646 		if (!req0->resp_code || req0->resp_code == 1)
7647 			return 0;
7648 
7649 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7650 			set_bit(vfid, hdev->vf_vlan_full);
7651 			dev_warn(&hdev->pdev->dev,
7652 				 "vf vlan table is full, vf vlan filter is disabled\n");
7653 			return 0;
7654 		}
7655 
7656 		dev_err(&hdev->pdev->dev,
7657 			"Add vf vlan filter fail, ret =%d.\n",
7658 			req0->resp_code);
7659 	} else {
7660 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7661 		if (!req0->resp_code)
7662 			return 0;
7663 
		/* The vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are not added to the vf vlan table.
		 * Just return 0 without a warning, to avoid massive verbose
		 * logs on unload.
		 */
7669 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7670 			return 0;
7671 
7672 		dev_err(&hdev->pdev->dev,
7673 			"Kill vf vlan filter fail, ret =%d.\n",
7674 			req0->resp_code);
7675 	}
7676 
7677 	return -EIO;
7678 }
7679 
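/* Program the port vlan filter. Each command covers a block of 160 vlan
 * ids: vlan_offset selects the block and the vlan id maps to one bit of
 * the bitmap inside it.
 */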
7680 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7681 				      u16 vlan_id, bool is_kill)
7682 {
7683 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7684 	struct hclge_desc desc;
7685 	u8 vlan_offset_byte_val;
7686 	u8 vlan_offset_byte;
7687 	u8 vlan_offset_160;
7688 	int ret;
7689 
7690 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7691 
7692 	vlan_offset_160 = vlan_id / 160;
7693 	vlan_offset_byte = (vlan_id % 160) / 8;
7694 	vlan_offset_byte_val = 1 << (vlan_id % 8);
7695 
7696 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7697 	req->vlan_offset = vlan_offset_160;
7698 	req->vlan_cfg = is_kill;
7699 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7700 
7701 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7702 	if (ret)
7703 		dev_err(&hdev->pdev->dev,
7704 			"port vlan command, send fail, ret =%d.\n", ret);
7705 	return ret;
7706 }
7707 
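/* Update both the VF and the port vlan filters for vport_id. The port
 * filter is only touched when the first vport adds the vlan or the last
 * vport removes it, as tracked in hdev->vlan_table.
 */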
7708 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7709 				    u16 vport_id, u16 vlan_id,
7710 				    bool is_kill)
7711 {
7712 	u16 vport_idx, vport_num = 0;
7713 	int ret;
7714 
7715 	if (is_kill && !vlan_id)
7716 		return 0;
7717 
7718 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7719 				       proto);
7720 	if (ret) {
7721 		dev_err(&hdev->pdev->dev,
7722 			"Set %d vport vlan filter config fail, ret =%d.\n",
7723 			vport_id, ret);
7724 		return ret;
7725 	}
7726 
	/* vlan 0 may be added twice when the 8021q module is enabled */
7728 	if (!is_kill && !vlan_id &&
7729 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7730 		return 0;
7731 
7732 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7733 		dev_err(&hdev->pdev->dev,
7734 			"Add port vlan failed, vport %d is already in vlan %d\n",
7735 			vport_id, vlan_id);
7736 		return -EINVAL;
7737 	}
7738 
7739 	if (is_kill &&
7740 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7741 		dev_err(&hdev->pdev->dev,
7742 			"Delete port vlan failed, vport %d is not in vlan %d\n",
7743 			vport_id, vlan_id);
7744 		return -EINVAL;
7745 	}
7746 
7747 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7748 		vport_num++;
7749 
7750 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7751 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7752 						 is_kill);
7753 
7754 	return ret;
7755 }
7756 
7757 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7758 {
7759 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7760 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7761 	struct hclge_dev *hdev = vport->back;
7762 	struct hclge_desc desc;
7763 	u16 bmap_index;
7764 	int status;
7765 
7766 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7767 
7768 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7769 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7770 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7771 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7772 		      vcfg->accept_tag1 ? 1 : 0);
7773 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7774 		      vcfg->accept_untag1 ? 1 : 0);
7775 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7776 		      vcfg->accept_tag2 ? 1 : 0);
7777 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7778 		      vcfg->accept_untag2 ? 1 : 0);
7779 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7780 		      vcfg->insert_tag1_en ? 1 : 0);
7781 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7782 		      vcfg->insert_tag2_en ? 1 : 0);
7783 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7784 
7785 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7786 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7787 			HCLGE_VF_NUM_PER_BYTE;
7788 	req->vf_bitmap[bmap_index] =
7789 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7790 
7791 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7792 	if (status)
7793 		dev_err(&hdev->pdev->dev,
7794 			"Send port txvlan cfg command fail, ret =%d\n",
7795 			status);
7796 
7797 	return status;
7798 }
7799 
7800 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7801 {
7802 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7803 	struct hclge_vport_vtag_rx_cfg_cmd *req;
7804 	struct hclge_dev *hdev = vport->back;
7805 	struct hclge_desc desc;
7806 	u16 bmap_index;
7807 	int status;
7808 
7809 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7810 
7811 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7812 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7813 		      vcfg->strip_tag1_en ? 1 : 0);
7814 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7815 		      vcfg->strip_tag2_en ? 1 : 0);
7816 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7817 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
7818 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7819 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
7820 
7821 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7822 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7823 			HCLGE_VF_NUM_PER_BYTE;
7824 	req->vf_bitmap[bmap_index] =
7825 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7826 
7827 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7828 	if (status)
7829 		dev_err(&hdev->pdev->dev,
7830 			"Send port rxvlan cfg command fail, ret =%d\n",
7831 			status);
7832 
7833 	return status;
7834 }
7835 
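/* Configure TX/RX vlan tag offload for the vport. With port based vlan
 * enabled, tag1 is inserted on TX (default_tag1 = vlan_tag) and tag2 is
 * always stripped on RX; otherwise tag1 is passed through on TX and tag2
 * stripping follows the vport's rx_vlan_offload_en setting.
 */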
7836 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7837 				  u16 port_base_vlan_state,
7838 				  u16 vlan_tag)
7839 {
7840 	int ret;
7841 
7842 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7843 		vport->txvlan_cfg.accept_tag1 = true;
7844 		vport->txvlan_cfg.insert_tag1_en = false;
7845 		vport->txvlan_cfg.default_tag1 = 0;
7846 	} else {
7847 		vport->txvlan_cfg.accept_tag1 = false;
7848 		vport->txvlan_cfg.insert_tag1_en = true;
7849 		vport->txvlan_cfg.default_tag1 = vlan_tag;
7850 	}
7851 
7852 	vport->txvlan_cfg.accept_untag1 = true;
7853 
	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision 0x20; newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
7858 	vport->txvlan_cfg.accept_tag2 = true;
7859 	vport->txvlan_cfg.accept_untag2 = true;
7860 	vport->txvlan_cfg.insert_tag2_en = false;
7861 	vport->txvlan_cfg.default_tag2 = 0;
7862 
7863 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7864 		vport->rxvlan_cfg.strip_tag1_en = false;
7865 		vport->rxvlan_cfg.strip_tag2_en =
7866 				vport->rxvlan_cfg.rx_vlan_offload_en;
7867 	} else {
7868 		vport->rxvlan_cfg.strip_tag1_en =
7869 				vport->rxvlan_cfg.rx_vlan_offload_en;
7870 		vport->rxvlan_cfg.strip_tag2_en = true;
7871 	}
7872 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7873 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7874 
7875 	ret = hclge_set_vlan_tx_offload_cfg(vport);
7876 	if (ret)
7877 		return ret;
7878 
7879 	return hclge_set_vlan_rx_offload_cfg(vport);
7880 }
7881 
7882 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7883 {
7884 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7885 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7886 	struct hclge_desc desc;
7887 	int status;
7888 
7889 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7890 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7891 	rx_req->ot_fst_vlan_type =
7892 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7893 	rx_req->ot_sec_vlan_type =
7894 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7895 	rx_req->in_fst_vlan_type =
7896 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7897 	rx_req->in_sec_vlan_type =
7898 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7899 
7900 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7901 	if (status) {
7902 		dev_err(&hdev->pdev->dev,
7903 			"Send rxvlan protocol type command fail, ret =%d\n",
7904 			status);
7905 		return status;
7906 	}
7907 
7908 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7909 
7910 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7911 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7912 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7913 
7914 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7915 	if (status)
7916 		dev_err(&hdev->pdev->dev,
7917 			"Send txvlan protocol type command fail, ret =%d\n",
7918 			status);
7919 
7920 	return status;
7921 }
7922 
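/* Default vlan configuration at init time: enable the vlan filter stages
 * according to the hardware revision, set 802.1Q as the default vlan
 * protocol type, apply each vport's port based vlan offload settings and
 * finally add vlan 0 to the filter.
 */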
7923 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7924 {
7925 #define HCLGE_DEF_VLAN_TYPE		0x8100
7926 
7927 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7928 	struct hclge_vport *vport;
7929 	int ret;
7930 	int i;
7931 
7932 	if (hdev->pdev->revision >= 0x21) {
7933 		/* for revision 0x21, vf vlan filter is per function */
7934 		for (i = 0; i < hdev->num_alloc_vport; i++) {
7935 			vport = &hdev->vport[i];
7936 			ret = hclge_set_vlan_filter_ctrl(hdev,
7937 							 HCLGE_FILTER_TYPE_VF,
7938 							 HCLGE_FILTER_FE_EGRESS,
7939 							 true,
7940 							 vport->vport_id);
7941 			if (ret)
7942 				return ret;
7943 		}
7944 
7945 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7946 						 HCLGE_FILTER_FE_INGRESS, true,
7947 						 0);
7948 		if (ret)
7949 			return ret;
7950 	} else {
7951 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7952 						 HCLGE_FILTER_FE_EGRESS_V1_B,
7953 						 true, 0);
7954 		if (ret)
7955 			return ret;
7956 	}
7957 
7958 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
7959 
7960 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7961 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7962 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7963 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7964 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7965 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7966 
7967 	ret = hclge_set_vlan_protocol_type(hdev);
7968 	if (ret)
7969 		return ret;
7970 
7971 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7972 		u16 vlan_tag;
7973 
7974 		vport = &hdev->vport[i];
7975 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7976 
7977 		ret = hclge_vlan_offload_cfg(vport,
7978 					     vport->port_base_vlan_cfg.state,
7979 					     vlan_tag);
7980 		if (ret)
7981 			return ret;
7982 	}
7983 
7984 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7985 }
7986 
7987 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7988 				       bool writen_to_tbl)
7989 {
7990 	struct hclge_vport_vlan_cfg *vlan;
7991 
7992 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7993 	if (!vlan)
7994 		return;
7995 
7996 	vlan->hd_tbl_status = writen_to_tbl;
7997 	vlan->vlan_id = vlan_id;
7998 
7999 	list_add_tail(&vlan->node, &vport->vlan_list);
8000 }
8001 
8002 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8003 {
8004 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8005 	struct hclge_dev *hdev = vport->back;
8006 	int ret;
8007 
8008 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8009 		if (!vlan->hd_tbl_status) {
8010 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8011 						       vport->vport_id,
8012 						       vlan->vlan_id, false);
8013 			if (ret) {
8014 				dev_err(&hdev->pdev->dev,
8015 					"restore vport vlan list failed, ret=%d\n",
8016 					ret);
8017 				return ret;
8018 			}
8019 		}
8020 		vlan->hd_tbl_status = true;
8021 	}
8022 
8023 	return 0;
8024 }
8025 
8026 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8027 				      bool is_write_tbl)
8028 {
8029 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8030 	struct hclge_dev *hdev = vport->back;
8031 
8032 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8033 		if (vlan->vlan_id == vlan_id) {
8034 			if (is_write_tbl && vlan->hd_tbl_status)
8035 				hclge_set_vlan_filter_hw(hdev,
8036 							 htons(ETH_P_8021Q),
8037 							 vport->vport_id,
8038 							 vlan_id,
8039 							 true);
8040 
8041 			list_del(&vlan->node);
8042 			kfree(vlan);
8043 			break;
8044 		}
8045 	}
8046 }
8047 
8048 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8049 {
8050 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8051 	struct hclge_dev *hdev = vport->back;
8052 
8053 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8054 		if (vlan->hd_tbl_status)
8055 			hclge_set_vlan_filter_hw(hdev,
8056 						 htons(ETH_P_8021Q),
8057 						 vport->vport_id,
8058 						 vlan->vlan_id,
8059 						 true);
8060 
8061 		vlan->hd_tbl_status = false;
8062 		if (is_del_list) {
8063 			list_del(&vlan->node);
8064 			kfree(vlan);
8065 		}
8066 	}
8067 }
8068 
8069 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8070 {
8071 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8072 	struct hclge_vport *vport;
8073 	int i;
8074 
8075 	mutex_lock(&hdev->vport_cfg_mutex);
8076 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8077 		vport = &hdev->vport[i];
8078 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8079 			list_del(&vlan->node);
8080 			kfree(vlan);
8081 		}
8082 	}
8083 	mutex_unlock(&hdev->vport_cfg_mutex);
8084 }
8085 
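/* Replay the vlan configuration to hardware after a reset: a vport with
 * port based vlan enabled restores that single vlan, other vports restore
 * the entries in their vlan list that had been written to hardware.
 */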
8086 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8087 {
8088 	struct hclge_vport *vport = hclge_get_vport(handle);
8089 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8090 	struct hclge_dev *hdev = vport->back;
8091 	u16 vlan_proto;
8092 	u16 state, vlan_id;
8093 	int i;
8094 
8095 	mutex_lock(&hdev->vport_cfg_mutex);
8096 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8097 		vport = &hdev->vport[i];
8098 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8099 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8100 		state = vport->port_base_vlan_cfg.state;
8101 
8102 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8103 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8104 						 vport->vport_id, vlan_id,
8105 						 false);
8106 			continue;
8107 		}
8108 
8109 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8110 			if (vlan->hd_tbl_status)
8111 				hclge_set_vlan_filter_hw(hdev,
8112 							 htons(ETH_P_8021Q),
8113 							 vport->vport_id,
8114 							 vlan->vlan_id,
8115 							 false);
8116 		}
8117 	}
8118 
8119 	mutex_unlock(&hdev->vport_cfg_mutex);
8120 }
8121 
8122 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8123 {
8124 	struct hclge_vport *vport = hclge_get_vport(handle);
8125 
8126 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8127 		vport->rxvlan_cfg.strip_tag1_en = false;
8128 		vport->rxvlan_cfg.strip_tag2_en = enable;
8129 	} else {
8130 		vport->rxvlan_cfg.strip_tag1_en = enable;
8131 		vport->rxvlan_cfg.strip_tag2_en = true;
8132 	}
8133 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8134 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8135 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8136 
8137 	return hclge_set_vlan_rx_offload_cfg(vport);
8138 }
8139 
8140 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8141 					    u16 port_base_vlan_state,
8142 					    struct hclge_vlan_info *new_info,
8143 					    struct hclge_vlan_info *old_info)
8144 {
8145 	struct hclge_dev *hdev = vport->back;
8146 	int ret;
8147 
8148 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8149 		hclge_rm_vport_all_vlan_table(vport, false);
8150 		return hclge_set_vlan_filter_hw(hdev,
8151 						 htons(new_info->vlan_proto),
8152 						 vport->vport_id,
8153 						 new_info->vlan_tag,
8154 						 false);
8155 	}
8156 
8157 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8158 				       vport->vport_id, old_info->vlan_tag,
8159 				       true);
8160 	if (ret)
8161 		return ret;
8162 
8163 	return hclge_add_vport_all_vlan_table(vport);
8164 }
8165 
8166 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8167 				    struct hclge_vlan_info *vlan_info)
8168 {
8169 	struct hnae3_handle *nic = &vport->nic;
8170 	struct hclge_vlan_info *old_vlan_info;
8171 	struct hclge_dev *hdev = vport->back;
8172 	int ret;
8173 
8174 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8175 
8176 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8177 	if (ret)
8178 		return ret;
8179 
8180 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8181 		/* add new VLAN tag */
8182 		ret = hclge_set_vlan_filter_hw(hdev,
8183 					       htons(vlan_info->vlan_proto),
8184 					       vport->vport_id,
8185 					       vlan_info->vlan_tag,
8186 					       false);
8187 		if (ret)
8188 			return ret;
8189 
8190 		/* remove old VLAN tag */
8191 		ret = hclge_set_vlan_filter_hw(hdev,
8192 					       htons(old_vlan_info->vlan_proto),
8193 					       vport->vport_id,
8194 					       old_vlan_info->vlan_tag,
8195 					       true);
8196 		if (ret)
8197 			return ret;
8198 
8199 		goto update;
8200 	}
8201 
8202 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8203 					       old_vlan_info);
8204 	if (ret)
8205 		return ret;
8206 
	/* update state only when disabling/enabling port based VLAN */
8208 	vport->port_base_vlan_cfg.state = state;
8209 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8210 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8211 	else
8212 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8213 
8214 update:
8215 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8216 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8217 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8218 
8219 	return 0;
8220 }
8221 
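/* Compare the requested vlan with the current port based vlan state and
 * decide whether the configuration must be enabled, disabled, modified or
 * left unchanged.
 */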
8222 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8223 					  enum hnae3_port_base_vlan_state state,
8224 					  u16 vlan)
8225 {
8226 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8227 		if (!vlan)
8228 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8229 		else
8230 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8231 	} else {
8232 		if (!vlan)
8233 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8234 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8235 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8236 		else
8237 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8238 	}
8239 }
8240 
8241 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8242 				    u16 vlan, u8 qos, __be16 proto)
8243 {
8244 	struct hclge_vport *vport = hclge_get_vport(handle);
8245 	struct hclge_dev *hdev = vport->back;
8246 	struct hclge_vlan_info vlan_info;
8247 	u16 state;
8248 	int ret;
8249 
8250 	if (hdev->pdev->revision == 0x20)
8251 		return -EOPNOTSUPP;
8252 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8254 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8255 		return -EINVAL;
8256 	if (proto != htons(ETH_P_8021Q))
8257 		return -EPROTONOSUPPORT;
8258 
8259 	vport = &hdev->vport[vfid];
8260 	state = hclge_get_port_base_vlan_state(vport,
8261 					       vport->port_base_vlan_cfg.state,
8262 					       vlan);
8263 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8264 		return 0;
8265 
8266 	vlan_info.vlan_tag = vlan;
8267 	vlan_info.qos = qos;
8268 	vlan_info.vlan_proto = ntohs(proto);
8269 
8270 	/* update port based VLAN for PF */
8271 	if (!vfid) {
8272 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8273 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8274 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8275 
8276 		return ret;
8277 	}
8278 
8279 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8280 		return hclge_update_port_base_vlan_cfg(vport, state,
8281 						       &vlan_info);
8282 	} else {
8283 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8284 							(u8)vfid, state,
8285 							vlan, qos,
8286 							ntohs(proto));
8287 		return ret;
8288 	}
8289 }
8290 
8291 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8292 			  u16 vlan_id, bool is_kill)
8293 {
8294 	struct hclge_vport *vport = hclge_get_vport(handle);
8295 	struct hclge_dev *hdev = vport->back;
8296 	bool writen_to_tbl = false;
8297 	int ret = 0;
8298 
	/* When the device is resetting, firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the reset
	 * has finished.
	 */
8303 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8304 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8305 		return -EBUSY;
8306 	}
8307 
	/* When port based vlan is enabled, the port based vlan is used as
	 * the vlan filter entry. In this case, the vlan filter table is not
	 * updated when the user adds a new vlan or removes an existing one;
	 * only the vport vlan list is updated. The vlan ids in the vlan list
	 * are not written to the vlan filter table until port based vlan is
	 * disabled.
	 */
8314 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8315 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8316 					       vlan_id, is_kill);
8317 		writen_to_tbl = true;
8318 	}
8319 
8320 	if (!ret) {
8321 		if (is_kill)
8322 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8323 		else
8324 			hclge_add_vport_vlan_table(vport, vlan_id,
8325 						   writen_to_tbl);
8326 	} else if (is_kill) {
		/* When removing the hw vlan filter failed, record the vlan
		 * id and try to remove it from hw later, to stay consistent
		 * with the stack.
		 */
8331 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8332 	}
8333 	return ret;
8334 }
8335 
8336 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8337 {
8338 #define HCLGE_MAX_SYNC_COUNT	60
8339 
8340 	int i, ret, sync_cnt = 0;
8341 	u16 vlan_id;
8342 
	/* start from vport 1, since the PF is always alive */
8344 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8345 		struct hclge_vport *vport = &hdev->vport[i];
8346 
8347 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8348 					 VLAN_N_VID);
8349 		while (vlan_id != VLAN_N_VID) {
8350 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8351 						       vport->vport_id, vlan_id,
8352 						       true);
8353 			if (ret && ret != -EINVAL)
8354 				return;
8355 
8356 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8357 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8358 
8359 			sync_cnt++;
8360 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8361 				return;
8362 
8363 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8364 						 VLAN_N_VID);
8365 		}
8366 	}
8367 }
8368 
8369 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8370 {
8371 	struct hclge_config_max_frm_size_cmd *req;
8372 	struct hclge_desc desc;
8373 
8374 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8375 
8376 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8377 	req->max_frm_size = cpu_to_le16(new_mps);
8378 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8379 
8380 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8381 }
8382 
8383 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8384 {
8385 	struct hclge_vport *vport = hclge_get_vport(handle);
8386 
8387 	return hclge_set_vport_mtu(vport, new_mtu);
8388 }
8389 
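/* Update the max packet size (mps) of a vport. A VF's mps must fit within
 * the PF's and only updates the software state; changing the PF's mps
 * also reprograms the MAC frame size and reallocates the packet buffers,
 * with the client stopped around the change.
 */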
8390 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8391 {
8392 	struct hclge_dev *hdev = vport->back;
8393 	int i, max_frm_size, ret;
8394 
8395 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8396 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8397 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8398 		return -EINVAL;
8399 
8400 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8401 	mutex_lock(&hdev->vport_lock);
8402 	/* VF's mps must fit within hdev->mps */
8403 	if (vport->vport_id && max_frm_size > hdev->mps) {
8404 		mutex_unlock(&hdev->vport_lock);
8405 		return -EINVAL;
8406 	} else if (vport->vport_id) {
8407 		vport->mps = max_frm_size;
8408 		mutex_unlock(&hdev->vport_lock);
8409 		return 0;
8410 	}
8411 
	/* PF's mps must be greater than or equal to every VF's mps */
8413 	for (i = 1; i < hdev->num_alloc_vport; i++)
8414 		if (max_frm_size < hdev->vport[i].mps) {
8415 			mutex_unlock(&hdev->vport_lock);
8416 			return -EINVAL;
8417 		}
8418 
8419 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8420 
8421 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8422 	if (ret) {
8423 		dev_err(&hdev->pdev->dev,
8424 			"Change mtu fail, ret =%d\n", ret);
8425 		goto out;
8426 	}
8427 
8428 	hdev->mps = max_frm_size;
8429 	vport->mps = max_frm_size;
8430 
8431 	ret = hclge_buffer_alloc(hdev);
8432 	if (ret)
8433 		dev_err(&hdev->pdev->dev,
8434 			"Allocate buffer fail, ret =%d\n", ret);
8435 
8436 out:
8437 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8438 	mutex_unlock(&hdev->vport_lock);
8439 	return ret;
8440 }
8441 
8442 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8443 				    bool enable)
8444 {
8445 	struct hclge_reset_tqp_queue_cmd *req;
8446 	struct hclge_desc desc;
8447 	int ret;
8448 
8449 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8450 
8451 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8452 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8453 	if (enable)
8454 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8455 
8456 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8457 	if (ret) {
8458 		dev_err(&hdev->pdev->dev,
8459 			"Send tqp reset cmd error, status =%d\n", ret);
8460 		return ret;
8461 	}
8462 
8463 	return 0;
8464 }
8465 
8466 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8467 {
8468 	struct hclge_reset_tqp_queue_cmd *req;
8469 	struct hclge_desc desc;
8470 	int ret;
8471 
8472 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8473 
8474 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8475 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8476 
8477 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8478 	if (ret) {
8479 		dev_err(&hdev->pdev->dev,
8480 			"Get reset status error, status =%d\n", ret);
8481 		return ret;
8482 	}
8483 
8484 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8485 }
8486 
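/* Convert a queue id local to the handle into the device-global TQP index
 * expected by the queue reset commands.
 */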
8487 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8488 {
8489 	struct hnae3_queue *queue;
8490 	struct hclge_tqp *tqp;
8491 
8492 	queue = handle->kinfo.tqp[queue_id];
8493 	tqp = container_of(queue, struct hclge_tqp, q);
8494 
8495 	return tqp->index;
8496 }
8497 
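/* Reset a single TQP: disable the queue, assert the TQP reset, poll the
 * ready status up to HCLGE_TQP_RESET_TRY_TIMES, then deassert the reset.
 */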
8498 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8499 {
8500 	struct hclge_vport *vport = hclge_get_vport(handle);
8501 	struct hclge_dev *hdev = vport->back;
8502 	int reset_try_times = 0;
8503 	int reset_status;
8504 	u16 queue_gid;
8505 	int ret;
8506 
8507 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8508 
8509 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8510 	if (ret) {
8511 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8512 		return ret;
8513 	}
8514 
8515 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8516 	if (ret) {
8517 		dev_err(&hdev->pdev->dev,
8518 			"Send reset tqp cmd fail, ret = %d\n", ret);
8519 		return ret;
8520 	}
8521 
8522 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8523 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8524 		if (reset_status)
8525 			break;
8526 
8527 		/* Wait for tqp hw reset */
8528 		usleep_range(1000, 1200);
8529 	}
8530 
8531 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8532 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return -ETIME;
8534 	}
8535 
8536 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8537 	if (ret)
8538 		dev_err(&hdev->pdev->dev,
8539 			"Deassert the soft reset fail, ret = %d\n", ret);
8540 
8541 	return ret;
8542 }
8543 
8544 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8545 {
8546 	struct hclge_dev *hdev = vport->back;
8547 	int reset_try_times = 0;
8548 	int reset_status;
8549 	u16 queue_gid;
8550 	int ret;
8551 
8552 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8553 
8554 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8555 	if (ret) {
8556 		dev_warn(&hdev->pdev->dev,
8557 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8558 		return;
8559 	}
8560 
8561 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8562 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8563 		if (reset_status)
8564 			break;
8565 
8566 		/* Wait for tqp hw reset */
8567 		usleep_range(1000, 1200);
8568 	}
8569 
8570 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8571 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8572 		return;
8573 	}
8574 
8575 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8576 	if (ret)
8577 		dev_warn(&hdev->pdev->dev,
8578 			 "Deassert the soft reset fail, ret = %d\n", ret);
8579 }
8580 
8581 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8582 {
8583 	struct hclge_vport *vport = hclge_get_vport(handle);
8584 	struct hclge_dev *hdev = vport->back;
8585 
8586 	return hdev->fw_version;
8587 }
8588 
8589 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8590 {
8591 	struct phy_device *phydev = hdev->hw.mac.phydev;
8592 
8593 	if (!phydev)
8594 		return;
8595 
8596 	phy_set_asym_pause(phydev, rx_en, tx_en);
8597 }
8598 
8599 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8600 {
8601 	int ret;
8602 
8603 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8604 		return 0;
8605 
8606 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8607 	if (ret)
8608 		dev_err(&hdev->pdev->dev,
8609 			"configure pauseparam error, ret = %d.\n", ret);
8610 
8611 	return ret;
8612 }
8613 
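/* Re-resolve MAC pause settings after PHY autonegotiation from the local
 * and link partner pause advertisements; half duplex disables pause in
 * both directions.
 */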
8614 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8615 {
8616 	struct phy_device *phydev = hdev->hw.mac.phydev;
8617 	u16 remote_advertising = 0;
8618 	u16 local_advertising;
8619 	u32 rx_pause, tx_pause;
8620 	u8 flowctl;
8621 
8622 	if (!phydev->link || !phydev->autoneg)
8623 		return 0;
8624 
8625 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8626 
8627 	if (phydev->pause)
8628 		remote_advertising = LPA_PAUSE_CAP;
8629 
8630 	if (phydev->asym_pause)
8631 		remote_advertising |= LPA_PAUSE_ASYM;
8632 
8633 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8634 					   remote_advertising);
8635 	tx_pause = flowctl & FLOW_CTRL_TX;
8636 	rx_pause = flowctl & FLOW_CTRL_RX;
8637 
8638 	if (phydev->duplex == HCLGE_MAC_HALF) {
8639 		tx_pause = 0;
8640 		rx_pause = 0;
8641 	}
8642 
8643 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8644 }
8645 
8646 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8647 				 u32 *rx_en, u32 *tx_en)
8648 {
8649 	struct hclge_vport *vport = hclge_get_vport(handle);
8650 	struct hclge_dev *hdev = vport->back;
8651 	struct phy_device *phydev = hdev->hw.mac.phydev;
8652 
8653 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8654 
8655 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8656 		*rx_en = 0;
8657 		*tx_en = 0;
8658 		return;
8659 	}
8660 
8661 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8662 		*rx_en = 1;
8663 		*tx_en = 0;
8664 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8665 		*tx_en = 1;
8666 		*rx_en = 0;
8667 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8668 		*rx_en = 1;
8669 		*tx_en = 1;
8670 	} else {
8671 		*rx_en = 0;
8672 		*tx_en = 0;
8673 	}
8674 }
8675 
8676 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8677 					 u32 rx_en, u32 tx_en)
8678 {
8679 	if (rx_en && tx_en)
8680 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8681 	else if (rx_en && !tx_en)
8682 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8683 	else if (!rx_en && tx_en)
8684 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8685 	else
8686 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8687 
8688 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8689 }
8690 
8691 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8692 				u32 rx_en, u32 tx_en)
8693 {
8694 	struct hclge_vport *vport = hclge_get_vport(handle);
8695 	struct hclge_dev *hdev = vport->back;
8696 	struct phy_device *phydev = hdev->hw.mac.phydev;
8697 	u32 fc_autoneg;
8698 
8699 	if (phydev) {
8700 		fc_autoneg = hclge_get_autoneg(handle);
8701 		if (auto_neg != fc_autoneg) {
8702 			dev_info(&hdev->pdev->dev,
8703 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8704 			return -EOPNOTSUPP;
8705 		}
8706 	}
8707 
8708 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8709 		dev_info(&hdev->pdev->dev,
8710 			 "Priority flow control enabled. Cannot set link flow control.\n");
8711 		return -EOPNOTSUPP;
8712 	}
8713 
8714 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8715 
8716 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8717 
8718 	if (!auto_neg)
8719 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8720 
8721 	if (phydev)
8722 		return phy_start_aneg(phydev);
8723 
8724 	return -EOPNOTSUPP;
8725 }
8726 
8727 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8728 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8729 {
8730 	struct hclge_vport *vport = hclge_get_vport(handle);
8731 	struct hclge_dev *hdev = vport->back;
8732 
8733 	if (speed)
8734 		*speed = hdev->hw.mac.speed;
8735 	if (duplex)
8736 		*duplex = hdev->hw.mac.duplex;
8737 	if (auto_neg)
8738 		*auto_neg = hdev->hw.mac.autoneg;
8739 }
8740 
8741 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8742 				 u8 *module_type)
8743 {
8744 	struct hclge_vport *vport = hclge_get_vport(handle);
8745 	struct hclge_dev *hdev = vport->back;
8746 
8747 	if (media_type)
8748 		*media_type = hdev->hw.mac.media_type;
8749 
8750 	if (module_type)
8751 		*module_type = hdev->hw.mac.module_type;
8752 }
8753 
8754 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8755 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8756 {
8757 	struct hclge_vport *vport = hclge_get_vport(handle);
8758 	struct hclge_dev *hdev = vport->back;
8759 	struct phy_device *phydev = hdev->hw.mac.phydev;
8760 	int mdix_ctrl, mdix, is_resolved;
8761 	unsigned int retval;
8762 
8763 	if (!phydev) {
8764 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8765 		*tp_mdix = ETH_TP_MDI_INVALID;
8766 		return;
8767 	}
8768 
8769 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8770 
8771 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8772 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8773 				    HCLGE_PHY_MDIX_CTRL_S);
8774 
8775 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8776 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8777 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8778 
8779 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8780 
8781 	switch (mdix_ctrl) {
8782 	case 0x0:
8783 		*tp_mdix_ctrl = ETH_TP_MDI;
8784 		break;
8785 	case 0x1:
8786 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8787 		break;
8788 	case 0x3:
8789 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8790 		break;
8791 	default:
8792 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8793 		break;
8794 	}
8795 
8796 	if (!is_resolved)
8797 		*tp_mdix = ETH_TP_MDI_INVALID;
8798 	else if (mdix)
8799 		*tp_mdix = ETH_TP_MDI_X;
8800 	else
8801 		*tp_mdix = ETH_TP_MDI;
8802 }
8803 
8804 static void hclge_info_show(struct hclge_dev *hdev)
8805 {
8806 	struct device *dev = &hdev->pdev->dev;
8807 
8808 	dev_info(dev, "PF info begin:\n");
8809 
8810 	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8811 	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8812 	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8813 	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8815 	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8816 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8817 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8818 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8819 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8820 	dev_info(dev, "This is %s PF\n",
8821 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8822 	dev_info(dev, "DCB %s\n",
8823 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8824 	dev_info(dev, "MQPRIO %s\n",
8825 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8826 
8827 	dev_info(dev, "PF info end.\n");
8828 }
8829 
8830 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8831 					  struct hclge_vport *vport)
8832 {
8833 	struct hnae3_client *client = vport->nic.client;
8834 	struct hclge_dev *hdev = ae_dev->priv;
8835 	int rst_cnt;
8836 	int ret;
8837 
8838 	rst_cnt = hdev->rst_stats.reset_cnt;
8839 	ret = client->ops->init_instance(&vport->nic);
8840 	if (ret)
8841 		return ret;
8842 
8843 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8844 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8845 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8846 		ret = -EBUSY;
8847 		goto init_nic_err;
8848 	}
8849 
8850 	/* Enable nic hw error interrupts */
8851 	ret = hclge_config_nic_hw_error(hdev, true);
8852 	if (ret) {
8853 		dev_err(&ae_dev->pdev->dev,
8854 			"fail(%d) to enable hw error interrupts\n", ret);
8855 		goto init_nic_err;
8856 	}
8857 
8858 	hnae3_set_client_init_flag(client, ae_dev, 1);
8859 
8860 	if (netif_msg_drv(&hdev->vport->nic))
8861 		hclge_info_show(hdev);
8862 
8863 	return ret;
8864 
8865 init_nic_err:
8866 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8867 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8868 		msleep(HCLGE_WAIT_RESET_DONE);
8869 
8870 	client->ops->uninit_instance(&vport->nic, 0);
8871 
8872 	return ret;
8873 }
8874 
8875 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8876 					   struct hclge_vport *vport)
8877 {
8878 	struct hnae3_client *client = vport->roce.client;
8879 	struct hclge_dev *hdev = ae_dev->priv;
8880 	int rst_cnt;
8881 	int ret;
8882 
8883 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8884 	    !hdev->nic_client)
8885 		return 0;
8886 
8887 	client = hdev->roce_client;
8888 	ret = hclge_init_roce_base_info(vport);
8889 	if (ret)
8890 		return ret;
8891 
8892 	rst_cnt = hdev->rst_stats.reset_cnt;
8893 	ret = client->ops->init_instance(&vport->roce);
8894 	if (ret)
8895 		return ret;
8896 
8897 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8898 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8899 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8900 		ret = -EBUSY;
8901 		goto init_roce_err;
8902 	}
8903 
8904 	/* Enable roce ras interrupts */
8905 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
8906 	if (ret) {
8907 		dev_err(&ae_dev->pdev->dev,
8908 			"fail(%d) to enable roce ras interrupts\n", ret);
8909 		goto init_roce_err;
8910 	}
8911 
8912 	hnae3_set_client_init_flag(client, ae_dev, 1);
8913 
8914 	return 0;
8915 
8916 init_roce_err:
8917 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8918 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8919 		msleep(HCLGE_WAIT_RESET_DONE);
8920 
8921 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8922 
8923 	return ret;
8924 }
8925 
8926 static int hclge_init_client_instance(struct hnae3_client *client,
8927 				      struct hnae3_ae_dev *ae_dev)
8928 {
8929 	struct hclge_dev *hdev = ae_dev->priv;
8930 	struct hclge_vport *vport;
8931 	int i, ret;
8932 
	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8934 		vport = &hdev->vport[i];
8935 
8936 		switch (client->type) {
8937 		case HNAE3_CLIENT_KNIC:
8938 
8939 			hdev->nic_client = client;
8940 			vport->nic.client = client;
8941 			ret = hclge_init_nic_client_instance(ae_dev, vport);
8942 			if (ret)
8943 				goto clear_nic;
8944 
8945 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8946 			if (ret)
8947 				goto clear_roce;
8948 
8949 			break;
8950 		case HNAE3_CLIENT_ROCE:
8951 			if (hnae3_dev_roce_supported(hdev)) {
8952 				hdev->roce_client = client;
8953 				vport->roce.client = client;
8954 			}
8955 
8956 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8957 			if (ret)
8958 				goto clear_roce;
8959 
8960 			break;
8961 		default:
8962 			return -EINVAL;
8963 		}
8964 	}
8965 
8966 	return 0;
8967 
8968 clear_nic:
8969 	hdev->nic_client = NULL;
8970 	vport->nic.client = NULL;
8971 	return ret;
8972 clear_roce:
8973 	hdev->roce_client = NULL;
8974 	vport->roce.client = NULL;
8975 	return ret;
8976 }
8977 
8978 static void hclge_uninit_client_instance(struct hnae3_client *client,
8979 					 struct hnae3_ae_dev *ae_dev)
8980 {
8981 	struct hclge_dev *hdev = ae_dev->priv;
8982 	struct hclge_vport *vport;
8983 	int i;
8984 
8985 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8986 		vport = &hdev->vport[i];
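		/* uninit the RoCE client first; the NIC client is only
		 * uninitialized when the NIC client itself is being removed
		 */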
8987 		if (hdev->roce_client) {
8988 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8989 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8990 				msleep(HCLGE_WAIT_RESET_DONE);
8991 
8992 			hdev->roce_client->ops->uninit_instance(&vport->roce,
8993 								0);
8994 			hdev->roce_client = NULL;
8995 			vport->roce.client = NULL;
8996 		}
8997 		if (client->type == HNAE3_CLIENT_ROCE)
8998 			return;
8999 		if (hdev->nic_client && client->ops->uninit_instance) {
9000 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9001 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9002 				msleep(HCLGE_WAIT_RESET_DONE);
9003 
9004 			client->ops->uninit_instance(&vport->nic, 0);
9005 			hdev->nic_client = NULL;
9006 			vport->nic.client = NULL;
9007 		}
9008 	}
9009 }
9010 
9011 static int hclge_pci_init(struct hclge_dev *hdev)
9012 {
9013 	struct pci_dev *pdev = hdev->pdev;
9014 	struct hclge_hw *hw;
9015 	int ret;
9016 
9017 	ret = pci_enable_device(pdev);
9018 	if (ret) {
9019 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9020 		return ret;
9021 	}
9022 
9023 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9024 	if (ret) {
9025 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9026 		if (ret) {
			dev_err(&pdev->dev, "can't set consistent PCI DMA\n");
9029 			goto err_disable_device;
9030 		}
9031 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9032 	}
9033 
9034 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9035 	if (ret) {
9036 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9037 		goto err_disable_device;
9038 	}
9039 
9040 	pci_set_master(pdev);
9041 	hw = &hdev->hw;
9042 	hw->io_base = pcim_iomap(pdev, 2, 0);
9043 	if (!hw->io_base) {
9044 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9045 		ret = -ENOMEM;
9046 		goto err_clr_master;
9047 	}
9048 
9049 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9050 
9051 	return 0;
9052 err_clr_master:
9053 	pci_clear_master(pdev);
9054 	pci_release_regions(pdev);
9055 err_disable_device:
9056 	pci_disable_device(pdev);
9057 
9058 	return ret;
9059 }
9060 
9061 static void hclge_pci_uninit(struct hclge_dev *hdev)
9062 {
9063 	struct pci_dev *pdev = hdev->pdev;
9064 
9065 	pcim_iounmap(pdev, hdev->hw.io_base);
9066 	pci_free_irq_vectors(pdev);
9067 	pci_clear_master(pdev);
9068 	pci_release_mem_regions(pdev);
9069 	pci_disable_device(pdev);
9070 }
9071 
9072 static void hclge_state_init(struct hclge_dev *hdev)
9073 {
9074 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9075 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9076 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9077 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9078 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9079 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9080 }
9081 
9082 static void hclge_state_uninit(struct hclge_dev *hdev)
9083 {
9084 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9085 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9086 
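	/* only flush timers and work items that were actually initialized */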
9087 	if (hdev->reset_timer.function)
9088 		del_timer_sync(&hdev->reset_timer);
9089 	if (hdev->service_task.work.func)
9090 		cancel_delayed_work_sync(&hdev->service_task);
9091 	if (hdev->rst_service_task.func)
9092 		cancel_work_sync(&hdev->rst_service_task);
9093 	if (hdev->mbx_service_task.func)
9094 		cancel_work_sync(&hdev->mbx_service_task);
9095 }
9096 
9097 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9098 {
9099 #define HCLGE_FLR_WAIT_MS	100
9100 #define HCLGE_FLR_WAIT_CNT	50
9101 	struct hclge_dev *hdev = ae_dev->priv;
9102 	int cnt = 0;
9103 
9104 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9105 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9106 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9107 	hclge_reset_event(hdev->pdev, NULL);
9108 
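	/* poll until HNAE3_FLR_DOWN is set, or give up after
	 * HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS milliseconds
	 */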
9109 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9110 	       cnt++ < HCLGE_FLR_WAIT_CNT)
9111 		msleep(HCLGE_FLR_WAIT_MS);
9112 
9113 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9114 		dev_err(&hdev->pdev->dev,
9115 			"flr wait down timeout: %d\n", cnt);
9116 }
9117 
9118 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9119 {
9120 	struct hclge_dev *hdev = ae_dev->priv;
9121 
9122 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9123 }
9124 
9125 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9126 {
9127 	u16 i;
9128 
9129 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9130 		struct hclge_vport *vport = &hdev->vport[i];
9131 		int ret;
9132 
		/* Send cmd to clear VF's FUNC_RST_ING */
9134 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9135 		if (ret)
9136 			dev_warn(&hdev->pdev->dev,
9137 				 "clear vf(%d) rst failed %d!\n",
9138 				 vport->vport_id, ret);
9139 	}
9140 }
9141 
9142 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9143 {
9144 	struct pci_dev *pdev = ae_dev->pdev;
9145 	struct hclge_dev *hdev;
9146 	int ret;
9147 
9148 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9149 	if (!hdev) {
9150 		ret = -ENOMEM;
9151 		goto out;
9152 	}
9153 
9154 	hdev->pdev = pdev;
9155 	hdev->ae_dev = ae_dev;
9156 	hdev->reset_type = HNAE3_NONE_RESET;
9157 	hdev->reset_level = HNAE3_FUNC_RESET;
9158 	ae_dev->priv = hdev;
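	/* default MPS: standard frame plus FCS and two VLAN tags */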
9159 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9160 
9161 	mutex_init(&hdev->vport_lock);
9162 	mutex_init(&hdev->vport_cfg_mutex);
9163 	spin_lock_init(&hdev->fd_rule_lock);
9164 
9165 	ret = hclge_pci_init(hdev);
9166 	if (ret) {
9167 		dev_err(&pdev->dev, "PCI init failed\n");
9168 		goto out;
9169 	}
9170 
	/* initialize the firmware command queue */
9172 	ret = hclge_cmd_queue_init(hdev);
9173 	if (ret) {
9174 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9175 		goto err_pci_uninit;
9176 	}
9177 
	/* initialize firmware command handling */
9179 	ret = hclge_cmd_init(hdev);
9180 	if (ret)
9181 		goto err_cmd_uninit;
9182 
9183 	ret = hclge_get_cap(hdev);
9184 	if (ret) {
9185 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9186 			ret);
9187 		goto err_cmd_uninit;
9188 	}
9189 
9190 	ret = hclge_configure(hdev);
9191 	if (ret) {
9192 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9193 		goto err_cmd_uninit;
9194 	}
9195 
9196 	ret = hclge_init_msi(hdev);
9197 	if (ret) {
9198 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9199 		goto err_cmd_uninit;
9200 	}
9201 
9202 	ret = hclge_misc_irq_init(hdev);
9203 	if (ret) {
9204 		dev_err(&pdev->dev,
9205 			"Misc IRQ(vector0) init error, ret = %d.\n",
9206 			ret);
9207 		goto err_msi_uninit;
9208 	}
9209 
9210 	ret = hclge_alloc_tqps(hdev);
9211 	if (ret) {
9212 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9213 		goto err_msi_irq_uninit;
9214 	}
9215 
9216 	ret = hclge_alloc_vport(hdev);
9217 	if (ret) {
9218 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9219 		goto err_msi_irq_uninit;
9220 	}
9221 
9222 	ret = hclge_map_tqp(hdev);
9223 	if (ret) {
9224 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9225 		goto err_msi_irq_uninit;
9226 	}
9227 
9228 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9229 		ret = hclge_mac_mdio_config(hdev);
9230 		if (ret) {
9231 			dev_err(&hdev->pdev->dev,
9232 				"mdio config fail ret=%d\n", ret);
9233 			goto err_msi_irq_uninit;
9234 		}
9235 	}
9236 
9237 	ret = hclge_init_umv_space(hdev);
9238 	if (ret) {
9239 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9240 		goto err_mdiobus_unreg;
9241 	}
9242 
9243 	ret = hclge_mac_init(hdev);
9244 	if (ret) {
9245 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9246 		goto err_mdiobus_unreg;
9247 	}
9248 
9249 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9250 	if (ret) {
9251 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9252 		goto err_mdiobus_unreg;
9253 	}
9254 
9255 	ret = hclge_config_gro(hdev, true);
9256 	if (ret)
9257 		goto err_mdiobus_unreg;
9258 
9259 	ret = hclge_init_vlan_config(hdev);
9260 	if (ret) {
9261 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9262 		goto err_mdiobus_unreg;
9263 	}
9264 
9265 	ret = hclge_tm_schd_init(hdev);
9266 	if (ret) {
9267 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9268 		goto err_mdiobus_unreg;
9269 	}
9270 
9271 	hclge_rss_init_cfg(hdev);
9272 	ret = hclge_rss_init_hw(hdev);
9273 	if (ret) {
9274 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9275 		goto err_mdiobus_unreg;
9276 	}
9277 
9278 	ret = init_mgr_tbl(hdev);
9279 	if (ret) {
9280 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9281 		goto err_mdiobus_unreg;
9282 	}
9283 
9284 	ret = hclge_init_fd_config(hdev);
9285 	if (ret) {
9286 		dev_err(&pdev->dev,
9287 			"fd table init fail, ret=%d\n", ret);
9288 		goto err_mdiobus_unreg;
9289 	}
9290 
9291 	INIT_KFIFO(hdev->mac_tnl_log);
9292 
9293 	hclge_dcb_ops_set(hdev);
9294 
9295 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9296 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9297 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9298 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9299 
	/* Set up affinity after the service timer setup because add_timer_on()
	 * is called in the affinity notify callback.
	 */
9303 	hclge_misc_affinity_setup(hdev);
9304 
9305 	hclge_clear_all_event_cause(hdev);
9306 	hclge_clear_resetting_state(hdev);
9307 
	/* Log and clear the hw errors that have already occurred */
9309 	hclge_handle_all_hns_hw_errors(ae_dev);
9310 
	/* request a delayed reset for the error recovery because an immediate
	 * global reset on a PF would affect the pending initialization of
	 * other PFs
	 */
9314 	if (ae_dev->hw_err_reset_req) {
9315 		enum hnae3_reset_type reset_level;
9316 
9317 		reset_level = hclge_get_reset_level(ae_dev,
9318 						    &ae_dev->hw_err_reset_req);
9319 		hclge_set_def_reset_request(ae_dev, reset_level);
9320 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9321 	}
9322 
9323 	/* Enable MISC vector(vector0) */
9324 	hclge_enable_vector(&hdev->misc_vector, true);
9325 
9326 	hclge_state_init(hdev);
9327 	hdev->last_reset_time = jiffies;
9328 
9329 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9330 		 HCLGE_DRIVER_NAME);
9331 
9332 	return 0;
9333 
9334 err_mdiobus_unreg:
9335 	if (hdev->hw.mac.phydev)
9336 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9337 err_msi_irq_uninit:
9338 	hclge_misc_irq_uninit(hdev);
9339 err_msi_uninit:
9340 	pci_free_irq_vectors(pdev);
9341 err_cmd_uninit:
9342 	hclge_cmd_uninit(hdev);
9343 err_pci_uninit:
9344 	pcim_iounmap(pdev, hdev->hw.io_base);
9345 	pci_clear_master(pdev);
9346 	pci_release_regions(pdev);
9347 	pci_disable_device(pdev);
9348 out:
9349 	return ret;
9350 }
9351 
9352 static void hclge_stats_clear(struct hclge_dev *hdev)
9353 {
9354 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9355 }
9356 
9357 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9358 {
9359 	struct hclge_vport *vport = hdev->vport;
9360 	int i;
9361 
9362 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9363 		hclge_vport_stop(vport);
9364 		vport++;
9365 	}
9366 }
9367 
9368 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9369 {
9370 	struct hclge_dev *hdev = ae_dev->priv;
9371 	struct pci_dev *pdev = ae_dev->pdev;
9372 	int ret;
9373 
9374 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9375 
9376 	hclge_stats_clear(hdev);
9377 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9378 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9379 
9380 	ret = hclge_cmd_init(hdev);
9381 	if (ret) {
9382 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9383 		return ret;
9384 	}
9385 
9386 	ret = hclge_map_tqp(hdev);
9387 	if (ret) {
9388 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9389 		return ret;
9390 	}
9391 
9392 	hclge_reset_umv_space(hdev);
9393 
9394 	ret = hclge_mac_init(hdev);
9395 	if (ret) {
9396 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9397 		return ret;
9398 	}
9399 
9400 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9401 	if (ret) {
9402 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9403 		return ret;
9404 	}
9405 
9406 	ret = hclge_config_gro(hdev, true);
9407 	if (ret)
9408 		return ret;
9409 
9410 	ret = hclge_init_vlan_config(hdev);
9411 	if (ret) {
9412 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9413 		return ret;
9414 	}
9415 
9416 	ret = hclge_tm_init_hw(hdev, true);
9417 	if (ret) {
9418 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9419 		return ret;
9420 	}
9421 
9422 	ret = hclge_rss_init_hw(hdev);
9423 	if (ret) {
9424 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9425 		return ret;
9426 	}
9427 
9428 	ret = hclge_init_fd_config(hdev);
9429 	if (ret) {
9430 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9431 		return ret;
9432 	}
9433 
9434 	/* Re-enable the hw error interrupts because
9435 	 * the interrupts get disabled on global reset.
9436 	 */
9437 	ret = hclge_config_nic_hw_error(hdev, true);
9438 	if (ret) {
9439 		dev_err(&pdev->dev,
9440 			"fail(%d) to re-enable NIC hw error interrupts\n",
9441 			ret);
9442 		return ret;
9443 	}
9444 
9445 	if (hdev->roce_client) {
9446 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9447 		if (ret) {
9448 			dev_err(&pdev->dev,
9449 				"fail(%d) to re-enable roce ras interrupts\n",
9450 				ret);
9451 			return ret;
9452 		}
9453 	}
9454 
9455 	hclge_reset_vport_state(hdev);
9456 
9457 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9458 		 HCLGE_DRIVER_NAME);
9459 
9460 	return 0;
9461 }
9462 
9463 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9464 {
9465 	struct hclge_dev *hdev = ae_dev->priv;
9466 	struct hclge_mac *mac = &hdev->hw.mac;
9467 
9468 	hclge_misc_affinity_teardown(hdev);
9469 	hclge_state_uninit(hdev);
9470 
9471 	if (mac->phydev)
9472 		mdiobus_unregister(mac->mdio_bus);
9473 
9474 	hclge_uninit_umv_space(hdev);
9475 
9476 	/* Disable MISC vector(vector0) */
9477 	hclge_enable_vector(&hdev->misc_vector, false);
9478 	synchronize_irq(hdev->misc_vector.vector_irq);
9479 
9480 	/* Disable all hw interrupts */
9481 	hclge_config_mac_tnl_int(hdev, false);
9482 	hclge_config_nic_hw_error(hdev, false);
9483 	hclge_config_rocee_ras_interrupt(hdev, false);
9484 
9485 	hclge_cmd_uninit(hdev);
9486 	hclge_misc_irq_uninit(hdev);
9487 	hclge_pci_uninit(hdev);
9488 	mutex_destroy(&hdev->vport_lock);
9489 	hclge_uninit_vport_mac_table(hdev);
9490 	hclge_uninit_vport_vlan_table(hdev);
9491 	mutex_destroy(&hdev->vport_cfg_mutex);
9492 	ae_dev->priv = NULL;
9493 }
9494 
9495 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9496 {
9497 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9498 	struct hclge_vport *vport = hclge_get_vport(handle);
9499 	struct hclge_dev *hdev = vport->back;
9500 
9501 	return min_t(u32, hdev->rss_size_max,
9502 		     vport->alloc_tqps / kinfo->num_tc);
9503 }
9504 
9505 static void hclge_get_channels(struct hnae3_handle *handle,
9506 			       struct ethtool_channels *ch)
9507 {
9508 	ch->max_combined = hclge_get_max_channels(handle);
9509 	ch->other_count = 1;
9510 	ch->max_other = 1;
9511 	ch->combined_count = handle->kinfo.rss_size;
9512 }
9513 
9514 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9515 					u16 *alloc_tqps, u16 *max_rss_size)
9516 {
9517 	struct hclge_vport *vport = hclge_get_vport(handle);
9518 	struct hclge_dev *hdev = vport->back;
9519 
9520 	*alloc_tqps = vport->alloc_tqps;
9521 	*max_rss_size = hdev->rss_size_max;
9522 }
9523 
9524 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9525 			      bool rxfh_configured)
9526 {
9527 	struct hclge_vport *vport = hclge_get_vport(handle);
9528 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9529 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9530 	struct hclge_dev *hdev = vport->back;
9531 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9532 	int cur_rss_size = kinfo->rss_size;
9533 	int cur_tqps = kinfo->num_tqps;
9534 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9535 	u16 roundup_size;
9536 	u32 *rss_indir;
9537 	unsigned int i;
9538 	int ret;
9539 
9540 	kinfo->req_rss_size = new_tqps_num;
9541 
9542 	ret = hclge_tm_vport_map_update(hdev);
9543 	if (ret) {
9544 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9545 		return ret;
9546 	}
9547 
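	/* tc_size holds the log2 of rss_size rounded up to a power of two */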
9548 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9549 	roundup_size = ilog2(roundup_size);
9550 	/* Set the RSS TC mode according to the new RSS size */
9551 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9552 		tc_valid[i] = 0;
9553 
9554 		if (!(hdev->hw_tc_map & BIT(i)))
9555 			continue;
9556 
9557 		tc_valid[i] = 1;
9558 		tc_size[i] = roundup_size;
9559 		tc_offset[i] = kinfo->rss_size * i;
9560 	}
9561 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9562 	if (ret)
9563 		return ret;
9564 
	/* RSS indirection table has been configured by the user */
9566 	if (rxfh_configured)
9567 		goto out;
9568 
	/* Reinitialize the RSS indirection table for the new RSS size */
9570 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9571 	if (!rss_indir)
9572 		return -ENOMEM;
9573 
9574 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9575 		rss_indir[i] = i % kinfo->rss_size;
9576 
9577 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9578 	if (ret)
9579 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9580 			ret);
9581 
9582 	kfree(rss_indir);
9583 
9584 out:
9585 	if (!ret)
9586 		dev_info(&hdev->pdev->dev,
9587 			 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
9588 			 cur_rss_size, kinfo->rss_size,
9589 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9590 
9591 	return ret;
9592 }
9593 
9594 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9595 			      u32 *regs_num_64_bit)
9596 {
9597 	struct hclge_desc desc;
9598 	u32 total_num;
9599 	int ret;
9600 
9601 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9602 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9603 	if (ret) {
9604 		dev_err(&hdev->pdev->dev,
9605 			"Query register number cmd failed, ret = %d.\n", ret);
9606 		return ret;
9607 	}
9608 
9609 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
9610 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
9611 
9612 	total_num = *regs_num_32_bit + *regs_num_64_bit;
9613 	if (!total_num)
9614 		return -EINVAL;
9615 
9616 	return 0;
9617 }
9618 
9619 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9620 				 void *data)
9621 {
9622 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9623 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9624 
9625 	struct hclge_desc *desc;
9626 	u32 *reg_val = data;
9627 	__le32 *desc_data;
9628 	int nodata_num;
9629 	int cmd_num;
9630 	int i, k, n;
9631 	int ret;
9632 
9633 	if (regs_num == 0)
9634 		return 0;
9635 
9636 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9637 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9638 			       HCLGE_32_BIT_REG_RTN_DATANUM);
9639 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9640 	if (!desc)
9641 		return -ENOMEM;
9642 
9643 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9644 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9645 	if (ret) {
9646 		dev_err(&hdev->pdev->dev,
9647 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
9648 		kfree(desc);
9649 		return ret;
9650 	}
9651 
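	/* the first nodata_num entries of the first descriptor are not
	 * register data, so it carries fewer values than the others
	 */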
9652 	for (i = 0; i < cmd_num; i++) {
9653 		if (i == 0) {
9654 			desc_data = (__le32 *)(&desc[i].data[0]);
9655 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9656 		} else {
9657 			desc_data = (__le32 *)(&desc[i]);
9658 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
9659 		}
9660 		for (k = 0; k < n; k++) {
9661 			*reg_val++ = le32_to_cpu(*desc_data++);
9662 
9663 			regs_num--;
9664 			if (!regs_num)
9665 				break;
9666 		}
9667 	}
9668 
9669 	kfree(desc);
9670 	return 0;
9671 }
9672 
9673 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9674 				 void *data)
9675 {
9676 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9677 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9678 
9679 	struct hclge_desc *desc;
9680 	u64 *reg_val = data;
9681 	__le64 *desc_data;
9682 	int nodata_len;
9683 	int cmd_num;
9684 	int i, k, n;
9685 	int ret;
9686 
9687 	if (regs_num == 0)
9688 		return 0;
9689 
9690 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9691 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9692 			       HCLGE_64_BIT_REG_RTN_DATANUM);
9693 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9694 	if (!desc)
9695 		return -ENOMEM;
9696 
9697 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9698 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9699 	if (ret) {
9700 		dev_err(&hdev->pdev->dev,
9701 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
9702 		kfree(desc);
9703 		return ret;
9704 	}
9705 
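	/* as in the 32 bit query, the first descriptor carries
	 * nodata_len fewer register values
	 */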
9706 	for (i = 0; i < cmd_num; i++) {
9707 		if (i == 0) {
9708 			desc_data = (__le64 *)(&desc[i].data[0]);
9709 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9710 		} else {
9711 			desc_data = (__le64 *)(&desc[i]);
9712 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
9713 		}
9714 		for (k = 0; k < n; k++) {
9715 			*reg_val++ = le64_to_cpu(*desc_data++);
9716 
9717 			regs_num--;
9718 			if (!regs_num)
9719 				break;
9720 		}
9721 	}
9722 
9723 	kfree(desc);
9724 	return 0;
9725 }
9726 
9727 #define MAX_SEPARATE_NUM	4
9728 #define SEPARATOR_VALUE		0xFDFCFBFA
9729 #define REG_NUM_PER_LINE	4
9730 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
9731 #define REG_SEPARATOR_LINE	1
9732 #define REG_NUM_REMAIN_MASK	3
9733 #define BD_LIST_MAX_NUM		30
9734 
9735 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9736 {
	/* prepare 4 commands to query the DFX BD number; HCLGE_CMD_FLAG_NEXT
	 * chains them into a single request
	 */
9738 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9739 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9740 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9741 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9742 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9743 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9744 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9745 
9746 	return hclge_cmd_send(&hdev->hw, desc, 4);
9747 }
9748 
9749 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9750 				    int *bd_num_list,
9751 				    u32 type_num)
9752 {
9753 #define HCLGE_DFX_REG_BD_NUM	4
9754 
9755 	u32 entries_per_desc, desc_index, index, offset, i;
9756 	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9757 	int ret;
9758 
9759 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
9760 	if (ret) {
9761 		dev_err(&hdev->pdev->dev,
9762 			"Get dfx bd num fail, status is %d.\n", ret);
9763 		return ret;
9764 	}
9765 
9766 	entries_per_desc = ARRAY_SIZE(desc[0].data);
9767 	for (i = 0; i < type_num; i++) {
9768 		offset = hclge_dfx_bd_offset_list[i];
9769 		index = offset % entries_per_desc;
9770 		desc_index = offset / entries_per_desc;
9771 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9772 	}
9773 
9774 	return ret;
9775 }
9776 
9777 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9778 				  struct hclge_desc *desc_src, int bd_num,
9779 				  enum hclge_opcode_type cmd)
9780 {
9781 	struct hclge_desc *desc = desc_src;
9782 	int i, ret;
9783 
9784 	hclge_cmd_setup_basic_desc(desc, cmd, true);
9785 	for (i = 0; i < bd_num - 1; i++) {
9786 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9787 		desc++;
9788 		hclge_cmd_setup_basic_desc(desc, cmd, true);
9789 	}
9790 
9791 	desc = desc_src;
9792 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9793 	if (ret)
9794 		dev_err(&hdev->pdev->dev,
9795 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9796 			cmd, ret);
9797 
9798 	return ret;
9799 }
9800 
9801 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9802 				    void *data)
9803 {
9804 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9805 	struct hclge_desc *desc = desc_src;
9806 	u32 *reg = data;
9807 
9808 	entries_per_desc = ARRAY_SIZE(desc->data);
9809 	reg_num = entries_per_desc * bd_num;
9810 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
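	/* copy the register values, then pad with SEPARATOR_VALUE markers */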
9811 	for (i = 0; i < reg_num; i++) {
9812 		index = i % entries_per_desc;
9813 		desc_index = i / entries_per_desc;
9814 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
9815 	}
9816 	for (i = 0; i < separator_num; i++)
9817 		*reg++ = SEPARATOR_VALUE;
9818 
9819 	return reg_num + separator_num;
9820 }
9821 
9822 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9823 {
9824 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9825 	int data_len_per_desc, data_len, bd_num, i;
9826 	int bd_num_list[BD_LIST_MAX_NUM];
9827 	int ret;
9828 
9829 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9830 	if (ret) {
9831 		dev_err(&hdev->pdev->dev,
9832 			"Get dfx reg bd num fail, status is %d.\n", ret);
9833 		return ret;
9834 	}
9835 
9836 	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9837 	*len = 0;
9838 	for (i = 0; i < dfx_reg_type_num; i++) {
9839 		bd_num = bd_num_list[i];
9840 		data_len = data_len_per_desc * bd_num;
9841 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9842 	}
9843 
9844 	return ret;
9845 }
9846 
9847 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9848 {
9849 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9850 	int bd_num, bd_num_max, buf_len, i;
9851 	int bd_num_list[BD_LIST_MAX_NUM];
9852 	struct hclge_desc *desc_src;
9853 	u32 *reg = data;
9854 	int ret;
9855 
9856 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9857 	if (ret) {
9858 		dev_err(&hdev->pdev->dev,
9859 			"Get dfx reg bd num fail, status is %d.\n", ret);
9860 		return ret;
9861 	}
9862 
9863 	bd_num_max = bd_num_list[0];
9864 	for (i = 1; i < dfx_reg_type_num; i++)
9865 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9866 
9867 	buf_len = sizeof(*desc_src) * bd_num_max;
9868 	desc_src = kzalloc(buf_len, GFP_KERNEL);
9869 	if (!desc_src) {
9870 		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9871 		return -ENOMEM;
9872 	}
9873 
9874 	for (i = 0; i < dfx_reg_type_num; i++) {
9875 		bd_num = bd_num_list[i];
9876 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9877 					     hclge_dfx_reg_opcode_list[i]);
9878 		if (ret) {
9879 			dev_err(&hdev->pdev->dev,
9880 				"Get dfx reg fail, status is %d.\n", ret);
9881 			break;
9882 		}
9883 
9884 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9885 	}
9886 
9887 	kfree(desc_src);
9888 	return ret;
9889 }
9890 
9891 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9892 			      struct hnae3_knic_private_info *kinfo)
9893 {
9894 #define HCLGE_RING_REG_OFFSET		0x200
9895 #define HCLGE_RING_INT_REG_OFFSET	0x4
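	/* HCLGE_RING_REG_OFFSET is the register stride between consecutive
	 * TQPs; HCLGE_RING_INT_REG_OFFSET is the stride between interrupt
	 * vectors
	 */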
9896 
9897 	int i, j, reg_num, separator_num;
9898 	int data_num_sum;
9899 	u32 *reg = data;
9900 
	/* fetch per-PF register values from the PF PCIe register space */
9902 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9903 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9904 	for (i = 0; i < reg_num; i++)
9905 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9906 	for (i = 0; i < separator_num; i++)
9907 		*reg++ = SEPARATOR_VALUE;
9908 	data_num_sum = reg_num + separator_num;
9909 
9910 	reg_num = ARRAY_SIZE(common_reg_addr_list);
9911 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9912 	for (i = 0; i < reg_num; i++)
9913 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9914 	for (i = 0; i < separator_num; i++)
9915 		*reg++ = SEPARATOR_VALUE;
9916 	data_num_sum += reg_num + separator_num;
9917 
9918 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
9919 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9920 	for (j = 0; j < kinfo->num_tqps; j++) {
9921 		for (i = 0; i < reg_num; i++)
9922 			*reg++ = hclge_read_dev(&hdev->hw,
9923 						ring_reg_addr_list[i] +
9924 						HCLGE_RING_REG_OFFSET * j);
9925 		for (i = 0; i < separator_num; i++)
9926 			*reg++ = SEPARATOR_VALUE;
9927 	}
9928 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9929 
9930 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9931 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9932 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
9933 		for (i = 0; i < reg_num; i++)
9934 			*reg++ = hclge_read_dev(&hdev->hw,
9935 						tqp_intr_reg_addr_list[i] +
9936 						HCLGE_RING_INT_REG_OFFSET * j);
9937 		for (i = 0; i < separator_num; i++)
9938 			*reg++ = SEPARATOR_VALUE;
9939 	}
9940 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9941 
9942 	return data_num_sum;
9943 }
9944 
9945 static int hclge_get_regs_len(struct hnae3_handle *handle)
9946 {
9947 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9948 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9949 	struct hclge_vport *vport = hclge_get_vport(handle);
9950 	struct hclge_dev *hdev = vport->back;
9951 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
9952 	int regs_lines_32_bit, regs_lines_64_bit;
9953 	int ret;
9954 
9955 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9956 	if (ret) {
9957 		dev_err(&hdev->pdev->dev,
9958 			"Get register number failed, ret = %d.\n", ret);
9959 		return ret;
9960 	}
9961 
9962 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9963 	if (ret) {
9964 		dev_err(&hdev->pdev->dev,
9965 			"Get dfx reg len failed, ret = %d.\n", ret);
9966 		return ret;
9967 	}
9968 
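	/* each register block is reported in REG_LEN_PER_LINE chunks and is
	 * followed by one separator line
	 */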
9969 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9970 		REG_SEPARATOR_LINE;
9971 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9972 		REG_SEPARATOR_LINE;
9973 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9974 		REG_SEPARATOR_LINE;
9975 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9976 		REG_SEPARATOR_LINE;
9977 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9978 		REG_SEPARATOR_LINE;
9979 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9980 		REG_SEPARATOR_LINE;
9981 
9982 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9983 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9984 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9985 }
9986 
9987 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9988 			   void *data)
9989 {
9990 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9991 	struct hclge_vport *vport = hclge_get_vport(handle);
9992 	struct hclge_dev *hdev = vport->back;
9993 	u32 regs_num_32_bit, regs_num_64_bit;
9994 	int i, reg_num, separator_num, ret;
9995 	u32 *reg = data;
9996 
9997 	*version = hdev->fw_version;
9998 
9999 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10000 	if (ret) {
10001 		dev_err(&hdev->pdev->dev,
10002 			"Get register number failed, ret = %d.\n", ret);
10003 		return;
10004 	}
10005 
10006 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10007 
10008 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10009 	if (ret) {
10010 		dev_err(&hdev->pdev->dev,
10011 			"Get 32 bit register failed, ret = %d.\n", ret);
10012 		return;
10013 	}
10014 	reg_num = regs_num_32_bit;
10015 	reg += reg_num;
10016 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10017 	for (i = 0; i < separator_num; i++)
10018 		*reg++ = SEPARATOR_VALUE;
10019 
10020 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10021 	if (ret) {
10022 		dev_err(&hdev->pdev->dev,
10023 			"Get 64 bit register failed, ret = %d.\n", ret);
10024 		return;
10025 	}
10026 	reg_num = regs_num_64_bit * 2;
10027 	reg += reg_num;
10028 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10029 	for (i = 0; i < separator_num; i++)
10030 		*reg++ = SEPARATOR_VALUE;
10031 
10032 	ret = hclge_get_dfx_reg(hdev, reg);
10033 	if (ret)
10034 		dev_err(&hdev->pdev->dev,
10035 			"Get dfx register failed, ret = %d.\n", ret);
10036 }
10037 
10038 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10039 {
10040 	struct hclge_set_led_state_cmd *req;
10041 	struct hclge_desc desc;
10042 	int ret;
10043 
10044 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10045 
10046 	req = (struct hclge_set_led_state_cmd *)desc.data;
10047 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10048 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10049 
10050 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10051 	if (ret)
10052 		dev_err(&hdev->pdev->dev,
10053 			"Send set led state cmd error, ret =%d\n", ret);
10054 
10055 	return ret;
10056 }
10057 
10058 enum hclge_led_status {
10059 	HCLGE_LED_OFF,
10060 	HCLGE_LED_ON,
10061 	HCLGE_LED_NO_CHANGE = 0xFF,
10062 };
10063 
10064 static int hclge_set_led_id(struct hnae3_handle *handle,
10065 			    enum ethtool_phys_id_state status)
10066 {
10067 	struct hclge_vport *vport = hclge_get_vport(handle);
10068 	struct hclge_dev *hdev = vport->back;
10069 
10070 	switch (status) {
10071 	case ETHTOOL_ID_ACTIVE:
10072 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10073 	case ETHTOOL_ID_INACTIVE:
10074 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10075 	default:
10076 		return -EINVAL;
10077 	}
10078 }
10079 
10080 static void hclge_get_link_mode(struct hnae3_handle *handle,
10081 				unsigned long *supported,
10082 				unsigned long *advertising)
10083 {
10084 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10085 	struct hclge_vport *vport = hclge_get_vport(handle);
10086 	struct hclge_dev *hdev = vport->back;
10087 	unsigned int idx = 0;
10088 
10089 	for (; idx < size; idx++) {
10090 		supported[idx] = hdev->hw.mac.supported[idx];
10091 		advertising[idx] = hdev->hw.mac.advertising[idx];
10092 	}
10093 }
10094 
10095 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10096 {
10097 	struct hclge_vport *vport = hclge_get_vport(handle);
10098 	struct hclge_dev *hdev = vport->back;
10099 
10100 	return hclge_config_gro(hdev, enable);
10101 }
10102 
10103 static const struct hnae3_ae_ops hclge_ops = {
10104 	.init_ae_dev = hclge_init_ae_dev,
10105 	.uninit_ae_dev = hclge_uninit_ae_dev,
10106 	.flr_prepare = hclge_flr_prepare,
10107 	.flr_done = hclge_flr_done,
10108 	.init_client_instance = hclge_init_client_instance,
10109 	.uninit_client_instance = hclge_uninit_client_instance,
10110 	.map_ring_to_vector = hclge_map_ring_to_vector,
10111 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10112 	.get_vector = hclge_get_vector,
10113 	.put_vector = hclge_put_vector,
10114 	.set_promisc_mode = hclge_set_promisc_mode,
10115 	.set_loopback = hclge_set_loopback,
10116 	.start = hclge_ae_start,
10117 	.stop = hclge_ae_stop,
10118 	.client_start = hclge_client_start,
10119 	.client_stop = hclge_client_stop,
10120 	.get_status = hclge_get_status,
10121 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10122 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10123 	.get_media_type = hclge_get_media_type,
10124 	.check_port_speed = hclge_check_port_speed,
10125 	.get_fec = hclge_get_fec,
10126 	.set_fec = hclge_set_fec,
10127 	.get_rss_key_size = hclge_get_rss_key_size,
10128 	.get_rss_indir_size = hclge_get_rss_indir_size,
10129 	.get_rss = hclge_get_rss,
10130 	.set_rss = hclge_set_rss,
10131 	.set_rss_tuple = hclge_set_rss_tuple,
10132 	.get_rss_tuple = hclge_get_rss_tuple,
10133 	.get_tc_size = hclge_get_tc_size,
10134 	.get_mac_addr = hclge_get_mac_addr,
10135 	.set_mac_addr = hclge_set_mac_addr,
10136 	.do_ioctl = hclge_do_ioctl,
10137 	.add_uc_addr = hclge_add_uc_addr,
10138 	.rm_uc_addr = hclge_rm_uc_addr,
10139 	.add_mc_addr = hclge_add_mc_addr,
10140 	.rm_mc_addr = hclge_rm_mc_addr,
10141 	.set_autoneg = hclge_set_autoneg,
10142 	.get_autoneg = hclge_get_autoneg,
10143 	.restart_autoneg = hclge_restart_autoneg,
10144 	.halt_autoneg = hclge_halt_autoneg,
10145 	.get_pauseparam = hclge_get_pauseparam,
10146 	.set_pauseparam = hclge_set_pauseparam,
10147 	.set_mtu = hclge_set_mtu,
10148 	.reset_queue = hclge_reset_tqp,
10149 	.get_stats = hclge_get_stats,
10150 	.get_mac_stats = hclge_get_mac_stat,
10151 	.update_stats = hclge_update_stats,
10152 	.get_strings = hclge_get_strings,
10153 	.get_sset_count = hclge_get_sset_count,
10154 	.get_fw_version = hclge_get_fw_version,
10155 	.get_mdix_mode = hclge_get_mdix_mode,
10156 	.enable_vlan_filter = hclge_enable_vlan_filter,
10157 	.set_vlan_filter = hclge_set_vlan_filter,
10158 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10159 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10160 	.reset_event = hclge_reset_event,
10161 	.get_reset_level = hclge_get_reset_level,
10162 	.set_default_reset_request = hclge_set_def_reset_request,
10163 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10164 	.set_channels = hclge_set_channels,
10165 	.get_channels = hclge_get_channels,
10166 	.get_regs_len = hclge_get_regs_len,
10167 	.get_regs = hclge_get_regs,
10168 	.set_led_id = hclge_set_led_id,
10169 	.get_link_mode = hclge_get_link_mode,
10170 	.add_fd_entry = hclge_add_fd_entry,
10171 	.del_fd_entry = hclge_del_fd_entry,
10172 	.del_all_fd_entries = hclge_del_all_fd_entries,
10173 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10174 	.get_fd_rule_info = hclge_get_fd_rule_info,
10175 	.get_fd_all_rules = hclge_get_all_rules,
10176 	.restore_fd_rules = hclge_restore_fd_entries,
10177 	.enable_fd = hclge_enable_fd,
10178 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10179 	.dbg_run_cmd = hclge_dbg_run_cmd,
10180 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10181 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10182 	.ae_dev_resetting = hclge_ae_dev_resetting,
10183 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10184 	.set_gro_en = hclge_gro_en,
10185 	.get_global_queue_id = hclge_covert_handle_qid_global,
10186 	.set_timer_task = hclge_set_timer_task,
10187 	.mac_connect_phy = hclge_mac_connect_phy,
10188 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10189 	.restore_vlan_table = hclge_restore_vlan_table,
10190 };
10191 
10192 static struct hnae3_ae_algo ae_algo = {
10193 	.ops = &hclge_ops,
10194 	.pdev_id_table = ae_algo_pci_tbl,
10195 };
10196 
10197 static int hclge_init(void)
10198 {
10199 	pr_info("%s is initializing\n", HCLGE_NAME);
10200 
10201 	hnae3_register_ae_algo(&ae_algo);
10202 
10203 	return 0;
10204 }
10205 
10206 static void hclge_exit(void)
10207 {
10208 	hnae3_unregister_ae_algo(&ae_algo);
10209 }
10210 module_init(hclge_init);
10211 module_exit(hclge_exit);
10212 
10213 MODULE_LICENSE("GPL");
10214 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10215 MODULE_DESCRIPTION("HCLGE Driver");
10216 MODULE_VERSION(HCLGE_MOD_VERSION);
10217