// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET        1
#define HCLGE_DFX_SSU_0_BD_OFFSET       2
#define HCLGE_DFX_SSU_1_BD_OFFSET       3
#define HCLGE_DFX_IGU_BD_OFFSET         4
#define HCLGE_DFX_RPU_0_BD_OFFSET       5
#define HCLGE_DFX_RPU_1_BD_OFFSET       6
#define HCLGE_DFX_NCSI_BD_OFFSET        7
#define HCLGE_DFX_RTC_BD_OFFSET         8
#define HCLGE_DFX_PPP_BD_OFFSET         9
#define HCLGE_DFX_RCB_BD_OFFSET         10
#define HCLGE_DFX_TQP_BD_OFFSET         11
#define HCLGE_DFX_SSU_2_BD_OFFSET       12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
			       u16 *allocated_size, bool is_alloc);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
					 HCLGE_CMDQ_TX_ADDR_H_REG,
					 HCLGE_CMDQ_TX_DEPTH_REG,
					 HCLGE_CMDQ_TX_TAIL_REG,
					 HCLGE_CMDQ_TX_HEAD_REG,
					 HCLGE_CMDQ_RX_ADDR_L_REG,
					 HCLGE_CMDQ_RX_ADDR_H_REG,
					 HCLGE_CMDQ_RX_DEPTH_REG,
					 HCLGE_CMDQ_RX_TAIL_REG,
					 HCLGE_CMDQ_RX_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_VECTOR0_OTER_EN_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App    Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy    Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
		.i_port_bitmap = 0x1,
	},
};

static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6},
	{ IP_FRAGEMENT, 1},
	{ ROCE_TYPE, 1},
	{ NEXT_KEY, 5},
	{ VLAN_NUMBER, 2},
	{ SRC_VPORT, 12},
	{ DST_VPORT, 12},
	{ TUNNEL_PACKET, 1},
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48},
	{ OUTER_SRC_MAC, 48},
	{ OUTER_VLAN_TAG_FST, 16},
	{ OUTER_VLAN_TAG_SEC, 16},
	{ OUTER_ETH_TYPE, 16},
	{ OUTER_L2_RSV, 16},
	{ OUTER_IP_TOS, 8},
	{ OUTER_IP_PROTO, 8},
	{ OUTER_SRC_IP, 32},
	{ OUTER_DST_IP, 32},
	{ OUTER_L3_RSV, 16},
	{ OUTER_SRC_PORT, 16},
	{ OUTER_DST_PORT, 16},
	{ OUTER_L4_RSV, 32},
	{ OUTER_TUN_VNI, 24},
	{ OUTER_TUN_FLOW_ID, 8},
	{ INNER_DST_MAC, 48},
	{ INNER_SRC_MAC, 48},
	{ INNER_VLAN_TAG_FST, 16},
	{ INNER_VLAN_TAG_SEC, 16},
	{ INNER_ETH_TYPE, 16},
	{ INNER_L2_RSV, 16},
	{ INNER_IP_TOS, 8},
	{ INNER_IP_PROTO, 8},
	{ INNER_SRC_IP, 32},
	{ INNER_DST_IP, 32},
	{ INNER_L3_RSV, 16},
	{ INNER_SRC_PORT, 16},
	{ INNER_DST_PORT, 16},
	{ INNER_L4_RSV, 32},
};

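/* Read the MAC statistics with the fixed-size HCLGE_OPC_STATS_MAC command
 * (a chain of HCLGE_MAC_CMD_NUM descriptors) and accumulate the returned
 * 64-bit counters into hdev->hw_stats.mac_stats.
 */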
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u16 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here.
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

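	/* The first descriptor returns fewer counters than the rest (see
	 * HCLGE_RD_FIRST_STATS_NUM/HCLGE_RD_OTHER_STATS_NUM), hence the
	 * round-up below: one descriptor plus enough extra descriptors,
	 * four counters each, to cover the remaining reg_num - 3 registers.
	 */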
	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);

	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

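/* Query the per-queue RX and TX packet counters from firmware and add them
 * to the software copies kept in each TQP's tqp_stats.
 */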
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATUS,
					   true);

		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d,queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has both a TX and an RX queue */
	return kinfo->num_tqps * (2);
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i = 0;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
			struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
		HNAE3_SUPPORT_PHY_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a PHY device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->pdev->revision >= 0x21 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if (hdev->hw.mac.phydev) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}

	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	return 0;
}

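/* Poll the function status (up to HCLGE_QUERY_MAX_CNT times) until the PF
 * state is reported, then record whether this PF is the main PF.
 */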
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	ret = hclge_parse_func_status(hdev, req);

	return ret;
}

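/* Read the PF resources assigned by firmware: TQP count, packet/TX/DV buffer
 * sizes and the MSI-X vector layout.
 */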
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);

		/* The PF owns both NIC and RoCE vectors; the NIC vectors
		 * come before the RoCE vectors.
		 */
		hdev->num_msi = hdev->num_roce_msi +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
	}

	return 0;
}

static int hclge_parse_speed(int speed_cmd, int *speed)
{
	switch (speed_cmd) {
	case 6:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case 7:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case 0:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case 1:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case 2:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case 3:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case 4:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case 5:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;

	switch (speed) {
	case HCLGE_MAC_SPEED_10M:
		speed_bit = HCLGE_SUPPORT_10M_BIT;
		break;
	case HCLGE_MAC_SPEED_100M:
		speed_bit = HCLGE_SUPPORT_100M_BIT;
		break;
	case HCLGE_MAC_SPEED_1G:
		speed_bit = HCLGE_SUPPORT_1G_BIT;
		break;
	case HCLGE_MAC_SPEED_10G:
		speed_bit = HCLGE_SUPPORT_10G_BIT;
		break;
	case HCLGE_MAC_SPEED_25G:
		speed_bit = HCLGE_SUPPORT_25G_BIT;
		break;
	case HCLGE_MAC_SPEED_40G:
		speed_bit = HCLGE_SUPPORT_40G_BIT;
		break;
	case HCLGE_MAC_SPEED_50G:
		speed_bit = HCLGE_SUPPORT_50G_BIT;
		break;
	case HCLGE_MAC_SPEED_100G:
		speed_bit = HCLGE_SUPPORT_100G_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u8 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hdev->pdev->revision >= 0x21)
		hclge_convert_setting_fec(mac);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u8 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to supporting all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					      HCLGE_CFG_VMDQ_M,
					      HCLGE_CFG_VMDQ_S);
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

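	/* param[2] holds the low 32 bits of the MAC address and param[3]
	 * the high 16 bits; place the high bits above bit 31.
	 */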
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_RSS_SIZE_M,
					    HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* The length is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	ret = hclge_query_pf_resource(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);

	return ret;
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimum number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hclge_cfg cfg;
	unsigned int i;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
		return ret;
	}

	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
	hdev->base_tqp_pid = 0;
	hdev->rss_size_max = cfg.rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the initial affinity based on the PCI function number */
	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
			&hdev->affinity_mask);

	return ret;
}

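/* Tell firmware the minimum and maximum TSO MSS values; each bound is
 * written as a 16-bit field of the TSO generic config command.
 */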
static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
			    unsigned int tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;
	u16 tso_mss;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
	req->tso_mss_min = cpu_to_le16(tso_mss);

	tso_mss = 0;
	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
	req->tso_mss_max = cpu_to_le16(tso_mss);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
			i * HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
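	/* rss_size is bounded by the hardware limit and by the number of
	 * TQPs available per TC.
	 */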
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++)	{
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Give every vport the same number of TQPs; the main vport also
	 * takes the remainder.
	 */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				    struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of PFC-enabled TCs that have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of PFC-disabled TCs that have a private buffer */
1731 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1732 				     struct hclge_pkt_buf_alloc *buf_alloc)
1733 {
1734 	struct hclge_priv_buf *priv;
1735 	unsigned int i;
1736 	int cnt = 0;
1737 
1738 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1739 		priv = &buf_alloc->priv_buf[i];
1740 		if (hdev->hw_tc_map & BIT(i) &&
1741 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1742 		    priv->enable)
1743 			cnt++;
1744 	}
1745 
1746 	return cnt;
1747 }
1748 
1749 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1750 {
1751 	struct hclge_priv_buf *priv;
1752 	u32 rx_priv = 0;
1753 	int i;
1754 
1755 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1756 		priv = &buf_alloc->priv_buf[i];
1757 		if (priv->enable)
1758 			rx_priv += priv->buf_size;
1759 	}
1760 	return rx_priv;
1761 }
1762 
1763 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1764 {
1765 	u32 i, total_tx_size = 0;
1766 
1767 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1768 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1769 
1770 	return total_tx_size;
1771 }
1772 
1773 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1774 				struct hclge_pkt_buf_alloc *buf_alloc,
1775 				u32 rx_all)
1776 {
1777 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1778 	u32 tc_num = hclge_get_tc_num(hdev);
1779 	u32 shared_buf, aligned_mps;
1780 	u32 rx_priv;
1781 	int i;
1782 
1783 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1784 
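	/* The shared buffer must be at least shared_std bytes: the larger
	 * of a fixed minimum including the DV buffer (shared_buf_min) and
	 * one aligned MPS per enabled TC plus one extra MPS (shared_buf_tc),
	 * rounded up to the buffer size unit.
	 */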
1785 	if (hnae3_dev_dcb_supported(hdev))
1786 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1787 					hdev->dv_buf_size;
1788 	else
1789 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1790 					+ hdev->dv_buf_size;
1791 
1792 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1793 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1794 			     HCLGE_BUF_SIZE_UNIT);
1795 
1796 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1797 	if (rx_all < rx_priv + shared_std)
1798 		return false;
1799 
1800 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1801 	buf_alloc->s_buf.buf_size = shared_buf;
1802 	if (hnae3_dev_dcb_supported(hdev)) {
1803 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1804 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1805 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1806 				  HCLGE_BUF_SIZE_UNIT);
1807 	} else {
1808 		buf_alloc->s_buf.self.high = aligned_mps +
1809 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1810 		buf_alloc->s_buf.self.low = aligned_mps;
1811 	}
1812 
1813 	if (hnae3_dev_dcb_supported(hdev)) {
1814 		hi_thrd = shared_buf - hdev->dv_buf_size;
1815 
1816 		if (tc_num <= NEED_RESERVE_TC_NUM)
1817 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1818 					/ BUF_MAX_PERCENT;
1819 
1820 		if (tc_num)
1821 			hi_thrd = hi_thrd / tc_num;
1822 
1823 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1824 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1825 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1826 	} else {
1827 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1828 		lo_thrd = aligned_mps;
1829 	}
1830 
1831 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1832 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1833 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1834 	}
1835 
1836 	return true;
1837 }
1838 
1839 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1840 				struct hclge_pkt_buf_alloc *buf_alloc)
1841 {
1842 	u32 i, total_size;
1843 
1844 	total_size = hdev->pkt_buf_size;
1845 
1846 	/* alloc tx buffer for all enabled TCs */
1847 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1848 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1849 
1850 		if (hdev->hw_tc_map & BIT(i)) {
1851 			if (total_size < hdev->tx_buf_size)
1852 				return -ENOMEM;
1853 
1854 			priv->tx_buf_size = hdev->tx_buf_size;
1855 		} else {
1856 			priv->tx_buf_size = 0;
1857 		}
1858 
1859 		total_size -= priv->tx_buf_size;
1860 	}
1861 
1862 	return 0;
1863 }
1864 
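/* Assign a private buffer and waterlines to every enabled TC, then check
 * whether the remaining rx packet buffer can still hold the shared buffer.
 * @max selects the larger waterline scheme; the caller retries with
 * @max set to false when the larger scheme does not fit.
 */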
1865 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1866 				  struct hclge_pkt_buf_alloc *buf_alloc)
1867 {
1868 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1869 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1870 	unsigned int i;
1871 
1872 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1873 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1874 
1875 		priv->enable = 0;
1876 		priv->wl.low = 0;
1877 		priv->wl.high = 0;
1878 		priv->buf_size = 0;
1879 
1880 		if (!(hdev->hw_tc_map & BIT(i)))
1881 			continue;
1882 
1883 		priv->enable = 1;
1884 
1885 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1886 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1887 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1888 						HCLGE_BUF_SIZE_UNIT);
1889 		} else {
1890 			priv->wl.low = 0;
1891 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1892 					aligned_mps;
1893 		}
1894 
1895 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1896 	}
1897 
1898 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1899 }
1900 
1901 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1902 					  struct hclge_pkt_buf_alloc *buf_alloc)
1903 {
1904 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1905 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1906 	int i;
1907 
1908 	/* let the last TC's private buffer be cleared first */
1909 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1910 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1911 		unsigned int mask = BIT((unsigned int)i);
1912 
1913 		if (hdev->hw_tc_map & mask &&
1914 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1915 			/* Clear the no pfc TC private buffer */
1916 			priv->wl.low = 0;
1917 			priv->wl.high = 0;
1918 			priv->buf_size = 0;
1919 			priv->enable = 0;
1920 			no_pfc_priv_num--;
1921 		}
1922 
1923 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1924 		    no_pfc_priv_num == 0)
1925 			break;
1926 	}
1927 
1928 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1929 }
1930 
1931 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1932 					struct hclge_pkt_buf_alloc *buf_alloc)
1933 {
1934 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1935 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1936 	int i;
1937 
1938 	/* let the last TC's private buffer be cleared first */
1939 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1940 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1941 		unsigned int mask = BIT((unsigned int)i);
1942 
1943 		if (hdev->hw_tc_map & mask &&
1944 		    hdev->tm_info.hw_pfc_map & mask) {
1945 			/* Reduce the number of pfc TC with private buffer */
1946 			priv->wl.low = 0;
1947 			priv->enable = 0;
1948 			priv->wl.high = 0;
1949 			priv->buf_size = 0;
1950 			pfc_priv_num--;
1951 		}
1952 
1953 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1954 		    pfc_priv_num == 0)
1955 			break;
1956 	}
1957 
1958 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1959 }
1960 
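/* Split the whole rx packet buffer evenly among the enabled TCs as private
 * buffers and leave no shared buffer. This only succeeds when every TC can
 * get at least min_rx_priv bytes.
 */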
1961 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1962 				       struct hclge_pkt_buf_alloc *buf_alloc)
1963 {
1964 #define COMPENSATE_BUFFER	0x3C00
1965 #define COMPENSATE_HALF_MPS_NUM	5
1966 #define PRIV_WL_GAP		0x1800
1967 
1968 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1969 	u32 tc_num = hclge_get_tc_num(hdev);
1970 	u32 half_mps = hdev->mps >> 1;
1971 	u32 min_rx_priv;
1972 	unsigned int i;
1973 
1974 	if (tc_num)
1975 		rx_priv = rx_priv / tc_num;
1976 
1977 	if (tc_num <= NEED_RESERVE_TC_NUM)
1978 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1979 
1980 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1981 			COMPENSATE_HALF_MPS_NUM * half_mps;
1982 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1983 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1984 
1985 	if (rx_priv < min_rx_priv)
1986 		return false;
1987 
1988 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1989 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1990 
1991 		priv->enable = 0;
1992 		priv->wl.low = 0;
1993 		priv->wl.high = 0;
1994 		priv->buf_size = 0;
1995 
1996 		if (!(hdev->hw_tc_map & BIT(i)))
1997 			continue;
1998 
1999 		priv->enable = 1;
2000 		priv->buf_size = rx_priv;
2001 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2002 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2003 	}
2004 
2005 	buf_alloc->s_buf.buf_size = 0;
2006 
2007 	return true;
2008 }
2009 
2010 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2011  * @hdev: pointer to struct hclge_dev
2012  * @buf_alloc: pointer to buffer calculation data
2013  * @return: 0: calculation successful, negative: fail
2014  */
2015 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2016 				struct hclge_pkt_buf_alloc *buf_alloc)
2017 {
2018 	/* When DCB is not supported, rx private buffer is not allocated. */
2019 	if (!hnae3_dev_dcb_supported(hdev)) {
2020 		u32 rx_all = hdev->pkt_buf_size;
2021 
2022 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2023 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2024 			return -ENOMEM;
2025 
2026 		return 0;
2027 	}
2028 
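	/* Try the allocation strategies below in order until one of them
	 * fits into the rx packet buffer.
	 */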
2029 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2030 		return 0;
2031 
2032 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2033 		return 0;
2034 
2035 	/* try to decrease the buffer size */
2036 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2037 		return 0;
2038 
2039 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2040 		return 0;
2041 
2042 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2043 		return 0;
2044 
2045 	return -ENOMEM;
2046 }
2047 
2048 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2049 				   struct hclge_pkt_buf_alloc *buf_alloc)
2050 {
2051 	struct hclge_rx_priv_buff_cmd *req;
2052 	struct hclge_desc desc;
2053 	int ret;
2054 	int i;
2055 
2056 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2057 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2058 
2059 	/* Alloc private buffer TCs */
2060 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2061 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2062 
2063 		req->buf_num[i] =
2064 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2065 		req->buf_num[i] |=
2066 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2067 	}
2068 
2069 	req->shared_buf =
2070 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2071 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2072 
2073 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2074 	if (ret)
2075 		dev_err(&hdev->pdev->dev,
2076 			"rx private buffer alloc cmd failed %d\n", ret);
2077 
2078 	return ret;
2079 }
2080 
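/* Configure the high/low waterlines of each TC's rx private buffer. The
 * TCs are split across two descriptors, chained via the NEXT flag and
 * sent as a single command.
 */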
2081 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2082 				   struct hclge_pkt_buf_alloc *buf_alloc)
2083 {
2084 	struct hclge_rx_priv_wl_buf *req;
2085 	struct hclge_priv_buf *priv;
2086 	struct hclge_desc desc[2];
2087 	int i, j;
2088 	int ret;
2089 
2090 	for (i = 0; i < 2; i++) {
2091 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2092 					   false);
2093 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2094 
2095 		/* The first descriptor sets the NEXT bit to 1 */
2096 		if (i == 0)
2097 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2098 		else
2099 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2100 
2101 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2102 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2103 
2104 			priv = &buf_alloc->priv_buf[idx];
2105 			req->tc_wl[j].high =
2106 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2107 			req->tc_wl[j].high |=
2108 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2109 			req->tc_wl[j].low =
2110 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2111 			req->tc_wl[j].low |=
2112 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2113 		}
2114 	}
2115 
2116 	/* Send 2 descriptors at one time */
2117 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2118 	if (ret)
2119 		dev_err(&hdev->pdev->dev,
2120 			"rx private waterline config cmd failed %d\n",
2121 			ret);
2122 	return ret;
2123 }
2124 
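/* Configure each TC's high/low thresholds for the shared (common) rx
 * buffer, again using two chained descriptors.
 */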
2125 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2126 				    struct hclge_pkt_buf_alloc *buf_alloc)
2127 {
2128 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2129 	struct hclge_rx_com_thrd *req;
2130 	struct hclge_desc desc[2];
2131 	struct hclge_tc_thrd *tc;
2132 	int i, j;
2133 	int ret;
2134 
2135 	for (i = 0; i < 2; i++) {
2136 		hclge_cmd_setup_basic_desc(&desc[i],
2137 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2138 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2139 
2140 		/* The first descriptor sets the NEXT bit to 1 */
2141 		if (i == 0)
2142 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2143 		else
2144 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2145 
2146 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2147 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2148 
2149 			req->com_thrd[j].high =
2150 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2151 			req->com_thrd[j].high |=
2152 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2153 			req->com_thrd[j].low =
2154 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2155 			req->com_thrd[j].low |=
2156 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2157 		}
2158 	}
2159 
2160 	/* Send 2 descriptors at one time */
2161 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2162 	if (ret)
2163 		dev_err(&hdev->pdev->dev,
2164 			"common threshold config cmd failed %d\n", ret);
2165 	return ret;
2166 }
2167 
2168 static int hclge_common_wl_config(struct hclge_dev *hdev,
2169 				  struct hclge_pkt_buf_alloc *buf_alloc)
2170 {
2171 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2172 	struct hclge_rx_com_wl *req;
2173 	struct hclge_desc desc;
2174 	int ret;
2175 
2176 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2177 
2178 	req = (struct hclge_rx_com_wl *)desc.data;
2179 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2180 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2181 
2182 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2183 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2184 
2185 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2186 	if (ret)
2187 		dev_err(&hdev->pdev->dev,
2188 			"common waterline config cmd failed %d\n", ret);
2189 
2190 	return ret;
2191 }
2192 
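/* Calculate and program the whole packet buffer layout: per-TC tx buffers,
 * per-TC rx private buffers and, on DCB capable devices, the rx private
 * waterlines and common thresholds, followed by the common waterline.
 */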
2193 int hclge_buffer_alloc(struct hclge_dev *hdev)
2194 {
2195 	struct hclge_pkt_buf_alloc *pkt_buf;
2196 	int ret;
2197 
2198 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2199 	if (!pkt_buf)
2200 		return -ENOMEM;
2201 
2202 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2203 	if (ret) {
2204 		dev_err(&hdev->pdev->dev,
2205 			"could not calc tx buffer size for all TCs %d\n", ret);
2206 		goto out;
2207 	}
2208 
2209 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2210 	if (ret) {
2211 		dev_err(&hdev->pdev->dev,
2212 			"could not alloc tx buffers %d\n", ret);
2213 		goto out;
2214 	}
2215 
2216 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2217 	if (ret) {
2218 		dev_err(&hdev->pdev->dev,
2219 			"could not calc rx priv buffer size for all TCs %d\n",
2220 			ret);
2221 		goto out;
2222 	}
2223 
2224 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2225 	if (ret) {
2226 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2227 			ret);
2228 		goto out;
2229 	}
2230 
2231 	if (hnae3_dev_dcb_supported(hdev)) {
2232 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2233 		if (ret) {
2234 			dev_err(&hdev->pdev->dev,
2235 				"could not configure rx private waterline %d\n",
2236 				ret);
2237 			goto out;
2238 		}
2239 
2240 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2241 		if (ret) {
2242 			dev_err(&hdev->pdev->dev,
2243 				"could not configure common threshold %d\n",
2244 				ret);
2245 			goto out;
2246 		}
2247 	}
2248 
2249 	ret = hclge_common_wl_config(hdev, pkt_buf);
2250 	if (ret)
2251 		dev_err(&hdev->pdev->dev,
2252 			"could not configure common waterline %d\n", ret);
2253 
2254 out:
2255 	kfree(pkt_buf);
2256 	return ret;
2257 }
2258 
2259 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2260 {
2261 	struct hnae3_handle *roce = &vport->roce;
2262 	struct hnae3_handle *nic = &vport->nic;
2263 
2264 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2265 
2266 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2267 	    vport->back->num_msi_left == 0)
2268 		return -EINVAL;
2269 
2270 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2271 
2272 	roce->rinfo.netdev = nic->kinfo.netdev;
2273 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2274 
2275 	roce->pdev = nic->pdev;
2276 	roce->ae_algo = nic->ae_algo;
2277 	roce->numa_node_mask = nic->numa_node_mask;
2278 
2279 	return 0;
2280 }
2281 
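/* Allocate MSI/MSI-X vectors for the PF and set up the vector_status and
 * vector_irq arrays used to map vectors to vports. The RoCE vectors start
 * at roce_base_msix_offset above the base vector.
 */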
2282 static int hclge_init_msi(struct hclge_dev *hdev)
2283 {
2284 	struct pci_dev *pdev = hdev->pdev;
2285 	int vectors;
2286 	int i;
2287 
2288 	vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2289 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2290 	if (vectors < 0) {
2291 		dev_err(&pdev->dev,
2292 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2293 			vectors);
2294 		return vectors;
2295 	}
2296 	if (vectors < hdev->num_msi)
2297 		dev_warn(&hdev->pdev->dev,
2298 			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2299 			 hdev->num_msi, vectors);
2300 
2301 	hdev->num_msi = vectors;
2302 	hdev->num_msi_left = vectors;
2303 	hdev->base_msi_vector = pdev->irq;
2304 	hdev->roce_base_vector = hdev->base_msi_vector +
2305 				hdev->roce_base_msix_offset;
2306 
2307 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2308 					   sizeof(u16), GFP_KERNEL);
2309 	if (!hdev->vector_status) {
2310 		pci_free_irq_vectors(pdev);
2311 		return -ENOMEM;
2312 	}
2313 
2314 	for (i = 0; i < hdev->num_msi; i++)
2315 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2316 
2317 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2318 					sizeof(int), GFP_KERNEL);
2319 	if (!hdev->vector_irq) {
2320 		pci_free_irq_vectors(pdev);
2321 		return -ENOMEM;
2322 	}
2323 
2324 	return 0;
2325 }
2326 
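/* Half duplex is only valid at 10M/100M; for any other speed the duplex
 * is forced to HCLGE_MAC_FULL.
 */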
2327 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2328 {
2329 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2330 		duplex = HCLGE_MAC_FULL;
2331 
2332 	return duplex;
2333 }
2334 
2335 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2336 				      u8 duplex)
2337 {
2338 	struct hclge_config_mac_speed_dup_cmd *req;
2339 	struct hclge_desc desc;
2340 	int ret;
2341 
2342 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2343 
2344 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2345 
2346 	if (duplex)
2347 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2348 
2349 	switch (speed) {
2350 	case HCLGE_MAC_SPEED_10M:
2351 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2352 				HCLGE_CFG_SPEED_S, 6);
2353 		break;
2354 	case HCLGE_MAC_SPEED_100M:
2355 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2356 				HCLGE_CFG_SPEED_S, 7);
2357 		break;
2358 	case HCLGE_MAC_SPEED_1G:
2359 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2360 				HCLGE_CFG_SPEED_S, 0);
2361 		break;
2362 	case HCLGE_MAC_SPEED_10G:
2363 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2364 				HCLGE_CFG_SPEED_S, 1);
2365 		break;
2366 	case HCLGE_MAC_SPEED_25G:
2367 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2368 				HCLGE_CFG_SPEED_S, 2);
2369 		break;
2370 	case HCLGE_MAC_SPEED_40G:
2371 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2372 				HCLGE_CFG_SPEED_S, 3);
2373 		break;
2374 	case HCLGE_MAC_SPEED_50G:
2375 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2376 				HCLGE_CFG_SPEED_S, 4);
2377 		break;
2378 	case HCLGE_MAC_SPEED_100G:
2379 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2380 				HCLGE_CFG_SPEED_S, 5);
2381 		break;
2382 	default:
2383 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2384 		return -EINVAL;
2385 	}
2386 
2387 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2388 		      1);
2389 
2390 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2391 	if (ret) {
2392 		dev_err(&hdev->pdev->dev,
2393 			"mac speed/duplex config cmd failed %d.\n", ret);
2394 		return ret;
2395 	}
2396 
2397 	return 0;
2398 }
2399 
2400 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2401 {
2402 	int ret;
2403 
2404 	duplex = hclge_check_speed_dup(duplex, speed);
2405 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2406 		return 0;
2407 
2408 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2409 	if (ret)
2410 		return ret;
2411 
2412 	hdev->hw.mac.speed = speed;
2413 	hdev->hw.mac.duplex = duplex;
2414 
2415 	return 0;
2416 }
2417 
2418 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2419 				     u8 duplex)
2420 {
2421 	struct hclge_vport *vport = hclge_get_vport(handle);
2422 	struct hclge_dev *hdev = vport->back;
2423 
2424 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2425 }
2426 
2427 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2428 {
2429 	struct hclge_config_auto_neg_cmd *req;
2430 	struct hclge_desc desc;
2431 	u32 flag = 0;
2432 	int ret;
2433 
2434 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2435 
2436 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2437 	if (enable)
2438 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2439 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2440 
2441 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2442 	if (ret)
2443 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2444 			ret);
2445 
2446 	return ret;
2447 }
2448 
2449 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2450 {
2451 	struct hclge_vport *vport = hclge_get_vport(handle);
2452 	struct hclge_dev *hdev = vport->back;
2453 
2454 	if (!hdev->hw.mac.support_autoneg) {
2455 		if (enable) {
2456 			dev_err(&hdev->pdev->dev,
2457 				"autoneg is not supported by current port\n");
2458 			return -EOPNOTSUPP;
2459 		} else {
2460 			return 0;
2461 		}
2462 	}
2463 
2464 	return hclge_set_autoneg_en(hdev, enable);
2465 }
2466 
2467 static int hclge_get_autoneg(struct hnae3_handle *handle)
2468 {
2469 	struct hclge_vport *vport = hclge_get_vport(handle);
2470 	struct hclge_dev *hdev = vport->back;
2471 	struct phy_device *phydev = hdev->hw.mac.phydev;
2472 
2473 	if (phydev)
2474 		return phydev->autoneg;
2475 
2476 	return hdev->hw.mac.autoneg;
2477 }
2478 
2479 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2480 {
2481 	struct hclge_vport *vport = hclge_get_vport(handle);
2482 	struct hclge_dev *hdev = vport->back;
2483 	int ret;
2484 
2485 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2486 
2487 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2488 	if (ret)
2489 		return ret;
2490 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2491 }
2492 
2493 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2494 {
2495 	struct hclge_vport *vport = hclge_get_vport(handle);
2496 	struct hclge_dev *hdev = vport->back;
2497 
2498 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2499 		return hclge_set_autoneg_en(hdev, !halt);
2500 
2501 	return 0;
2502 }
2503 
2504 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2505 {
2506 	struct hclge_config_fec_cmd *req;
2507 	struct hclge_desc desc;
2508 	int ret;
2509 
2510 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2511 
2512 	req = (struct hclge_config_fec_cmd *)desc.data;
2513 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2514 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2515 	if (fec_mode & BIT(HNAE3_FEC_RS))
2516 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2517 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2518 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2519 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2520 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2521 
2522 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2523 	if (ret)
2524 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2525 
2526 	return ret;
2527 }
2528 
2529 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2530 {
2531 	struct hclge_vport *vport = hclge_get_vport(handle);
2532 	struct hclge_dev *hdev = vport->back;
2533 	struct hclge_mac *mac = &hdev->hw.mac;
2534 	int ret;
2535 
2536 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2537 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2538 		return -EINVAL;
2539 	}
2540 
2541 	ret = hclge_set_fec_hw(hdev, fec_mode);
2542 	if (ret)
2543 		return ret;
2544 
2545 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2546 	return 0;
2547 }
2548 
2549 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2550 			  u8 *fec_mode)
2551 {
2552 	struct hclge_vport *vport = hclge_get_vport(handle);
2553 	struct hclge_dev *hdev = vport->back;
2554 	struct hclge_mac *mac = &hdev->hw.mac;
2555 
2556 	if (fec_ability)
2557 		*fec_ability = mac->fec_ability;
2558 	if (fec_mode)
2559 		*fec_mode = mac->fec_mode;
2560 }
2561 
2562 static int hclge_mac_init(struct hclge_dev *hdev)
2563 {
2564 	struct hclge_mac *mac = &hdev->hw.mac;
2565 	int ret;
2566 
2567 	hdev->support_sfp_query = true;
2568 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2569 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2570 					 hdev->hw.mac.duplex);
2571 	if (ret) {
2572 		dev_err(&hdev->pdev->dev,
2573 			"Config mac speed dup fail ret=%d\n", ret);
2574 		return ret;
2575 	}
2576 
2577 	if (hdev->hw.mac.support_autoneg) {
2578 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2579 		if (ret) {
2580 			dev_err(&hdev->pdev->dev,
2581 				"Config mac autoneg fail ret=%d\n", ret);
2582 			return ret;
2583 		}
2584 	}
2585 
2586 	mac->link = 0;
2587 
2588 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2589 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2590 		if (ret) {
2591 			dev_err(&hdev->pdev->dev,
2592 				"Fec mode init fail, ret = %d\n", ret);
2593 			return ret;
2594 		}
2595 	}
2596 
2597 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2598 	if (ret) {
2599 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2600 		return ret;
2601 	}
2602 
2603 	ret = hclge_set_default_loopback(hdev);
2604 	if (ret)
2605 		return ret;
2606 
2607 	ret = hclge_buffer_alloc(hdev);
2608 	if (ret)
2609 		dev_err(&hdev->pdev->dev,
2610 			"allocate buffer fail, ret=%d\n", ret);
2611 
2612 	return ret;
2613 }
2614 
2615 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2616 {
2617 	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2618 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2619 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2620 			      &hdev->mbx_service_task);
2621 }
2622 
2623 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2624 {
2625 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2626 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2627 		queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2628 			      &hdev->rst_service_task);
2629 }
2630 
2631 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2632 {
2633 	if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2634 	    !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2635 	    !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2636 		hdev->hw_stats.stats_timer++;
2637 		hdev->fd_arfs_expire_timer++;
2638 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2639 				    system_wq, &hdev->service_task,
2640 				    delay_time);
2641 	}
2642 }
2643 
2644 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2645 {
2646 	struct hclge_link_status_cmd *req;
2647 	struct hclge_desc desc;
2648 	int link_status;
2649 	int ret;
2650 
2651 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2652 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2653 	if (ret) {
2654 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2655 			ret);
2656 		return ret;
2657 	}
2658 
2659 	req = (struct hclge_link_status_cmd *)desc.data;
2660 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2661 
2662 	return !!link_status;
2663 }
2664 
2665 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2666 {
2667 	unsigned int mac_state;
2668 	int link_stat;
2669 
2670 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2671 		return 0;
2672 
2673 	mac_state = hclge_get_mac_link_status(hdev);
2674 
2675 	if (hdev->hw.mac.phydev) {
2676 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2677 			link_stat = mac_state &
2678 				hdev->hw.mac.phydev->link;
2679 		else
2680 			link_stat = 0;
2681 
2682 	} else {
2683 		link_stat = mac_state;
2684 	}
2685 
2686 	return !!link_stat;
2687 }
2688 
2689 static void hclge_update_link_status(struct hclge_dev *hdev)
2690 {
2691 	struct hnae3_client *rclient = hdev->roce_client;
2692 	struct hnae3_client *client = hdev->nic_client;
2693 	struct hnae3_handle *rhandle;
2694 	struct hnae3_handle *handle;
2695 	int state;
2696 	int i;
2697 
2698 	if (!client)
2699 		return;
2700 	state = hclge_get_mac_phy_link(hdev);
2701 	if (state != hdev->hw.mac.link) {
2702 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2703 			handle = &hdev->vport[i].nic;
2704 			client->ops->link_status_change(handle, state);
2705 			hclge_config_mac_tnl_int(hdev, state);
2706 			rhandle = &hdev->vport[i].roce;
2707 			if (rclient && rclient->ops->link_status_change)
2708 				rclient->ops->link_status_change(rhandle,
2709 								 state);
2710 		}
2711 		hdev->hw.mac.link = state;
2712 	}
2713 }
2714 
2715 static void hclge_update_port_capability(struct hclge_mac *mac)
2716 {
2717 	/* update fec ability by speed */
2718 	hclge_convert_setting_fec(mac);
2719 
2720 	/* The firmware cannot identify the backplane type; the media type
2721 	 * read from the configuration helps to handle it.
2722 	 */
2723 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2724 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2725 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2726 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2727 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2728 
2729 	if (mac->support_autoneg) {
2730 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2731 		linkmode_copy(mac->advertising, mac->supported);
2732 	} else {
2733 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2734 				   mac->supported);
2735 		linkmode_zero(mac->advertising);
2736 	}
2737 }
2738 
2739 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2740 {
2741 	struct hclge_sfp_info_cmd *resp;
2742 	struct hclge_desc desc;
2743 	int ret;
2744 
2745 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2746 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2747 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2748 	if (ret == -EOPNOTSUPP) {
2749 		dev_warn(&hdev->pdev->dev,
2750 			 "IMP does not support getting SFP speed %d\n", ret);
2751 		return ret;
2752 	} else if (ret) {
2753 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2754 		return ret;
2755 	}
2756 
2757 	*speed = le32_to_cpu(resp->speed);
2758 
2759 	return 0;
2760 }
2761 
2762 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2763 {
2764 	struct hclge_sfp_info_cmd *resp;
2765 	struct hclge_desc desc;
2766 	int ret;
2767 
2768 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2769 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2770 
2771 	resp->query_type = QUERY_ACTIVE_SPEED;
2772 
2773 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2774 	if (ret == -EOPNOTSUPP) {
2775 		dev_warn(&hdev->pdev->dev,
2776 			 "IMP does not support getting SFP info %d\n", ret);
2777 		return ret;
2778 	} else if (ret) {
2779 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2780 		return ret;
2781 	}
2782 
2783 	mac->speed = le32_to_cpu(resp->speed);
2784 	/* if resp->speed_ability is 0, the firmware is an old version that
2785 	 * does not report these params, so do not update them
2786 	 */
2787 	if (resp->speed_ability) {
2788 		mac->module_type = le32_to_cpu(resp->module_type);
2789 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2790 		mac->autoneg = resp->autoneg;
2791 		mac->support_autoneg = resp->autoneg_ability;
2792 		mac->speed_type = QUERY_ACTIVE_SPEED;
2793 		if (!resp->active_fec)
2794 			mac->fec_mode = 0;
2795 		else
2796 			mac->fec_mode = BIT(resp->active_fec);
2797 	} else {
2798 		mac->speed_type = QUERY_SFP_SPEED;
2799 	}
2800 
2801 	return 0;
2802 }
2803 
2804 static int hclge_update_port_info(struct hclge_dev *hdev)
2805 {
2806 	struct hclge_mac *mac = &hdev->hw.mac;
2807 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2808 	int ret;
2809 
2810 	/* get the port info from SFP cmd if not copper port */
2811 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2812 		return 0;
2813 
2814 	/* if IMP does not support getting SFP/qSFP info, return directly */
2815 	if (!hdev->support_sfp_query)
2816 		return 0;
2817 
2818 	if (hdev->pdev->revision >= 0x21)
2819 		ret = hclge_get_sfp_info(hdev, mac);
2820 	else
2821 		ret = hclge_get_sfp_speed(hdev, &speed);
2822 
2823 	if (ret == -EOPNOTSUPP) {
2824 		hdev->support_sfp_query = false;
2825 		return ret;
2826 	} else if (ret) {
2827 		return ret;
2828 	}
2829 
2830 	if (hdev->pdev->revision >= 0x21) {
2831 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2832 			hclge_update_port_capability(mac);
2833 			return 0;
2834 		}
2835 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2836 					       HCLGE_MAC_FULL);
2837 	} else {
2838 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2839 			return 0; /* do nothing if no SFP */
2840 
2841 		/* must config full duplex for SFP */
2842 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2843 	}
2844 }
2845 
2846 static int hclge_get_status(struct hnae3_handle *handle)
2847 {
2848 	struct hclge_vport *vport = hclge_get_vport(handle);
2849 	struct hclge_dev *hdev = vport->back;
2850 
2851 	hclge_update_link_status(hdev);
2852 
2853 	return hdev->hw.mac.link;
2854 }
2855 
2856 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2857 {
2858 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2859 
2860 	/* fetch the events from their corresponding regs */
2861 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2862 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2863 	msix_src_reg = hclge_read_dev(&hdev->hw,
2864 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2865 
2866 	/* Assumption: if reset and mailbox events happen to be reported
2867 	 * together, we only process the reset event in this pass and defer
2868 	 * the processing of the mailbox events. Since the RX CMDQ event is
2869 	 * not cleared this time, the hardware will raise another interrupt
2870 	 * just for the mailbox.
2871 	 *
2872 	 * check for vector0 reset event sources
2873 	 */
2874 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2875 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2876 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2877 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2878 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2879 		hdev->rst_stats.imp_rst_cnt++;
2880 		return HCLGE_VECTOR0_EVENT_RST;
2881 	}
2882 
2883 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2884 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2885 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2886 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2887 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2888 		hdev->rst_stats.global_rst_cnt++;
2889 		return HCLGE_VECTOR0_EVENT_RST;
2890 	}
2891 
2892 	/* check for vector0 msix event source */
2893 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2894 		dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2895 			 msix_src_reg);
2896 		*clearval = msix_src_reg;
2897 		return HCLGE_VECTOR0_EVENT_ERR;
2898 	}
2899 
2900 	/* check for vector0 mailbox(=CMDQ RX) event source */
2901 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2902 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2903 		*clearval = cmdq_src_reg;
2904 		return HCLGE_VECTOR0_EVENT_MBX;
2905 	}
2906 
2907 	/* print other vector0 event source */
2908 	dev_info(&hdev->pdev->dev,
2909 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
2910 		 cmdq_src_reg, msix_src_reg);
2911 	*clearval = msix_src_reg;
2912 
2913 	return HCLGE_VECTOR0_EVENT_OTHER;
2914 }
2915 
2916 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2917 				    u32 regclr)
2918 {
2919 	switch (event_type) {
2920 	case HCLGE_VECTOR0_EVENT_RST:
2921 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2922 		break;
2923 	case HCLGE_VECTOR0_EVENT_MBX:
2924 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2925 		break;
2926 	default:
2927 		break;
2928 	}
2929 }
2930 
2931 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2932 {
2933 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2934 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2935 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2936 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2937 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2938 }
2939 
2940 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2941 {
2942 	writel(enable ? 1 : 0, vector->addr);
2943 }
2944 
2945 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2946 {
2947 	struct hclge_dev *hdev = data;
2948 	u32 clearval = 0;
2949 	u32 event_cause;
2950 
2951 	hclge_enable_vector(&hdev->misc_vector, false);
2952 	event_cause = hclge_check_event_cause(hdev, &clearval);
2953 
2954 	/* vector 0 interrupt is shared with reset and mailbox source events. */
2955 	switch (event_cause) {
2956 	case HCLGE_VECTOR0_EVENT_ERR:
2957 		/* we do not know what type of reset is required now. This can
2958 		 * only be decided after we fetch the type of errors which
2959 		 * caused this event. Therefore, we do the following for now:
2960 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
2961 		 *    actual type of reset to be used is deferred.
2962 		 * 2. Schedule the reset service task.
2963 		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
2964 		 *    will fetch the correct type of reset by first decoding
2965 		 *    the types of errors.
2966 		 */
2967 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2968 		/* fall through */
2969 	case HCLGE_VECTOR0_EVENT_RST:
2970 		hclge_reset_task_schedule(hdev);
2971 		break;
2972 	case HCLGE_VECTOR0_EVENT_MBX:
2973 		/* If we are here then,
2974 		 * 1. Either we are not handling any mbx task and we are not
2975 		 *    scheduled as well
2976 		 *                        OR
2977 		 * 2. We could be handling a mbx task but nothing more is
2978 		 *    scheduled.
2979 		 * In both cases, we should schedule mbx task as there are more
2980 		 * mbx messages reported by this interrupt.
2981 		 */
2982 		hclge_mbx_task_schedule(hdev);
2983 		break;
2984 	default:
2985 		dev_warn(&hdev->pdev->dev,
2986 			 "received unknown or unhandled event of vector0\n");
2987 		break;
2988 	}
2989 
2990 	hclge_clear_event_cause(hdev, event_cause, clearval);
2991 
2992 	/* Enable the interrupt if the event was not caused by a reset. When
2993 	 * clearval is 0, the interrupt status may have been cleared by the
2994 	 * hardware before the driver read the status register; in that case
2995 	 * the vector0 interrupt should also be re-enabled.
2996 	 */
2997 	if (!clearval ||
2998 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2999 		hclge_enable_vector(&hdev->misc_vector, true);
3000 	}
3001 
3002 	return IRQ_HANDLED;
3003 }
3004 
3005 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3006 {
3007 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3008 		dev_warn(&hdev->pdev->dev,
3009 			 "vector(vector_id %d) has been freed.\n", vector_id);
3010 		return;
3011 	}
3012 
3013 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3014 	hdev->num_msi_left += 1;
3015 	hdev->num_msi_used -= 1;
3016 }
3017 
3018 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3019 {
3020 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3021 
3022 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3023 
3024 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3025 	hdev->vector_status[0] = 0;
3026 
3027 	hdev->num_msi_left -= 1;
3028 	hdev->num_msi_used += 1;
3029 }
3030 
3031 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3032 				      const cpumask_t *mask)
3033 {
3034 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3035 					      affinity_notify);
3036 
3037 	cpumask_copy(&hdev->affinity_mask, mask);
3038 }
3039 
3040 static void hclge_irq_affinity_release(struct kref *ref)
3041 {
3042 }
3043 
3044 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3045 {
3046 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3047 			      &hdev->affinity_mask);
3048 
3049 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3050 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3051 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3052 				  &hdev->affinity_notify);
3053 }
3054 
3055 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3056 {
3057 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3058 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3059 }
3060 
3061 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3062 {
3063 	int ret;
3064 
3065 	hclge_get_misc_vector(hdev);
3066 
3067 	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3068 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3069 			  0, "hclge_misc", hdev);
3070 	if (ret) {
3071 		hclge_free_vector(hdev, 0);
3072 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3073 			hdev->misc_vector.vector_irq);
3074 	}
3075 
3076 	return ret;
3077 }
3078 
3079 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3080 {
3081 	free_irq(hdev->misc_vector.vector_irq, hdev);
3082 	hclge_free_vector(hdev, 0);
3083 }
3084 
3085 int hclge_notify_client(struct hclge_dev *hdev,
3086 			enum hnae3_reset_notify_type type)
3087 {
3088 	struct hnae3_client *client = hdev->nic_client;
3089 	u16 i;
3090 
3091 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3092 		return 0;
3093 
3094 	if (!client->ops->reset_notify)
3095 		return -EOPNOTSUPP;
3096 
3097 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3098 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3099 		int ret;
3100 
3101 		ret = client->ops->reset_notify(handle, type);
3102 		if (ret) {
3103 			dev_err(&hdev->pdev->dev,
3104 				"notify nic client failed %d(%d)\n", type, ret);
3105 			return ret;
3106 		}
3107 	}
3108 
3109 	return 0;
3110 }
3111 
3112 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3113 				    enum hnae3_reset_notify_type type)
3114 {
3115 	struct hnae3_client *client = hdev->roce_client;
3116 	int ret = 0;
3117 	u16 i;
3118 
3119 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3120 		return 0;
3121 
3122 	if (!client->ops->reset_notify)
3123 		return -EOPNOTSUPP;
3124 
3125 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3126 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3127 
3128 		ret = client->ops->reset_notify(handle, type);
3129 		if (ret) {
3130 			dev_err(&hdev->pdev->dev,
3131 				"notify roce client failed %d(%d)",
3132 				type, ret);
3133 			return ret;
3134 		}
3135 	}
3136 
3137 	return ret;
3138 }
3139 
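/* Wait for the hardware to finish the current reset: FLR waits on the
 * HNAE3_FLR_DONE flag, while the other reset types poll the corresponding
 * reset status register every 100 ms, up to HCLGE_RESET_WAIT_CNT times.
 */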
3140 static int hclge_reset_wait(struct hclge_dev *hdev)
3141 {
3142 #define HCLGE_RESET_WAIT_MS	100
3143 #define HCLGE_RESET_WAIT_CNT	200
3144 	u32 val, reg, reg_bit;
3145 	u32 cnt = 0;
3146 
3147 	switch (hdev->reset_type) {
3148 	case HNAE3_IMP_RESET:
3149 		reg = HCLGE_GLOBAL_RESET_REG;
3150 		reg_bit = HCLGE_IMP_RESET_BIT;
3151 		break;
3152 	case HNAE3_GLOBAL_RESET:
3153 		reg = HCLGE_GLOBAL_RESET_REG;
3154 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3155 		break;
3156 	case HNAE3_FUNC_RESET:
3157 		reg = HCLGE_FUN_RST_ING;
3158 		reg_bit = HCLGE_FUN_RST_ING_B;
3159 		break;
3160 	case HNAE3_FLR_RESET:
3161 		break;
3162 	default:
3163 		dev_err(&hdev->pdev->dev,
3164 			"Wait for unsupported reset type: %d\n",
3165 			hdev->reset_type);
3166 		return -EINVAL;
3167 	}
3168 
3169 	if (hdev->reset_type == HNAE3_FLR_RESET) {
3170 		while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3171 		       cnt++ < HCLGE_RESET_WAIT_CNT)
3172 			msleep(HCLGE_RESET_WAIT_MS);
3173 
3174 		if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3175 			dev_err(&hdev->pdev->dev,
3176 				"flr wait timeout: %d\n", cnt);
3177 			return -EBUSY;
3178 		}
3179 
3180 		return 0;
3181 	}
3182 
3183 	val = hclge_read_dev(&hdev->hw, reg);
3184 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3185 		msleep(HCLGE_RESET_WAIT_MS);
3186 		val = hclge_read_dev(&hdev->hw, reg);
3187 		cnt++;
3188 	}
3189 
3190 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3191 		dev_warn(&hdev->pdev->dev,
3192 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3193 		return -EBUSY;
3194 	}
3195 
3196 	return 0;
3197 }
3198 
3199 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3200 {
3201 	struct hclge_vf_rst_cmd *req;
3202 	struct hclge_desc desc;
3203 
3204 	req = (struct hclge_vf_rst_cmd *)desc.data;
3205 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3206 	req->dest_vfid = func_id;
3207 
3208 	if (reset)
3209 		req->vf_rst = 0x1;
3210 
3211 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3212 }
3213 
3214 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3215 {
3216 	int i;
3217 
3218 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3219 		struct hclge_vport *vport = &hdev->vport[i];
3220 		int ret;
3221 
3222 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3223 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3224 		if (ret) {
3225 			dev_err(&hdev->pdev->dev,
3226 				"set vf(%d) rst failed %d!\n",
3227 				vport->vport_id, ret);
3228 			return ret;
3229 		}
3230 
3231 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3232 			continue;
3233 
3234 		/* Inform VF to process the reset.
3235 		 * hclge_inform_reset_assert_to_vf may fail if VF
3236 		 * driver is not loaded.
3237 		 */
3238 		ret = hclge_inform_reset_assert_to_vf(vport);
3239 		if (ret)
3240 			dev_warn(&hdev->pdev->dev,
3241 				 "inform reset to vf(%d) failed %d!\n",
3242 				 vport->vport_id, ret);
3243 	}
3244 
3245 	return 0;
3246 }
3247 
3248 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3249 {
3250 	struct hclge_pf_rst_sync_cmd *req;
3251 	struct hclge_desc desc;
3252 	int cnt = 0;
3253 	int ret;
3254 
3255 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3256 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3257 
3258 	do {
3259 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3260 		/* for compatible with old firmware, wait
3261 		 * 100 ms for VF to stop IO
3262 		 */
3263 		if (ret == -EOPNOTSUPP) {
3264 			msleep(HCLGE_RESET_SYNC_TIME);
3265 			return 0;
3266 		} else if (ret) {
3267 			dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3268 				ret);
3269 			return ret;
3270 		} else if (req->all_vf_ready) {
3271 			return 0;
3272 		}
3273 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3274 		hclge_cmd_reuse_desc(&desc, true);
3275 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3276 
3277 	dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3278 	return -ETIME;
3279 }
3280 
3281 void hclge_report_hw_error(struct hclge_dev *hdev,
3282 			   enum hnae3_hw_error_type type)
3283 {
3284 	struct hnae3_client *client = hdev->nic_client;
3285 	u16 i;
3286 
3287 	if (!client || !client->ops->process_hw_error ||
3288 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3289 		return;
3290 
3291 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3292 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3293 }
3294 
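/* Report IMP read-poison and command queue ECC errors to the NIC client
 * and clear the corresponding bits in the PF "other interrupt" register.
 */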
3295 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3296 {
3297 	u32 reg_val;
3298 
3299 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3300 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3301 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3302 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3303 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3304 	}
3305 
3306 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3307 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3308 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3309 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3310 	}
3311 }
3312 
3313 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3314 {
3315 	struct hclge_desc desc;
3316 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3317 	int ret;
3318 
3319 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3320 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3321 	req->fun_reset_vfid = func_id;
3322 
3323 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3324 	if (ret)
3325 		dev_err(&hdev->pdev->dev,
3326 			"send function reset cmd fail, status = %d\n", ret);
3327 
3328 	return ret;
3329 }
3330 
3331 static void hclge_do_reset(struct hclge_dev *hdev)
3332 {
3333 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3334 	struct pci_dev *pdev = hdev->pdev;
3335 	u32 val;
3336 
3337 	if (hclge_get_hw_reset_stat(handle)) {
3338 		dev_info(&pdev->dev, "Hardware reset has not finished\n");
3339 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3340 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3341 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3342 		return;
3343 	}
3344 
3345 	switch (hdev->reset_type) {
3346 	case HNAE3_GLOBAL_RESET:
3347 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3348 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3349 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3350 		dev_info(&pdev->dev, "Global Reset requested\n");
3351 		break;
3352 	case HNAE3_FUNC_RESET:
3353 		dev_info(&pdev->dev, "PF Reset requested\n");
3354 		/* schedule again to check later */
3355 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3356 		hclge_reset_task_schedule(hdev);
3357 		break;
3358 	case HNAE3_FLR_RESET:
3359 		dev_info(&pdev->dev, "FLR requested\n");
3360 		/* schedule again to check later */
3361 		set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3362 		hclge_reset_task_schedule(hdev);
3363 		break;
3364 	default:
3365 		dev_warn(&pdev->dev,
3366 			 "Unsupported reset type: %d\n", hdev->reset_type);
3367 		break;
3368 	}
3369 }
3370 
3371 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3372 						   unsigned long *addr)
3373 {
3374 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3375 	struct hclge_dev *hdev = ae_dev->priv;
3376 
3377 	/* first, resolve any unknown reset type to the known type(s) */
3378 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3379 		/* we will intentionally ignore any errors from this function
3380 		 * as we will end up in *some* reset request in any case
3381 		 */
3382 		hclge_handle_hw_msix_error(hdev, addr);
3383 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3384 		/* We deferred the clearing of the error event which caused
3385 		 * the interrupt since it was not possible to do that in
3386 		 * interrupt context (this is the reason we introduced the
3387 		 * new UNKNOWN reset type). Now that the errors have been
3388 		 * handled and cleared in hardware, we can safely enable
3389 		 * interrupts. This is an exception to the norm.
3390 		 */
3391 		hclge_enable_vector(&hdev->misc_vector, true);
3392 	}
3393 
3394 	/* return the highest priority reset level amongst all */
3395 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3396 		rst_level = HNAE3_IMP_RESET;
3397 		clear_bit(HNAE3_IMP_RESET, addr);
3398 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3399 		clear_bit(HNAE3_FUNC_RESET, addr);
3400 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3401 		rst_level = HNAE3_GLOBAL_RESET;
3402 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3403 		clear_bit(HNAE3_FUNC_RESET, addr);
3404 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3405 		rst_level = HNAE3_FUNC_RESET;
3406 		clear_bit(HNAE3_FUNC_RESET, addr);
3407 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3408 		rst_level = HNAE3_FLR_RESET;
3409 		clear_bit(HNAE3_FLR_RESET, addr);
3410 	}
3411 
3412 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3413 	    rst_level < hdev->reset_type)
3414 		return HNAE3_NONE_RESET;
3415 
3416 	return rst_level;
3417 }
3418 
3419 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3420 {
3421 	u32 clearval = 0;
3422 
3423 	switch (hdev->reset_type) {
3424 	case HNAE3_IMP_RESET:
3425 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3426 		break;
3427 	case HNAE3_GLOBAL_RESET:
3428 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3429 		break;
3430 	default:
3431 		break;
3432 	}
3433 
3434 	if (!clearval)
3435 		return;
3436 
3437 	/* For revision 0x20, the reset interrupt source
3438 	 * can only be cleared after the hardware reset is done
3439 	 */
3440 	if (hdev->pdev->revision == 0x20)
3441 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3442 				clearval);
3443 
3444 	hclge_enable_vector(&hdev->misc_vector, true);
3445 }
3446 
3447 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3448 {
3449 	int ret = 0;
3450 
3451 	switch (hdev->reset_type) {
3452 	case HNAE3_FUNC_RESET:
3453 		/* fall through */
3454 	case HNAE3_FLR_RESET:
3455 		ret = hclge_set_all_vf_rst(hdev, true);
3456 		break;
3457 	default:
3458 		break;
3459 	}
3460 
3461 	return ret;
3462 }
3463 
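/* Tell the hardware whether the driver side is ready for the reset by
 * setting or clearing the HCLGE_NIC_SW_RST_RDY bit.
 */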
3464 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3465 {
3466 	u32 reg_val;
3467 
3468 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3469 	if (enable)
3470 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3471 	else
3472 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3473 
3474 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3475 }
3476 
3477 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3478 {
3479 	u32 reg_val;
3480 	int ret = 0;
3481 
3482 	switch (hdev->reset_type) {
3483 	case HNAE3_FUNC_RESET:
3484 		/* confirm that all running VFs are ready
3485 		 * before requesting the PF reset
3486 		 */
3487 		ret = hclge_func_reset_sync_vf(hdev);
3488 		if (ret)
3489 			return ret;
3490 
3491 		ret = hclge_func_reset_cmd(hdev, 0);
3492 		if (ret) {
3493 			dev_err(&hdev->pdev->dev,
3494 				"asserting function reset fail %d!\n", ret);
3495 			return ret;
3496 		}
3497 
3498 		/* After performing a PF reset, it is not necessary to do the
3499 		 * mailbox handling or send any command to firmware, because
3500 		 * any mailbox handling or command to firmware is only valid
3501 		 * after hclge_cmd_init is called.
3502 		 */
3503 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3504 		hdev->rst_stats.pf_rst_cnt++;
3505 		break;
3506 	case HNAE3_FLR_RESET:
3507 		/* confirm that all running VFs are ready
3508 		 * before requesting the PF reset
3509 		 */
3510 		ret = hclge_func_reset_sync_vf(hdev);
3511 		if (ret)
3512 			return ret;
3513 
3514 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3515 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3516 		hdev->rst_stats.flr_rst_cnt++;
3517 		break;
3518 	case HNAE3_IMP_RESET:
3519 		hclge_handle_imp_error(hdev);
3520 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3521 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3522 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3523 		break;
3524 	default:
3525 		break;
3526 	}
3527 
3528 	/* inform hardware that preparatory work is done */
3529 	msleep(HCLGE_RESET_SYNC_TIME);
3530 	hclge_reset_handshake(hdev, true);
3531 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3532 
3533 	return ret;
3534 }
3535 
3536 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3537 {
3538 #define MAX_RESET_FAIL_CNT 5
3539 
3540 	if (hdev->reset_pending) {
3541 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3542 			 hdev->reset_pending);
3543 		return true;
3544 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3545 		   HCLGE_RESET_INT_M) {
3546 		dev_info(&hdev->pdev->dev,
3547 			 "reset failed because of a new reset interrupt\n");
3548 		hclge_clear_reset_cause(hdev);
3549 		return false;
3550 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3551 		hdev->rst_stats.reset_fail_cnt++;
3552 		set_bit(hdev->reset_type, &hdev->reset_pending);
3553 		dev_info(&hdev->pdev->dev,
3554 			 "re-schedule reset task(%d)\n",
3555 			 hdev->rst_stats.reset_fail_cnt);
3556 		return true;
3557 	}
3558 
3559 	hclge_clear_reset_cause(hdev);
3560 
	/* recover the handshake status when reset fails */
3562 	hclge_reset_handshake(hdev, true);
3563 
3564 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3565 	return false;
3566 }
3567 
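/* Tell the firmware that the PF driver has finished its reset handling, used
 * when a global or IMP reset completes.
 */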
3568 static int hclge_set_rst_done(struct hclge_dev *hdev)
3569 {
3570 	struct hclge_pf_rst_done_cmd *req;
3571 	struct hclge_desc desc;
3572 
3573 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3574 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3575 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3576 
3577 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3578 }
3579 
3580 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3581 {
3582 	int ret = 0;
3583 
3584 	switch (hdev->reset_type) {
3585 	case HNAE3_FUNC_RESET:
3586 		/* fall through */
3587 	case HNAE3_FLR_RESET:
3588 		ret = hclge_set_all_vf_rst(hdev, false);
3589 		break;
3590 	case HNAE3_GLOBAL_RESET:
3591 		/* fall through */
3592 	case HNAE3_IMP_RESET:
3593 		ret = hclge_set_rst_done(hdev);
3594 		break;
3595 	default:
3596 		break;
3597 	}
3598 
	/* clear the handshake status after re-initialization is done */
3600 	hclge_reset_handshake(hdev, false);
3601 
3602 	return ret;
3603 }
3604 
3605 static int hclge_reset_stack(struct hclge_dev *hdev)
3606 {
3607 	int ret;
3608 
3609 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3610 	if (ret)
3611 		return ret;
3612 
3613 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3614 	if (ret)
3615 		return ret;
3616 
3617 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3618 	if (ret)
3619 		return ret;
3620 
3621 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3622 }
3623 
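/* The main reset flow: bring the clients down, prepare and wait for the
 * hardware reset, then re-initialize the ae device and bring the clients
 * back up. On failure, hclge_reset_err_handle() decides whether the reset
 * task is re-scheduled.
 */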
3624 static void hclge_reset(struct hclge_dev *hdev)
3625 {
3626 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3627 	enum hnae3_reset_type reset_level;
3628 	int ret;
3629 
	/* Initialize ae_dev reset status as well, in case the enet layer wants
	 * to know if the device is undergoing a reset
	 */
3633 	ae_dev->reset_type = hdev->reset_type;
3634 	hdev->rst_stats.reset_cnt++;
3635 	/* perform reset of the stack & ae device for a client */
3636 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3637 	if (ret)
3638 		goto err_reset;
3639 
3640 	ret = hclge_reset_prepare_down(hdev);
3641 	if (ret)
3642 		goto err_reset;
3643 
3644 	rtnl_lock();
3645 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3646 	if (ret)
3647 		goto err_reset_lock;
3648 
3649 	rtnl_unlock();
3650 
3651 	ret = hclge_reset_prepare_wait(hdev);
3652 	if (ret)
3653 		goto err_reset;
3654 
3655 	if (hclge_reset_wait(hdev))
3656 		goto err_reset;
3657 
3658 	hdev->rst_stats.hw_reset_done_cnt++;
3659 
3660 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3661 	if (ret)
3662 		goto err_reset;
3663 
3664 	rtnl_lock();
3665 
3666 	ret = hclge_reset_stack(hdev);
3667 	if (ret)
3668 		goto err_reset_lock;
3669 
3670 	hclge_clear_reset_cause(hdev);
3671 
3672 	ret = hclge_reset_prepare_up(hdev);
3673 	if (ret)
3674 		goto err_reset_lock;
3675 
3676 	rtnl_unlock();
3677 
3678 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE notify error only after the reset has already failed
	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
	 */
3682 	if (ret &&
3683 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3684 		goto err_reset;
3685 
3686 	rtnl_lock();
3687 
3688 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3689 	if (ret)
3690 		goto err_reset_lock;
3691 
3692 	rtnl_unlock();
3693 
3694 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3695 	if (ret)
3696 		goto err_reset;
3697 
3698 	hdev->last_reset_time = jiffies;
3699 	hdev->rst_stats.reset_fail_cnt = 0;
3700 	hdev->rst_stats.reset_done_cnt++;
3701 	ae_dev->reset_type = HNAE3_NONE_RESET;
3702 
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
3707 	reset_level = hclge_get_reset_level(ae_dev,
3708 					    &hdev->default_reset_request);
3709 	if (reset_level != HNAE3_NONE_RESET)
3710 		set_bit(reset_level, &hdev->reset_request);
3711 
3712 	return;
3713 
3714 err_reset_lock:
3715 	rtnl_unlock();
3716 err_reset:
3717 	if (hclge_reset_err_handle(hdev))
3718 		hclge_reset_task_schedule(hdev);
3719 }
3720 
3721 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3722 {
3723 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3724 	struct hclge_dev *hdev = ae_dev->priv;
3725 
	/* We might end up getting called broadly because of 2 cases below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not here
	 * just because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, so check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat reset
	 * request of the most recent one, then we want to make sure we
	 * throttle the reset request. Therefore, we will not allow it again
	 * before HCLGE_RESET_INTERVAL has elapsed.
	 */
3741 	if (!handle)
3742 		handle = &hdev->vport[0].nic;
3743 
3744 	if (time_before(jiffies, (hdev->last_reset_time +
3745 				  HCLGE_RESET_INTERVAL))) {
3746 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3747 		return;
3748 	} else if (hdev->default_reset_request)
3749 		hdev->reset_level =
3750 			hclge_get_reset_level(ae_dev,
3751 					      &hdev->default_reset_request);
3752 	else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3753 		hdev->reset_level = HNAE3_FUNC_RESET;
3754 
3755 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3756 		 hdev->reset_level);
3757 
3758 	/* request reset & schedule reset task */
3759 	set_bit(hdev->reset_level, &hdev->reset_request);
3760 	hclge_reset_task_schedule(hdev);
3761 
3762 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3763 		hdev->reset_level++;
3764 }
3765 
3766 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3767 					enum hnae3_reset_type rst_type)
3768 {
3769 	struct hclge_dev *hdev = ae_dev->priv;
3770 
3771 	set_bit(rst_type, &hdev->default_reset_request);
3772 }
3773 
3774 static void hclge_reset_timer(struct timer_list *t)
3775 {
3776 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3777 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
3781 	if (!hdev->default_reset_request)
3782 		return;
3783 
3784 	dev_info(&hdev->pdev->dev,
3785 		 "triggering reset in reset timer\n");
3786 	hclge_reset_event(hdev->pdev, NULL);
3787 }
3788 
3789 static void hclge_reset_subtask(struct hclge_dev *hdev)
3790 {
3791 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3792 
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, we need to wait for the
	 * hardware to complete the reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, then we can proceed with the driver
	 *       and client reset.
	 *    b. else, we can come back later to check this status, so
	 *       re-schedule now.
	 */
3802 	hdev->last_reset_time = jiffies;
3803 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3804 	if (hdev->reset_type != HNAE3_NONE_RESET)
3805 		hclge_reset(hdev);
3806 
3807 	/* check if we got any *new* reset requests to be honored */
3808 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3809 	if (hdev->reset_type != HNAE3_NONE_RESET)
3810 		hclge_do_reset(hdev);
3811 
3812 	hdev->reset_type = HNAE3_NONE_RESET;
3813 }
3814 
3815 static void hclge_reset_service_task(struct work_struct *work)
3816 {
3817 	struct hclge_dev *hdev =
3818 		container_of(work, struct hclge_dev, rst_service_task);
3819 
3820 	if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3821 		return;
3822 
3823 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3824 
3825 	hclge_reset_subtask(hdev);
3826 
3827 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3828 }
3829 
3830 static void hclge_mailbox_service_task(struct work_struct *work)
3831 {
3832 	struct hclge_dev *hdev =
3833 		container_of(work, struct hclge_dev, mbx_service_task);
3834 
3835 	if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3836 		return;
3837 
3838 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3839 
3840 	hclge_mbx_handler(hdev);
3841 
3842 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3843 }
3844 
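/* Mark a VF vport as not alive if its last activity was more than 8 seconds
 * ago, and restore its MPS to the default value in that case.
 */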
3845 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3846 {
3847 	int i;
3848 
	/* start from vport 1, since the PF (vport 0) is always alive */
3850 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3851 		struct hclge_vport *vport = &hdev->vport[i];
3852 
3853 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3854 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3855 
		/* If the VF is not alive, reset its MPS to the default value */
3857 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3858 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3859 	}
3860 }
3861 
3862 static void hclge_service_task(struct work_struct *work)
3863 {
3864 	struct hclge_dev *hdev =
3865 		container_of(work, struct hclge_dev, service_task.work);
3866 
3867 	clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3868 
3869 	if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3870 		hclge_update_stats_for_all(hdev);
3871 		hdev->hw_stats.stats_timer = 0;
3872 	}
3873 
3874 	hclge_update_port_info(hdev);
3875 	hclge_update_link_status(hdev);
3876 	hclge_update_vport_alive(hdev);
3877 	hclge_sync_vlan_filter(hdev);
3878 	if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3879 		hclge_rfs_filter_expire(hdev);
3880 		hdev->fd_arfs_expire_timer = 0;
3881 	}
3882 
3883 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
3884 }
3885 
3886 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3887 {
3888 	/* VF handle has no client */
3889 	if (!handle->client)
3890 		return container_of(handle, struct hclge_vport, nic);
3891 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
3892 		return container_of(handle, struct hclge_vport, roce);
3893 	else
3894 		return container_of(handle, struct hclge_vport, nic);
3895 }
3896 
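/* Allocate up to @vector_num unused MSI vectors for the requesting vport,
 * skipping vector 0, and fill in their irq numbers and per-vport interrupt
 * register addresses. Returns the number of vectors actually allocated.
 */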
3897 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3898 			    struct hnae3_vector_info *vector_info)
3899 {
3900 	struct hclge_vport *vport = hclge_get_vport(handle);
3901 	struct hnae3_vector_info *vector = vector_info;
3902 	struct hclge_dev *hdev = vport->back;
3903 	int alloc = 0;
3904 	int i, j;
3905 
3906 	vector_num = min(hdev->num_msi_left, vector_num);
3907 
3908 	for (j = 0; j < vector_num; j++) {
3909 		for (i = 1; i < hdev->num_msi; i++) {
3910 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3911 				vector->vector = pci_irq_vector(hdev->pdev, i);
3912 				vector->io_addr = hdev->hw.io_base +
3913 					HCLGE_VECTOR_REG_BASE +
3914 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
3915 					vport->vport_id *
3916 					HCLGE_VECTOR_VF_OFFSET;
3917 				hdev->vector_status[i] = vport->vport_id;
3918 				hdev->vector_irq[i] = vector->vector;
3919 
3920 				vector++;
3921 				alloc++;
3922 
3923 				break;
3924 			}
3925 		}
3926 	}
3927 	hdev->num_msi_left -= alloc;
3928 	hdev->num_msi_used += alloc;
3929 
3930 	return alloc;
3931 }
3932 
3933 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3934 {
3935 	int i;
3936 
3937 	for (i = 0; i < hdev->num_msi; i++)
3938 		if (vector == hdev->vector_irq[i])
3939 			return i;
3940 
3941 	return -EINVAL;
3942 }
3943 
3944 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3945 {
3946 	struct hclge_vport *vport = hclge_get_vport(handle);
3947 	struct hclge_dev *hdev = vport->back;
3948 	int vector_id;
3949 
3950 	vector_id = hclge_get_vector_index(hdev, vector);
3951 	if (vector_id < 0) {
3952 		dev_err(&hdev->pdev->dev,
3953 			"Get vector index fail. vector_id =%d\n", vector_id);
3954 		return vector_id;
3955 	}
3956 
3957 	hclge_free_vector(hdev, vector_id);
3958 
3959 	return 0;
3960 }
3961 
3962 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3963 {
3964 	return HCLGE_RSS_KEY_SIZE;
3965 }
3966 
3967 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3968 {
3969 	return HCLGE_RSS_IND_TBL_SIZE;
3970 }
3971 
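/* Write the RSS hash algorithm and hash key to hardware. The key is written
 * in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per
 * chunk.
 */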
3972 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3973 				  const u8 hfunc, const u8 *key)
3974 {
3975 	struct hclge_rss_config_cmd *req;
3976 	unsigned int key_offset = 0;
3977 	struct hclge_desc desc;
3978 	int key_counts;
3979 	int key_size;
3980 	int ret;
3981 
3982 	key_counts = HCLGE_RSS_KEY_SIZE;
3983 	req = (struct hclge_rss_config_cmd *)desc.data;
3984 
3985 	while (key_counts) {
3986 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3987 					   false);
3988 
3989 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3990 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3991 
3992 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3993 		memcpy(req->hash_key,
3994 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3995 
3996 		key_counts -= key_size;
3997 		key_offset++;
3998 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3999 		if (ret) {
4000 			dev_err(&hdev->pdev->dev,
4001 				"Configure RSS config fail, status = %d\n",
4002 				ret);
4003 			return ret;
4004 		}
4005 	}
4006 	return 0;
4007 }
4008 
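/* Program the RSS indirection table into hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor.
 */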
4009 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4010 {
4011 	struct hclge_rss_indirection_table_cmd *req;
4012 	struct hclge_desc desc;
4013 	int i, j;
4014 	int ret;
4015 
4016 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4017 
4018 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
					   false);
4021 
4022 		req->start_table_index =
4023 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4024 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4025 
4026 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4027 			req->rss_result[j] =
4028 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4029 
4030 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4031 		if (ret) {
4032 			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
4034 				ret);
4035 			return ret;
4036 		}
4037 	}
4038 	return 0;
4039 }
4040 
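/* Configure the per-TC RSS mode: whether each TC is valid, its queue size
 * (log2 encoded) and its queue offset.
 */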
4041 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4042 				 u16 *tc_size, u16 *tc_offset)
4043 {
4044 	struct hclge_rss_tc_mode_cmd *req;
4045 	struct hclge_desc desc;
4046 	int ret;
4047 	int i;
4048 
4049 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4050 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4051 
4052 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4053 		u16 mode = 0;
4054 
4055 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4056 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4057 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4058 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4059 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4060 
4061 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4062 	}
4063 
4064 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4065 	if (ret)
4066 		dev_err(&hdev->pdev->dev,
4067 			"Configure rss tc mode fail, status = %d\n", ret);
4068 
4069 	return ret;
4070 }
4071 
4072 static void hclge_get_rss_type(struct hclge_vport *vport)
4073 {
4074 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4075 	    vport->rss_tuple_sets.ipv4_udp_en ||
4076 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4077 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4078 	    vport->rss_tuple_sets.ipv6_udp_en ||
4079 	    vport->rss_tuple_sets.ipv6_sctp_en)
4080 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4081 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4082 		 vport->rss_tuple_sets.ipv6_fragment_en)
4083 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4084 	else
4085 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4086 }
4087 
4088 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4089 {
4090 	struct hclge_rss_input_tuple_cmd *req;
4091 	struct hclge_desc desc;
4092 	int ret;
4093 
4094 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4095 
4096 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4097 
	/* Get the tuple cfg from the PF (vport 0) */
4099 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4100 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4101 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4102 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4103 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4104 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4105 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4106 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4107 	hclge_get_rss_type(&hdev->vport[0]);
4108 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4109 	if (ret)
4110 		dev_err(&hdev->pdev->dev,
4111 			"Configure rss input fail, status = %d\n", ret);
4112 	return ret;
4113 }
4114 
4115 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4116 			 u8 *key, u8 *hfunc)
4117 {
4118 	struct hclge_vport *vport = hclge_get_vport(handle);
4119 	int i;
4120 
4121 	/* Get hash algorithm */
4122 	if (hfunc) {
4123 		switch (vport->rss_algo) {
4124 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4125 			*hfunc = ETH_RSS_HASH_TOP;
4126 			break;
4127 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4128 			*hfunc = ETH_RSS_HASH_XOR;
4129 			break;
4130 		default:
4131 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4132 			break;
4133 		}
4134 	}
4135 
4136 	/* Get the RSS Key required by the user */
4137 	if (key)
4138 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4139 
	/* Get the indirection table */
4141 	if (indir)
4142 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];
4144 
4145 	return 0;
4146 }
4147 
4148 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
4150 {
4151 	struct hclge_vport *vport = hclge_get_vport(handle);
4152 	struct hclge_dev *hdev = vport->back;
4153 	u8 hash_algo;
4154 	int ret, i;
4155 
	/* Set the RSS Hash Key if specified by the user */
4157 	if (key) {
4158 		switch (hfunc) {
4159 		case ETH_RSS_HASH_TOP:
4160 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4161 			break;
4162 		case ETH_RSS_HASH_XOR:
4163 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4164 			break;
4165 		case ETH_RSS_HASH_NO_CHANGE:
4166 			hash_algo = vport->rss_algo;
4167 			break;
4168 		default:
4169 			return -EINVAL;
4170 		}
4171 
4172 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4173 		if (ret)
4174 			return ret;
4175 
		/* Update the shadow RSS key with the user specified key */
4177 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4178 		vport->rss_algo = hash_algo;
4179 	}
4180 
4181 	/* Update the shadow RSS table with user specified qids */
4182 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4183 		vport->rss_indirection_tbl[i] = indir[i];
4184 
4185 	/* Update the hardware */
4186 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4187 }
4188 
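/* Translate the ethtool RXH_* flags in @nfc into the tuple bits used by the
 * RSS input tuple command.
 */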
4189 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4190 {
4191 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4192 
4193 	if (nfc->data & RXH_L4_B_2_3)
4194 		hash_sets |= HCLGE_D_PORT_BIT;
4195 	else
4196 		hash_sets &= ~HCLGE_D_PORT_BIT;
4197 
4198 	if (nfc->data & RXH_IP_SRC)
4199 		hash_sets |= HCLGE_S_IP_BIT;
4200 	else
4201 		hash_sets &= ~HCLGE_S_IP_BIT;
4202 
4203 	if (nfc->data & RXH_IP_DST)
4204 		hash_sets |= HCLGE_D_IP_BIT;
4205 	else
4206 		hash_sets &= ~HCLGE_D_IP_BIT;
4207 
4208 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4209 		hash_sets |= HCLGE_V_TAG_BIT;
4210 
4211 	return hash_sets;
4212 }
4213 
4214 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4215 			       struct ethtool_rxnfc *nfc)
4216 {
4217 	struct hclge_vport *vport = hclge_get_vport(handle);
4218 	struct hclge_dev *hdev = vport->back;
4219 	struct hclge_rss_input_tuple_cmd *req;
4220 	struct hclge_desc desc;
4221 	u8 tuple_sets;
4222 	int ret;
4223 
4224 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4225 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4226 		return -EINVAL;
4227 
4228 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4229 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4230 
4231 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4232 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4233 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4234 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4235 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4236 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4237 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4238 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4239 
4240 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4241 	switch (nfc->flow_type) {
4242 	case TCP_V4_FLOW:
4243 		req->ipv4_tcp_en = tuple_sets;
4244 		break;
4245 	case TCP_V6_FLOW:
4246 		req->ipv6_tcp_en = tuple_sets;
4247 		break;
4248 	case UDP_V4_FLOW:
4249 		req->ipv4_udp_en = tuple_sets;
4250 		break;
4251 	case UDP_V6_FLOW:
4252 		req->ipv6_udp_en = tuple_sets;
4253 		break;
4254 	case SCTP_V4_FLOW:
4255 		req->ipv4_sctp_en = tuple_sets;
4256 		break;
4257 	case SCTP_V6_FLOW:
4258 		if ((nfc->data & RXH_L4_B_0_1) ||
4259 		    (nfc->data & RXH_L4_B_2_3))
4260 			return -EINVAL;
4261 
4262 		req->ipv6_sctp_en = tuple_sets;
4263 		break;
4264 	case IPV4_FLOW:
4265 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4266 		break;
4267 	case IPV6_FLOW:
4268 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4269 		break;
4270 	default:
4271 		return -EINVAL;
4272 	}
4273 
4274 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4275 	if (ret) {
4276 		dev_err(&hdev->pdev->dev,
4277 			"Set rss tuple fail, status = %d\n", ret);
4278 		return ret;
4279 	}
4280 
4281 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4282 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4283 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4284 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4285 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4286 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4287 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4288 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4289 	hclge_get_rss_type(vport);
4290 	return 0;
4291 }
4292 
4293 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4294 			       struct ethtool_rxnfc *nfc)
4295 {
4296 	struct hclge_vport *vport = hclge_get_vport(handle);
4297 	u8 tuple_sets;
4298 
4299 	nfc->data = 0;
4300 
4301 	switch (nfc->flow_type) {
4302 	case TCP_V4_FLOW:
4303 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4304 		break;
4305 	case UDP_V4_FLOW:
4306 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4307 		break;
4308 	case TCP_V6_FLOW:
4309 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4310 		break;
4311 	case UDP_V6_FLOW:
4312 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4313 		break;
4314 	case SCTP_V4_FLOW:
4315 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4316 		break;
4317 	case SCTP_V6_FLOW:
4318 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4319 		break;
4320 	case IPV4_FLOW:
4321 	case IPV6_FLOW:
4322 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4323 		break;
4324 	default:
4325 		return -EINVAL;
4326 	}
4327 
4328 	if (!tuple_sets)
4329 		return 0;
4330 
4331 	if (tuple_sets & HCLGE_D_PORT_BIT)
4332 		nfc->data |= RXH_L4_B_2_3;
4333 	if (tuple_sets & HCLGE_S_PORT_BIT)
4334 		nfc->data |= RXH_L4_B_0_1;
4335 	if (tuple_sets & HCLGE_D_IP_BIT)
4336 		nfc->data |= RXH_IP_DST;
4337 	if (tuple_sets & HCLGE_S_IP_BIT)
4338 		nfc->data |= RXH_IP_SRC;
4339 
4340 	return 0;
4341 }
4342 
4343 static int hclge_get_tc_size(struct hnae3_handle *handle)
4344 {
4345 	struct hclge_vport *vport = hclge_get_vport(handle);
4346 	struct hclge_dev *hdev = vport->back;
4347 
4348 	return hdev->rss_size_max;
4349 }
4350 
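/* Program the whole RSS configuration (indirection table, hash key and
 * algorithm, input tuples and per-TC mode) into hardware from the PF
 * vport's shadow settings.
 */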
4351 int hclge_rss_init_hw(struct hclge_dev *hdev)
4352 {
4353 	struct hclge_vport *vport = hdev->vport;
4354 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4355 	u16 rss_size = vport[0].alloc_rss_size;
4356 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4357 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4358 	u8 *key = vport[0].rss_hash_key;
4359 	u8 hfunc = vport[0].rss_algo;
4360 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4361 	u16 roundup_size;
4362 	unsigned int i;
4363 	int ret;
4364 
4365 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4366 	if (ret)
4367 		return ret;
4368 
4369 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4370 	if (ret)
4371 		return ret;
4372 
4373 	ret = hclge_set_rss_input_tuple(hdev);
4374 	if (ret)
4375 		return ret;
4376 
	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
4381 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4382 		dev_err(&hdev->pdev->dev,
4383 			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
4384 			rss_size);
4385 		return -EINVAL;
4386 	}
4387 
4388 	roundup_size = roundup_pow_of_two(rss_size);
4389 	roundup_size = ilog2(roundup_size);
4390 
4391 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4392 		tc_valid[i] = 0;
4393 
4394 		if (!(hdev->hw_tc_map & BIT(i)))
4395 			continue;
4396 
4397 		tc_valid[i] = 1;
4398 		tc_size[i] = roundup_size;
4399 		tc_offset[i] = rss_size * i;
4400 	}
4401 
4402 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4403 }
4404 
4405 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4406 {
4407 	struct hclge_vport *vport = hdev->vport;
4408 	int i, j;
4409 
4410 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4411 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4412 			vport[j].rss_indirection_tbl[i] =
4413 				i % vport[j].alloc_rss_size;
4414 	}
4415 }
4416 
4417 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4418 {
4419 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4420 	struct hclge_vport *vport = hdev->vport;
4421 
4422 	if (hdev->pdev->revision >= 0x21)
4423 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4424 
4425 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4426 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4427 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4428 		vport[i].rss_tuple_sets.ipv4_udp_en =
4429 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4430 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4431 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4432 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4433 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4434 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4435 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4436 		vport[i].rss_tuple_sets.ipv6_udp_en =
4437 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4438 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4439 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4440 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4441 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4442 
4443 		vport[i].rss_algo = rss_algo;
4444 
4445 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4446 		       HCLGE_RSS_KEY_SIZE);
4447 	}
4448 
4449 	hclge_rss_indir_init_cfg(hdev);
4450 }
4451 
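/* Map (@en is true) or unmap (@en is false) the rings in @ring_chain to the
 * given vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are carried per
 * command descriptor, so a long chain may be sent in several commands.
 */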
4452 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4453 				int vector_id, bool en,
4454 				struct hnae3_ring_chain_node *ring_chain)
4455 {
4456 	struct hclge_dev *hdev = vport->back;
4457 	struct hnae3_ring_chain_node *node;
4458 	struct hclge_desc desc;
4459 	struct hclge_ctrl_vector_chain_cmd *req =
4460 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4461 	enum hclge_cmd_status status;
4462 	enum hclge_opcode_type op;
4463 	u16 tqp_type_and_id;
4464 	int i;
4465 
4466 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4467 	hclge_cmd_setup_basic_desc(&desc, op, false);
4468 	req->int_vector_id = vector_id;
4469 
4470 	i = 0;
4471 	for (node = ring_chain; node; node = node->next) {
4472 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4474 				HCLGE_INT_TYPE_S,
4475 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4476 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4477 				HCLGE_TQP_ID_S, node->tqp_index);
4478 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4479 				HCLGE_INT_GL_IDX_S,
4480 				hnae3_get_field(node->int_gl_idx,
4481 						HNAE3_RING_GL_IDX_M,
4482 						HNAE3_RING_GL_IDX_S));
4483 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4484 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4485 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4486 			req->vfid = vport->vport_id;
4487 
4488 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4489 			if (status) {
4490 				dev_err(&hdev->pdev->dev,
4491 					"Map TQP fail, status is %d.\n",
4492 					status);
4493 				return -EIO;
4494 			}
4495 			i = 0;
4496 
			hclge_cmd_setup_basic_desc(&desc, op, false);
4500 			req->int_vector_id = vector_id;
4501 		}
4502 	}
4503 
4504 	if (i > 0) {
4505 		req->int_cause_num = i;
4506 		req->vfid = vport->vport_id;
4507 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4508 		if (status) {
4509 			dev_err(&hdev->pdev->dev,
4510 				"Map TQP fail, status is %d.\n", status);
4511 			return -EIO;
4512 		}
4513 	}
4514 
4515 	return 0;
4516 }
4517 
4518 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4519 				    struct hnae3_ring_chain_node *ring_chain)
4520 {
4521 	struct hclge_vport *vport = hclge_get_vport(handle);
4522 	struct hclge_dev *hdev = vport->back;
4523 	int vector_id;
4524 
4525 	vector_id = hclge_get_vector_index(hdev, vector);
4526 	if (vector_id < 0) {
4527 		dev_err(&hdev->pdev->dev,
4528 			"Get vector index fail. vector_id =%d\n", vector_id);
4529 		return vector_id;
4530 	}
4531 
4532 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4533 }
4534 
4535 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4536 				       struct hnae3_ring_chain_node *ring_chain)
4537 {
4538 	struct hclge_vport *vport = hclge_get_vport(handle);
4539 	struct hclge_dev *hdev = vport->back;
4540 	int vector_id, ret;
4541 
4542 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4543 		return 0;
4544 
4545 	vector_id = hclge_get_vector_index(hdev, vector);
4546 	if (vector_id < 0) {
4547 		dev_err(&handle->pdev->dev,
4548 			"Get vector index fail. ret =%d\n", vector_id);
4549 		return vector_id;
4550 	}
4551 
4552 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4553 	if (ret)
4554 		dev_err(&handle->pdev->dev,
4555 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4556 			vector_id, ret);
4557 
4558 	return ret;
4559 }
4560 
4561 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4562 			       struct hclge_promisc_param *param)
4563 {
4564 	struct hclge_promisc_cfg_cmd *req;
4565 	struct hclge_desc desc;
4566 	int ret;
4567 
4568 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4569 
4570 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4571 	req->vf_id = param->vf_id;
4572 
	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision 0x20; newer revisions support them. Setting these two
	 * fields does not cause an error when the driver sends the command to
	 * the firmware on revision 0x20.
	 */
4578 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4579 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4580 
4581 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4582 	if (ret)
4583 		dev_err(&hdev->pdev->dev,
4584 			"Set promisc mode fail, status is %d.\n", ret);
4585 
4586 	return ret;
4587 }
4588 
4589 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4590 			      bool en_mc, bool en_bc, int vport_id)
4591 {
4592 	if (!param)
4593 		return;
4594 
4595 	memset(param, 0, sizeof(struct hclge_promisc_param));
4596 	if (en_uc)
4597 		param->enable = HCLGE_PROMISC_EN_UC;
4598 	if (en_mc)
4599 		param->enable |= HCLGE_PROMISC_EN_MC;
4600 	if (en_bc)
4601 		param->enable |= HCLGE_PROMISC_EN_BC;
4602 	param->vf_id = vport_id;
4603 }
4604 
4605 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4606 				  bool en_mc_pmc)
4607 {
4608 	struct hclge_vport *vport = hclge_get_vport(handle);
4609 	struct hclge_dev *hdev = vport->back;
4610 	struct hclge_promisc_param param;
4611 	bool en_bc_pmc = true;
4612 
	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode
	 */
4617 	if (handle->pdev->revision == 0x20)
4618 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4619 
4620 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4621 				 vport->vport_id);
4622 	return hclge_cmd_set_promisc_mode(hdev, &param);
4623 }
4624 
4625 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4626 {
4627 	struct hclge_get_fd_mode_cmd *req;
4628 	struct hclge_desc desc;
4629 	int ret;
4630 
4631 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4632 
4633 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4634 
4635 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4636 	if (ret) {
4637 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4638 		return ret;
4639 	}
4640 
4641 	*fd_mode = req->mode;
4642 
4643 	return ret;
4644 }
4645 
4646 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4647 				   u32 *stage1_entry_num,
4648 				   u32 *stage2_entry_num,
4649 				   u16 *stage1_counter_num,
4650 				   u16 *stage2_counter_num)
4651 {
4652 	struct hclge_get_fd_allocation_cmd *req;
4653 	struct hclge_desc desc;
4654 	int ret;
4655 
4656 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4657 
4658 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4659 
4660 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4661 	if (ret) {
4662 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4663 			ret);
4664 		return ret;
4665 	}
4666 
4667 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4668 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4669 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4670 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4671 
4672 	return ret;
4673 }
4674 
4675 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4676 {
4677 	struct hclge_set_fd_key_config_cmd *req;
4678 	struct hclge_fd_key_cfg *stage;
4679 	struct hclge_desc desc;
4680 	int ret;
4681 
4682 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4683 
4684 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4685 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4686 	req->stage = stage_num;
4687 	req->key_select = stage->key_sel;
4688 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4689 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4690 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4691 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4692 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4693 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4694 
4695 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4696 	if (ret)
4697 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4698 
4699 	return ret;
4700 }
4701 
4702 static int hclge_init_fd_config(struct hclge_dev *hdev)
4703 {
4704 #define LOW_2_WORDS		0x03
4705 	struct hclge_fd_key_cfg *key_cfg;
4706 	int ret;
4707 
4708 	if (!hnae3_dev_fd_supported(hdev))
4709 		return 0;
4710 
4711 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4712 	if (ret)
4713 		return ret;
4714 
4715 	switch (hdev->fd_cfg.fd_mode) {
4716 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4717 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4718 		break;
4719 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4720 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4721 		break;
4722 	default:
4723 		dev_err(&hdev->pdev->dev,
4724 			"Unsupported flow director mode %d\n",
4725 			hdev->fd_cfg.fd_mode);
4726 		return -EOPNOTSUPP;
4727 	}
4728 
4729 	hdev->fd_cfg.proto_support =
4730 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4731 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4732 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4734 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4735 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4736 	key_cfg->outer_sipv6_word_en = 0;
4737 	key_cfg->outer_dipv6_word_en = 0;
4738 
4739 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4740 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4741 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4742 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4743 
	/* If the max 400-bit key is used, we can support tuples for ether type */
4745 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4746 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4747 		key_cfg->tuple_active |=
4748 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4749 	}
4750 
	/* roce_type is used to filter roce frames,
	 * dst_vport is used to specify the rule
	 */
4754 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4755 
4756 	ret = hclge_get_fd_allocation(hdev,
4757 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4758 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4759 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4760 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4761 	if (ret)
4762 		return ret;
4763 
4764 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4765 }
4766 
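/* Write one flow director TCAM entry, whose key spans three command
 * descriptors. @sel_x selects whether the x or y part of the key is written,
 * and @is_add controls the entry valid flag.
 */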
4767 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4768 				int loc, u8 *key, bool is_add)
4769 {
4770 	struct hclge_fd_tcam_config_1_cmd *req1;
4771 	struct hclge_fd_tcam_config_2_cmd *req2;
4772 	struct hclge_fd_tcam_config_3_cmd *req3;
4773 	struct hclge_desc desc[3];
4774 	int ret;
4775 
4776 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4777 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4778 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4779 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4780 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4781 
4782 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4783 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4784 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4785 
4786 	req1->stage = stage;
4787 	req1->xy_sel = sel_x ? 1 : 0;
4788 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4789 	req1->index = cpu_to_le32(loc);
4790 	req1->entry_vld = sel_x ? is_add : 0;
4791 
4792 	if (key) {
4793 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4794 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4795 		       sizeof(req2->tcam_data));
4796 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4797 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4798 	}
4799 
4800 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4801 	if (ret)
4802 		dev_err(&hdev->pdev->dev,
4803 			"config tcam key fail, ret=%d\n",
4804 			ret);
4805 
4806 	return ret;
4807 }
4808 
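/* Configure the action data (AD) of a flow director rule: drop the packet or
 * forward it to a queue, optional counter usage, and write the rule id back
 * to the RX buffer descriptor.
 */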
4809 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4810 			      struct hclge_fd_ad_data *action)
4811 {
4812 	struct hclge_fd_ad_config_cmd *req;
4813 	struct hclge_desc desc;
4814 	u64 ad_data = 0;
4815 	int ret;
4816 
4817 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4818 
4819 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4820 	req->index = cpu_to_le32(loc);
4821 	req->stage = stage;
4822 
4823 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4824 		      action->write_rule_id_to_bd);
4825 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4826 			action->rule_id);
4827 	ad_data <<= 32;
4828 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4829 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4830 		      action->forward_to_direct_queue);
4831 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4832 			action->queue_id);
4833 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4834 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4835 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4836 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4837 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4838 			action->counter_id);
4839 
4840 	req->ad_data = cpu_to_le64(ad_data);
4841 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4842 	if (ret)
4843 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4844 
4845 	return ret;
4846 }
4847 
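/* Convert one tuple of @rule into the TCAM x/y key format using calc_x() and
 * calc_y(). Returns true if the tuple occupies key space (even when it is
 * masked out as unused), false if it does not.
 */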
4848 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4849 				   struct hclge_fd_rule *rule)
4850 {
4851 	u16 tmp_x_s, tmp_y_s;
4852 	u32 tmp_x_l, tmp_y_l;
4853 	int i;
4854 
4855 	if (rule->unused_tuple & tuple_bit)
4856 		return true;
4857 
4858 	switch (tuple_bit) {
4859 	case 0:
4860 		return false;
4861 	case BIT(INNER_DST_MAC):
4862 		for (i = 0; i < ETH_ALEN; i++) {
4863 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4864 			       rule->tuples_mask.dst_mac[i]);
4865 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4866 			       rule->tuples_mask.dst_mac[i]);
4867 		}
4868 
4869 		return true;
4870 	case BIT(INNER_SRC_MAC):
4871 		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
4876 		}
4877 
4878 		return true;
4879 	case BIT(INNER_VLAN_TAG_FST):
4880 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4881 		       rule->tuples_mask.vlan_tag1);
4882 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4883 		       rule->tuples_mask.vlan_tag1);
4884 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4885 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4886 
4887 		return true;
4888 	case BIT(INNER_ETH_TYPE):
4889 		calc_x(tmp_x_s, rule->tuples.ether_proto,
4890 		       rule->tuples_mask.ether_proto);
4891 		calc_y(tmp_y_s, rule->tuples.ether_proto,
4892 		       rule->tuples_mask.ether_proto);
4893 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4894 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4895 
4896 		return true;
4897 	case BIT(INNER_IP_TOS):
4898 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4899 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4900 
4901 		return true;
4902 	case BIT(INNER_IP_PROTO):
4903 		calc_x(*key_x, rule->tuples.ip_proto,
4904 		       rule->tuples_mask.ip_proto);
4905 		calc_y(*key_y, rule->tuples.ip_proto,
4906 		       rule->tuples_mask.ip_proto);
4907 
4908 		return true;
4909 	case BIT(INNER_SRC_IP):
4910 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4911 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4912 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4913 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
4914 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4915 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4916 
4917 		return true;
4918 	case BIT(INNER_DST_IP):
4919 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4920 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4921 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4922 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
4923 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4924 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4925 
4926 		return true;
4927 	case BIT(INNER_SRC_PORT):
4928 		calc_x(tmp_x_s, rule->tuples.src_port,
4929 		       rule->tuples_mask.src_port);
4930 		calc_y(tmp_y_s, rule->tuples.src_port,
4931 		       rule->tuples_mask.src_port);
4932 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4933 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4934 
4935 		return true;
4936 	case BIT(INNER_DST_PORT):
4937 		calc_x(tmp_x_s, rule->tuples.dst_port,
4938 		       rule->tuples_mask.dst_port);
4939 		calc_y(tmp_y_s, rule->tuples.dst_port,
4940 		       rule->tuples_mask.dst_port);
4941 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4942 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4943 
4944 		return true;
4945 	default:
4946 		return false;
4947 	}
4948 }
4949 
4950 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4951 				 u8 vf_id, u8 network_port_id)
4952 {
4953 	u32 port_number = 0;
4954 
4955 	if (port_type == HOST_PORT) {
4956 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4957 				pf_id);
4958 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4959 				vf_id);
4960 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4961 	} else {
4962 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4963 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
4964 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4965 	}
4966 
4967 	return port_number;
4968 }
4969 
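/* Build the meta data portion of the key (packet type and destination vport)
 * and convert it into the TCAM x/y format. The used meta data bits are
 * shifted up to the MSB end of the 32-bit meta data region.
 */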
4970 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4971 				       __le32 *key_x, __le32 *key_y,
4972 				       struct hclge_fd_rule *rule)
4973 {
4974 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4975 	u8 cur_pos = 0, tuple_size, shift_bits;
4976 	unsigned int i;
4977 
4978 	for (i = 0; i < MAX_META_DATA; i++) {
4979 		tuple_size = meta_data_key_info[i].key_length;
4980 		tuple_bit = key_cfg->meta_data_active & BIT(i);
4981 
4982 		switch (tuple_bit) {
4983 		case BIT(ROCE_TYPE):
4984 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4985 			cur_pos += tuple_size;
4986 			break;
4987 		case BIT(DST_VPORT):
4988 			port_number = hclge_get_port_number(HOST_PORT, 0,
4989 							    rule->vf_id, 0);
4990 			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size - 1, cur_pos),
4992 					cur_pos, port_number);
4993 			cur_pos += tuple_size;
4994 			break;
4995 		default:
4996 			break;
4997 		}
4998 	}
4999 
5000 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5001 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5002 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5003 
5004 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5005 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5006 }
5007 
/* A complete key is a combination of the meta data key and the tuple key.
 * The meta data key is stored in the MSB region, the tuple key is stored in
 * the LSB region, and the unused bits are filled with 0.
 */
5012 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5013 			    struct hclge_fd_rule *rule)
5014 {
5015 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5016 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5017 	u8 *cur_key_x, *cur_key_y;
5018 	unsigned int i;
5019 	int ret, tuple_size;
5020 	u8 meta_data_region;
5021 
5022 	memset(key_x, 0, sizeof(key_x));
5023 	memset(key_y, 0, sizeof(key_y));
5024 	cur_key_x = key_x;
5025 	cur_key_y = key_y;
5026 
	for (i = 0; i < MAX_TUPLE; i++) {
5028 		bool tuple_valid;
5029 		u32 check_tuple;
5030 
5031 		tuple_size = tuple_key_info[i].key_length / 8;
5032 		check_tuple = key_cfg->tuple_active & BIT(i);
5033 
5034 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5035 						     cur_key_y, rule);
5036 		if (tuple_valid) {
5037 			cur_key_x += tuple_size;
5038 			cur_key_y += tuple_size;
5039 		}
5040 	}
5041 
5042 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5043 			MAX_META_DATA_LENGTH / 8;
5044 
5045 	hclge_fd_convert_meta_data(key_cfg,
5046 				   (__le32 *)(key_x + meta_data_region),
5047 				   (__le32 *)(key_y + meta_data_region),
5048 				   rule);
5049 
5050 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5051 				   true);
5052 	if (ret) {
5053 		dev_err(&hdev->pdev->dev,
5054 			"fd key_y config fail, loc=%d, ret=%d\n",
			rule->location, ret);
5056 		return ret;
5057 	}
5058 
5059 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5060 				   true);
5061 	if (ret)
5062 		dev_err(&hdev->pdev->dev,
5063 			"fd key_x config fail, loc=%d, ret=%d\n",
			rule->location, ret);
5065 	return ret;
5066 }
5067 
5068 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5069 			       struct hclge_fd_rule *rule)
5070 {
5071 	struct hclge_fd_ad_data ad_data;
5072 
5073 	ad_data.ad_id = rule->location;
5074 
5075 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5076 		ad_data.drop_packet = true;
5077 		ad_data.forward_to_direct_queue = false;
5078 		ad_data.queue_id = 0;
5079 	} else {
5080 		ad_data.drop_packet = false;
5081 		ad_data.forward_to_direct_queue = true;
5082 		ad_data.queue_id = rule->queue_id;
5083 	}
5084 
5085 	ad_data.use_counter = false;
5086 	ad_data.counter_id = 0;
5087 
5088 	ad_data.use_next_stage = false;
5089 	ad_data.next_input_key = 0;
5090 
5091 	ad_data.write_rule_id_to_bd = true;
5092 	ad_data.rule_id = rule->location;
5093 
5094 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5095 }
5096 
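/* Validate an ethtool flow spec for the flow director and work out which
 * tuples are left unused by the rule, returned through @unused.
 */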
5097 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5098 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5099 {
5100 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5101 	struct ethtool_usrip4_spec *usr_ip4_spec;
5102 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5103 	struct ethtool_usrip6_spec *usr_ip6_spec;
5104 	struct ethhdr *ether_spec;
5105 
5106 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5107 		return -EINVAL;
5108 
5109 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5110 		return -EOPNOTSUPP;
5111 
5112 	if ((fs->flow_type & FLOW_EXT) &&
5113 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5114 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5115 		return -EOPNOTSUPP;
5116 	}
5117 
5118 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5119 	case SCTP_V4_FLOW:
5120 	case TCP_V4_FLOW:
5121 	case UDP_V4_FLOW:
5122 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5123 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5124 
5125 		if (!tcp_ip4_spec->ip4src)
5126 			*unused |= BIT(INNER_SRC_IP);
5127 
5128 		if (!tcp_ip4_spec->ip4dst)
5129 			*unused |= BIT(INNER_DST_IP);
5130 
5131 		if (!tcp_ip4_spec->psrc)
5132 			*unused |= BIT(INNER_SRC_PORT);
5133 
5134 		if (!tcp_ip4_spec->pdst)
5135 			*unused |= BIT(INNER_DST_PORT);
5136 
5137 		if (!tcp_ip4_spec->tos)
5138 			*unused |= BIT(INNER_IP_TOS);
5139 
5140 		break;
5141 	case IP_USER_FLOW:
5142 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5143 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5144 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5145 
5146 		if (!usr_ip4_spec->ip4src)
5147 			*unused |= BIT(INNER_SRC_IP);
5148 
5149 		if (!usr_ip4_spec->ip4dst)
5150 			*unused |= BIT(INNER_DST_IP);
5151 
5152 		if (!usr_ip4_spec->tos)
5153 			*unused |= BIT(INNER_IP_TOS);
5154 
5155 		if (!usr_ip4_spec->proto)
5156 			*unused |= BIT(INNER_IP_PROTO);
5157 
5158 		if (usr_ip4_spec->l4_4_bytes)
5159 			return -EOPNOTSUPP;
5160 
5161 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5162 			return -EOPNOTSUPP;
5163 
5164 		break;
5165 	case SCTP_V6_FLOW:
5166 	case TCP_V6_FLOW:
5167 	case UDP_V6_FLOW:
5168 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5169 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5170 			BIT(INNER_IP_TOS);
5171 
		/* check whether the src/dst ip addresses are used */
5173 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5174 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5175 			*unused |= BIT(INNER_SRC_IP);
5176 
5177 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5178 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5179 			*unused |= BIT(INNER_DST_IP);
5180 
5181 		if (!tcp_ip6_spec->psrc)
5182 			*unused |= BIT(INNER_SRC_PORT);
5183 
5184 		if (!tcp_ip6_spec->pdst)
5185 			*unused |= BIT(INNER_DST_PORT);
5186 
5187 		if (tcp_ip6_spec->tclass)
5188 			return -EOPNOTSUPP;
5189 
5190 		break;
5191 	case IPV6_USER_FLOW:
5192 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5193 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5194 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5195 			BIT(INNER_DST_PORT);
5196 
		/* check whether the src/dst ip addresses are used */
5198 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5199 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5200 			*unused |= BIT(INNER_SRC_IP);
5201 
5202 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5203 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5204 			*unused |= BIT(INNER_DST_IP);
5205 
5206 		if (!usr_ip6_spec->l4_proto)
5207 			*unused |= BIT(INNER_IP_PROTO);
5208 
5209 		if (usr_ip6_spec->tclass)
5210 			return -EOPNOTSUPP;
5211 
5212 		if (usr_ip6_spec->l4_4_bytes)
5213 			return -EOPNOTSUPP;
5214 
5215 		break;
5216 	case ETHER_FLOW:
5217 		ether_spec = &fs->h_u.ether_spec;
5218 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5219 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5220 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5221 
5222 		if (is_zero_ether_addr(ether_spec->h_source))
5223 			*unused |= BIT(INNER_SRC_MAC);
5224 
5225 		if (is_zero_ether_addr(ether_spec->h_dest))
5226 			*unused |= BIT(INNER_DST_MAC);
5227 
5228 		if (!ether_spec->h_proto)
5229 			*unused |= BIT(INNER_ETH_TYPE);
5230 
5231 		break;
5232 	default:
5233 		return -EOPNOTSUPP;
5234 	}
5235 
5236 	if ((fs->flow_type & FLOW_EXT)) {
5237 		if (fs->h_ext.vlan_etype)
5238 			return -EOPNOTSUPP;
5239 		if (!fs->h_ext.vlan_tci)
5240 			*unused |= BIT(INNER_VLAN_TAG_FST);
5241 
5242 		if (fs->m_ext.vlan_tci) {
5243 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5244 				return -EINVAL;
5245 		}
5246 	} else {
5247 		*unused |= BIT(INNER_VLAN_TAG_FST);
5248 	}
5249 
5250 	if (fs->flow_type & FLOW_MAC_EXT) {
5251 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5252 			return -EOPNOTSUPP;
5253 
5254 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5255 			*unused |= BIT(INNER_DST_MAC);
5256 		else
5257 			*unused &= ~(BIT(INNER_DST_MAC));
5258 	}
5259 
5260 	return 0;
5261 }
5262 
5263 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5264 {
5265 	struct hclge_fd_rule *rule = NULL;
5266 	struct hlist_node *node2;
5267 
5268 	spin_lock_bh(&hdev->fd_rule_lock);
5269 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5270 		if (rule->location >= location)
5271 			break;
5272 	}
5273 
5274 	spin_unlock_bh(&hdev->fd_rule_lock);
5275 
	return rule && rule->location == location;
5277 }
5278 
/* The caller must hold fd_rule_lock before calling this function */
5280 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5281 				     struct hclge_fd_rule *new_rule,
5282 				     u16 location,
5283 				     bool is_add)
5284 {
5285 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5286 	struct hlist_node *node2;
5287 
5288 	if (is_add && !new_rule)
5289 		return -EINVAL;
5290 
5291 	hlist_for_each_entry_safe(rule, node2,
5292 				  &hdev->fd_rule_list, rule_node) {
5293 		if (rule->location >= location)
5294 			break;
5295 		parent = rule;
5296 	}
5297 
5298 	if (rule && rule->location == location) {
5299 		hlist_del(&rule->rule_node);
5300 		kfree(rule);
5301 		hdev->hclge_fd_rule_num--;
5302 
5303 		if (!is_add) {
5304 			if (!hdev->hclge_fd_rule_num)
5305 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5306 			clear_bit(location, hdev->fd_bmap);
5307 
5308 			return 0;
5309 		}
5310 	} else if (!is_add) {
5311 		dev_err(&hdev->pdev->dev,
			"delete fail, rule %d does not exist\n",
5313 			location);
5314 		return -EINVAL;
5315 	}
5316 
5317 	INIT_HLIST_NODE(&new_rule->rule_node);
5318 
5319 	if (parent)
5320 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5321 	else
5322 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5323 
5324 	set_bit(location, hdev->fd_bmap);
5325 	hdev->hclge_fd_rule_num++;
5326 	hdev->fd_active_type = new_rule->rule_type;
5327 
5328 	return 0;
5329 }
5330 
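/* translate an ethtool flow spec into the driver's internal tuple
 * representation (host byte order); the second switch fills in the
 * implicit L4 protocol for the TCP/UDP/SCTP flow types.
 */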
5331 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5332 			      struct ethtool_rx_flow_spec *fs,
5333 			      struct hclge_fd_rule *rule)
5334 {
5335 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5336 
5337 	switch (flow_type) {
5338 	case SCTP_V4_FLOW:
5339 	case TCP_V4_FLOW:
5340 	case UDP_V4_FLOW:
5341 		rule->tuples.src_ip[IPV4_INDEX] =
5342 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5343 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5344 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5345 
5346 		rule->tuples.dst_ip[IPV4_INDEX] =
5347 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5348 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5349 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5350 
5351 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5352 		rule->tuples_mask.src_port =
5353 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5354 
5355 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5356 		rule->tuples_mask.dst_port =
5357 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5358 
5359 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5360 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5361 
5362 		rule->tuples.ether_proto = ETH_P_IP;
5363 		rule->tuples_mask.ether_proto = 0xFFFF;
5364 
5365 		break;
5366 	case IP_USER_FLOW:
5367 		rule->tuples.src_ip[IPV4_INDEX] =
5368 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5369 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5370 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5371 
5372 		rule->tuples.dst_ip[IPV4_INDEX] =
5373 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5374 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5375 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5376 
5377 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5378 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5379 
5380 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5381 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5382 
5383 		rule->tuples.ether_proto = ETH_P_IP;
5384 		rule->tuples_mask.ether_proto = 0xFFFF;
5385 
5386 		break;
5387 	case SCTP_V6_FLOW:
5388 	case TCP_V6_FLOW:
5389 	case UDP_V6_FLOW:
5390 		be32_to_cpu_array(rule->tuples.src_ip,
5391 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5392 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5393 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5394 
5395 		be32_to_cpu_array(rule->tuples.dst_ip,
5396 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5397 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5398 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5399 
5400 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5401 		rule->tuples_mask.src_port =
5402 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5403 
5404 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5405 		rule->tuples_mask.dst_port =
5406 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5407 
5408 		rule->tuples.ether_proto = ETH_P_IPV6;
5409 		rule->tuples_mask.ether_proto = 0xFFFF;
5410 
5411 		break;
5412 	case IPV6_USER_FLOW:
5413 		be32_to_cpu_array(rule->tuples.src_ip,
5414 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5415 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5416 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5417 
5418 		be32_to_cpu_array(rule->tuples.dst_ip,
5419 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5420 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5421 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5422 
5423 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5424 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5425 
5426 		rule->tuples.ether_proto = ETH_P_IPV6;
5427 		rule->tuples_mask.ether_proto = 0xFFFF;
5428 
5429 		break;
5430 	case ETHER_FLOW:
5431 		ether_addr_copy(rule->tuples.src_mac,
5432 				fs->h_u.ether_spec.h_source);
5433 		ether_addr_copy(rule->tuples_mask.src_mac,
5434 				fs->m_u.ether_spec.h_source);
5435 
5436 		ether_addr_copy(rule->tuples.dst_mac,
5437 				fs->h_u.ether_spec.h_dest);
5438 		ether_addr_copy(rule->tuples_mask.dst_mac,
5439 				fs->m_u.ether_spec.h_dest);
5440 
5441 		rule->tuples.ether_proto =
5442 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5443 		rule->tuples_mask.ether_proto =
5444 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5445 
5446 		break;
5447 	default:
5448 		return -EOPNOTSUPP;
5449 	}
5450 
5451 	switch (flow_type) {
5452 	case SCTP_V4_FLOW:
5453 	case SCTP_V6_FLOW:
5454 		rule->tuples.ip_proto = IPPROTO_SCTP;
5455 		rule->tuples_mask.ip_proto = 0xFF;
5456 		break;
5457 	case TCP_V4_FLOW:
5458 	case TCP_V6_FLOW:
5459 		rule->tuples.ip_proto = IPPROTO_TCP;
5460 		rule->tuples_mask.ip_proto = 0xFF;
5461 		break;
5462 	case UDP_V4_FLOW:
5463 	case UDP_V6_FLOW:
5464 		rule->tuples.ip_proto = IPPROTO_UDP;
5465 		rule->tuples_mask.ip_proto = 0xFF;
5466 		break;
5467 	default:
5468 		break;
5469 	}
5470 
5471 	if ((fs->flow_type & FLOW_EXT)) {
5472 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5473 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5474 	}
5475 
5476 	if (fs->flow_type & FLOW_MAC_EXT) {
5477 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5478 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5479 	}
5480 
5481 	return 0;
5482 }
5483 
/* the caller must hold fd_rule_lock */
5485 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5486 				struct hclge_fd_rule *rule)
5487 {
5488 	int ret;
5489 
5490 	if (!rule) {
5491 		dev_err(&hdev->pdev->dev,
5492 			"The flow director rule is NULL\n");
5493 		return -EINVAL;
5494 	}
5495 
	/* it never fails here, so there is no need to check the return value */
5497 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5498 
5499 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5500 	if (ret)
5501 		goto clear_rule;
5502 
5503 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5504 	if (ret)
5505 		goto clear_rule;
5506 
5507 	return 0;
5508 
5509 clear_rule:
5510 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5511 	return ret;
5512 }
5513 
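/* ethtool add path: validate the flow spec, resolve the destination
 * vport and queue from ring_cookie, then build the rule and program it
 * into the stage-1 flow director table under fd_rule_lock. Existing
 * aRFS rules are cleared first to avoid conflicts.
 */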
5514 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5515 			      struct ethtool_rxnfc *cmd)
5516 {
5517 	struct hclge_vport *vport = hclge_get_vport(handle);
5518 	struct hclge_dev *hdev = vport->back;
5519 	u16 dst_vport_id = 0, q_index = 0;
5520 	struct ethtool_rx_flow_spec *fs;
5521 	struct hclge_fd_rule *rule;
5522 	u32 unused = 0;
5523 	u8 action;
5524 	int ret;
5525 
5526 	if (!hnae3_dev_fd_supported(hdev))
5527 		return -EOPNOTSUPP;
5528 
5529 	if (!hdev->fd_en) {
5530 		dev_warn(&hdev->pdev->dev,
5531 			 "Please enable flow director first\n");
5532 		return -EOPNOTSUPP;
5533 	}
5534 
5535 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5536 
5537 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5538 	if (ret) {
5539 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5540 		return ret;
5541 	}
5542 
5543 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5544 		action = HCLGE_FD_ACTION_DROP_PACKET;
5545 	} else {
5546 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5547 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5548 		u16 tqps;
5549 
5550 		if (vf > hdev->num_req_vfs) {
5551 			dev_err(&hdev->pdev->dev,
5552 				"Error: vf id (%d) > max vf num (%d)\n",
5553 				vf, hdev->num_req_vfs);
5554 			return -EINVAL;
5555 		}
5556 
5557 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5558 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5559 
5560 		if (ring >= tqps) {
5561 			dev_err(&hdev->pdev->dev,
5562 				"Error: queue id (%d) > max tqp num (%d)\n",
5563 				ring, tqps - 1);
5564 			return -EINVAL;
5565 		}
5566 
5567 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5568 		q_index = ring;
5569 	}
5570 
5571 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5572 	if (!rule)
5573 		return -ENOMEM;
5574 
5575 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5576 	if (ret) {
5577 		kfree(rule);
5578 		return ret;
5579 	}
5580 
5581 	rule->flow_type = fs->flow_type;
5582 
5583 	rule->location = fs->location;
5584 	rule->unused_tuple = unused;
5585 	rule->vf_id = dst_vport_id;
5586 	rule->queue_id = q_index;
5587 	rule->action = action;
5588 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5589 
	/* to avoid rule conflicts, clear all aRFS rules when the user
	 * configures rules via ethtool
	 */
5593 	hclge_clear_arfs_rules(handle);
5594 
5595 	spin_lock_bh(&hdev->fd_rule_lock);
5596 	ret = hclge_fd_config_rule(hdev, rule);
5597 
5598 	spin_unlock_bh(&hdev->fd_rule_lock);
5599 
5600 	return ret;
5601 }
5602 
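/* ethtool delete path: disable the TCAM entry at the requested location
 * and remove the corresponding rule from the software rule list.
 */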
5603 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5604 			      struct ethtool_rxnfc *cmd)
5605 {
5606 	struct hclge_vport *vport = hclge_get_vport(handle);
5607 	struct hclge_dev *hdev = vport->back;
5608 	struct ethtool_rx_flow_spec *fs;
5609 	int ret;
5610 
5611 	if (!hnae3_dev_fd_supported(hdev))
5612 		return -EOPNOTSUPP;
5613 
5614 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5615 
5616 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5617 		return -EINVAL;
5618 
5619 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5620 		dev_err(&hdev->pdev->dev,
			"Delete failed, rule %d does not exist\n", fs->location);
5622 		return -ENOENT;
5623 	}
5624 
5625 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5626 				   NULL, false);
5627 	if (ret)
5628 		return ret;
5629 
5630 	spin_lock_bh(&hdev->fd_rule_lock);
5631 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5632 
5633 	spin_unlock_bh(&hdev->fd_rule_lock);
5634 
5635 	return ret;
5636 }
5637 
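/* disable every programmed stage-1 TCAM entry; when @clear_list is true,
 * also free the software rule list and reset the rule bookkeeping.
 */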
5638 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5639 				     bool clear_list)
5640 {
5641 	struct hclge_vport *vport = hclge_get_vport(handle);
5642 	struct hclge_dev *hdev = vport->back;
5643 	struct hclge_fd_rule *rule;
5644 	struct hlist_node *node;
5645 	u16 location;
5646 
5647 	if (!hnae3_dev_fd_supported(hdev))
5648 		return;
5649 
5650 	spin_lock_bh(&hdev->fd_rule_lock);
5651 	for_each_set_bit(location, hdev->fd_bmap,
5652 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5653 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5654 				     NULL, false);
5655 
5656 	if (clear_list) {
5657 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5658 					  rule_node) {
5659 			hlist_del(&rule->rule_node);
5660 			kfree(rule);
5661 		}
5662 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5663 		hdev->hclge_fd_rule_num = 0;
5664 		bitmap_zero(hdev->fd_bmap,
5665 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5666 	}
5667 
5668 	spin_unlock_bh(&hdev->fd_rule_lock);
5669 }
5670 
5671 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5672 {
5673 	struct hclge_vport *vport = hclge_get_vport(handle);
5674 	struct hclge_dev *hdev = vport->back;
5675 	struct hclge_fd_rule *rule;
5676 	struct hlist_node *node;
5677 	int ret;
5678 
	/* Return 0 here, because the reset error handling checks this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
5683 	if (!hnae3_dev_fd_supported(hdev))
5684 		return 0;
5685 
	/* if fd is disabled, there is no need to restore the rules when resetting */
5687 	if (!hdev->fd_en)
5688 		return 0;
5689 
5690 	spin_lock_bh(&hdev->fd_rule_lock);
5691 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5692 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5693 		if (!ret)
5694 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5695 
5696 		if (ret) {
5697 			dev_warn(&hdev->pdev->dev,
5698 				 "Restore rule %d failed, remove it\n",
5699 				 rule->location);
5700 			clear_bit(rule->location, hdev->fd_bmap);
5701 			hlist_del(&rule->rule_node);
5702 			kfree(rule);
5703 			hdev->hclge_fd_rule_num--;
5704 		}
5705 	}
5706 
5707 	if (hdev->hclge_fd_rule_num)
5708 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5709 
5710 	spin_unlock_bh(&hdev->fd_rule_lock);
5711 
5712 	return 0;
5713 }
5714 
5715 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5716 				 struct ethtool_rxnfc *cmd)
5717 {
5718 	struct hclge_vport *vport = hclge_get_vport(handle);
5719 	struct hclge_dev *hdev = vport->back;
5720 
5721 	if (!hnae3_dev_fd_supported(hdev))
5722 		return -EOPNOTSUPP;
5723 
5724 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5725 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5726 
5727 	return 0;
5728 }
5729 
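/* fill an ethtool flow spec from a stored rule; tuples marked as unused
 * report a zero mask, except the VLAN TCI which reports VLAN_VID_MASK.
 */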
5730 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5731 				  struct ethtool_rxnfc *cmd)
5732 {
5733 	struct hclge_vport *vport = hclge_get_vport(handle);
5734 	struct hclge_fd_rule *rule = NULL;
5735 	struct hclge_dev *hdev = vport->back;
5736 	struct ethtool_rx_flow_spec *fs;
5737 	struct hlist_node *node2;
5738 
5739 	if (!hnae3_dev_fd_supported(hdev))
5740 		return -EOPNOTSUPP;
5741 
5742 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5743 
5744 	spin_lock_bh(&hdev->fd_rule_lock);
5745 
5746 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5747 		if (rule->location >= fs->location)
5748 			break;
5749 	}
5750 
5751 	if (!rule || fs->location != rule->location) {
5752 		spin_unlock_bh(&hdev->fd_rule_lock);
5753 
5754 		return -ENOENT;
5755 	}
5756 
5757 	fs->flow_type = rule->flow_type;
5758 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5759 	case SCTP_V4_FLOW:
5760 	case TCP_V4_FLOW:
5761 	case UDP_V4_FLOW:
5762 		fs->h_u.tcp_ip4_spec.ip4src =
5763 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5764 		fs->m_u.tcp_ip4_spec.ip4src =
5765 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5766 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5767 
5768 		fs->h_u.tcp_ip4_spec.ip4dst =
5769 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5770 		fs->m_u.tcp_ip4_spec.ip4dst =
5771 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5772 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5773 
5774 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5775 		fs->m_u.tcp_ip4_spec.psrc =
5776 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5777 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5778 
5779 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5780 		fs->m_u.tcp_ip4_spec.pdst =
5781 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5782 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5783 
5784 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5785 		fs->m_u.tcp_ip4_spec.tos =
5786 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5787 				0 : rule->tuples_mask.ip_tos;
5788 
5789 		break;
5790 	case IP_USER_FLOW:
5791 		fs->h_u.usr_ip4_spec.ip4src =
5792 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
		fs->m_u.usr_ip4_spec.ip4src =
5794 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5795 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5796 
5797 		fs->h_u.usr_ip4_spec.ip4dst =
5798 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5799 		fs->m_u.usr_ip4_spec.ip4dst =
5800 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5801 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5802 
5803 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5804 		fs->m_u.usr_ip4_spec.tos =
5805 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5806 				0 : rule->tuples_mask.ip_tos;
5807 
5808 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5809 		fs->m_u.usr_ip4_spec.proto =
5810 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5811 				0 : rule->tuples_mask.ip_proto;
5812 
5813 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5814 
5815 		break;
5816 	case SCTP_V6_FLOW:
5817 	case TCP_V6_FLOW:
5818 	case UDP_V6_FLOW:
5819 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5820 				  rule->tuples.src_ip, IPV6_SIZE);
5821 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5822 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5823 			       sizeof(int) * IPV6_SIZE);
5824 		else
5825 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5826 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5827 
5828 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5829 				  rule->tuples.dst_ip, IPV6_SIZE);
5830 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5831 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5832 			       sizeof(int) * IPV6_SIZE);
5833 		else
5834 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5835 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5836 
5837 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5838 		fs->m_u.tcp_ip6_spec.psrc =
5839 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5840 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5841 
5842 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5843 		fs->m_u.tcp_ip6_spec.pdst =
5844 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5845 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5846 
5847 		break;
5848 	case IPV6_USER_FLOW:
5849 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5850 				  rule->tuples.src_ip, IPV6_SIZE);
5851 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5852 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5853 			       sizeof(int) * IPV6_SIZE);
5854 		else
5855 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5856 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5857 
5858 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5859 				  rule->tuples.dst_ip, IPV6_SIZE);
5860 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5861 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5862 			       sizeof(int) * IPV6_SIZE);
5863 		else
5864 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5865 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5866 
5867 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5868 		fs->m_u.usr_ip6_spec.l4_proto =
5869 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5870 				0 : rule->tuples_mask.ip_proto;
5871 
5872 		break;
5873 	case ETHER_FLOW:
5874 		ether_addr_copy(fs->h_u.ether_spec.h_source,
5875 				rule->tuples.src_mac);
5876 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5877 			eth_zero_addr(fs->m_u.ether_spec.h_source);
5878 		else
5879 			ether_addr_copy(fs->m_u.ether_spec.h_source,
5880 					rule->tuples_mask.src_mac);
5881 
5882 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
5883 				rule->tuples.dst_mac);
5884 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
5885 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
5886 		else
5887 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
5888 					rule->tuples_mask.dst_mac);
5889 
5890 		fs->h_u.ether_spec.h_proto =
5891 				cpu_to_be16(rule->tuples.ether_proto);
5892 		fs->m_u.ether_spec.h_proto =
5893 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5894 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5895 
5896 		break;
5897 	default:
5898 		spin_unlock_bh(&hdev->fd_rule_lock);
5899 		return -EOPNOTSUPP;
5900 	}
5901 
5902 	if (fs->flow_type & FLOW_EXT) {
5903 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5904 		fs->m_ext.vlan_tci =
5905 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5906 				cpu_to_be16(VLAN_VID_MASK) :
5907 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
5908 	}
5909 
5910 	if (fs->flow_type & FLOW_MAC_EXT) {
5911 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5912 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
			eth_zero_addr(fs->m_ext.h_dest);
		else
			ether_addr_copy(fs->m_ext.h_dest,
					rule->tuples_mask.dst_mac);
5917 	}
5918 
5919 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5920 		fs->ring_cookie = RX_CLS_FLOW_DISC;
5921 	} else {
5922 		u64 vf_id;
5923 
5924 		fs->ring_cookie = rule->queue_id;
5925 		vf_id = rule->vf_id;
5926 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5927 		fs->ring_cookie |= vf_id;
5928 	}
5929 
5930 	spin_unlock_bh(&hdev->fd_rule_lock);
5931 
5932 	return 0;
5933 }
5934 
5935 static int hclge_get_all_rules(struct hnae3_handle *handle,
5936 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
5937 {
5938 	struct hclge_vport *vport = hclge_get_vport(handle);
5939 	struct hclge_dev *hdev = vport->back;
5940 	struct hclge_fd_rule *rule;
5941 	struct hlist_node *node2;
5942 	int cnt = 0;
5943 
5944 	if (!hnae3_dev_fd_supported(hdev))
5945 		return -EOPNOTSUPP;
5946 
5947 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5948 
5949 	spin_lock_bh(&hdev->fd_rule_lock);
5950 	hlist_for_each_entry_safe(rule, node2,
5951 				  &hdev->fd_rule_list, rule_node) {
5952 		if (cnt == cmd->rule_cnt) {
5953 			spin_unlock_bh(&hdev->fd_rule_lock);
5954 			return -EMSGSIZE;
5955 		}
5956 
5957 		rule_locs[cnt] = rule->location;
5958 		cnt++;
5959 	}
5960 
5961 	spin_unlock_bh(&hdev->fd_rule_lock);
5962 
5963 	cmd->rule_cnt = cnt;
5964 
5965 	return 0;
5966 }
5967 
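/* convert the dissected flow keys into flow director tuples in host byte
 * order; IPv4 addresses are stored in the last word of the src_ip/dst_ip
 * arrays.
 */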
5968 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5969 				     struct hclge_fd_rule_tuples *tuples)
5970 {
5971 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5972 	tuples->ip_proto = fkeys->basic.ip_proto;
5973 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5974 
5975 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5976 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5977 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5978 	} else {
5979 		memcpy(tuples->src_ip,
5980 		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5981 		       sizeof(tuples->src_ip));
5982 		memcpy(tuples->dst_ip,
5983 		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5984 		       sizeof(tuples->dst_ip));
5985 	}
5986 }
5987 
/* traverse all rules and check whether an existing rule has the same tuples */
5989 static struct hclge_fd_rule *
5990 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5991 			  const struct hclge_fd_rule_tuples *tuples)
5992 {
5993 	struct hclge_fd_rule *rule = NULL;
5994 	struct hlist_node *node;
5995 
5996 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5997 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5998 			return rule;
5999 	}
6000 
6001 	return NULL;
6002 }
6003 
6004 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6005 				     struct hclge_fd_rule *rule)
6006 {
6007 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6008 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6009 			     BIT(INNER_SRC_PORT);
6010 	rule->action = 0;
6011 	rule->vf_id = 0;
6012 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6013 	if (tuples->ether_proto == ETH_P_IP) {
6014 		if (tuples->ip_proto == IPPROTO_TCP)
6015 			rule->flow_type = TCP_V4_FLOW;
6016 		else
6017 			rule->flow_type = UDP_V4_FLOW;
6018 	} else {
6019 		if (tuples->ip_proto == IPPROTO_TCP)
6020 			rule->flow_type = TCP_V6_FLOW;
6021 		else
6022 			rule->flow_type = UDP_V6_FLOW;
6023 	}
6024 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6025 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6026 }
6027 
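/* aRFS flow steering entry: refuse to add rules while ethtool rules are
 * active, reuse an existing rule with identical tuples, or allocate a new
 * location from fd_bmap and program it; returns the rule location on
 * success.
 */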
6028 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6029 				      u16 flow_id, struct flow_keys *fkeys)
6030 {
6031 	struct hclge_vport *vport = hclge_get_vport(handle);
6032 	struct hclge_fd_rule_tuples new_tuples;
6033 	struct hclge_dev *hdev = vport->back;
6034 	struct hclge_fd_rule *rule;
6035 	u16 tmp_queue_id;
6036 	u16 bit_id;
6037 	int ret;
6038 
6039 	if (!hnae3_dev_fd_supported(hdev))
6040 		return -EOPNOTSUPP;
6041 
6042 	memset(&new_tuples, 0, sizeof(new_tuples));
6043 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6044 
6045 	spin_lock_bh(&hdev->fd_rule_lock);
6046 
	/* when there is already an fd rule added by the user, aRFS
	 * should not work
	 */
6050 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6051 		spin_unlock_bh(&hdev->fd_rule_lock);
6052 
6053 		return -EOPNOTSUPP;
6054 	}
6055 
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
6061 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6062 	if (!rule) {
6063 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6064 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6065 			spin_unlock_bh(&hdev->fd_rule_lock);
6066 
6067 			return -ENOSPC;
6068 		}
6069 
6070 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6071 		if (!rule) {
6072 			spin_unlock_bh(&hdev->fd_rule_lock);
6073 
6074 			return -ENOMEM;
6075 		}
6076 
6077 		set_bit(bit_id, hdev->fd_bmap);
6078 		rule->location = bit_id;
6079 		rule->flow_id = flow_id;
6080 		rule->queue_id = queue_id;
6081 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6082 		ret = hclge_fd_config_rule(hdev, rule);
6083 
6084 		spin_unlock_bh(&hdev->fd_rule_lock);
6085 
6086 		if (ret)
6087 			return ret;
6088 
6089 		return rule->location;
6090 	}
6091 
6092 	spin_unlock_bh(&hdev->fd_rule_lock);
6093 
6094 	if (rule->queue_id == queue_id)
6095 		return rule->location;
6096 
6097 	tmp_queue_id = rule->queue_id;
6098 	rule->queue_id = queue_id;
6099 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6100 	if (ret) {
6101 		rule->queue_id = tmp_queue_id;
6102 		return ret;
6103 	}
6104 
6105 	return rule->location;
6106 }
6107 
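/* expire aRFS rules that rps_may_expire_flow() reports as stale: collect
 * them on a temporary list under fd_rule_lock, then release the TCAM
 * entries and free the rules outside the lock.
 */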
6108 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6109 {
6110 #ifdef CONFIG_RFS_ACCEL
6111 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6112 	struct hclge_fd_rule *rule;
6113 	struct hlist_node *node;
6114 	HLIST_HEAD(del_list);
6115 
6116 	spin_lock_bh(&hdev->fd_rule_lock);
6117 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6118 		spin_unlock_bh(&hdev->fd_rule_lock);
6119 		return;
6120 	}
6121 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6122 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6123 					rule->flow_id, rule->location)) {
6124 			hlist_del_init(&rule->rule_node);
6125 			hlist_add_head(&rule->rule_node, &del_list);
6126 			hdev->hclge_fd_rule_num--;
6127 			clear_bit(rule->location, hdev->fd_bmap);
6128 		}
6129 	}
6130 	spin_unlock_bh(&hdev->fd_rule_lock);
6131 
6132 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6133 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6134 				     rule->location, NULL, false);
6135 		kfree(rule);
6136 	}
6137 #endif
6138 }
6139 
6140 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6141 {
6142 #ifdef CONFIG_RFS_ACCEL
6143 	struct hclge_vport *vport = hclge_get_vport(handle);
6144 	struct hclge_dev *hdev = vport->back;
6145 
6146 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6147 		hclge_del_all_fd_entries(handle, true);
6148 #endif
6149 }
6150 
6151 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6152 {
6153 	struct hclge_vport *vport = hclge_get_vport(handle);
6154 	struct hclge_dev *hdev = vport->back;
6155 
6156 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6157 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6158 }
6159 
6160 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6161 {
6162 	struct hclge_vport *vport = hclge_get_vport(handle);
6163 	struct hclge_dev *hdev = vport->back;
6164 
6165 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6166 }
6167 
6168 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6169 {
6170 	struct hclge_vport *vport = hclge_get_vport(handle);
6171 	struct hclge_dev *hdev = vport->back;
6172 
6173 	return hdev->rst_stats.hw_reset_done_cnt;
6174 }
6175 
6176 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6177 {
6178 	struct hclge_vport *vport = hclge_get_vport(handle);
6179 	struct hclge_dev *hdev = vport->back;
6180 	bool clear;
6181 
6182 	hdev->fd_en = enable;
6183 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6184 	if (!enable)
6185 		hclge_del_all_fd_entries(handle, clear);
6186 	else
6187 		hclge_restore_fd_entries(handle);
6188 }
6189 
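/* enable or disable the MAC: when enabling, turn on TX/RX together with
 * the padding, FCS and oversize/undersize handling bits; when disabling,
 * write an all-zero configuration.
 */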
6190 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6191 {
6192 	struct hclge_desc desc;
6193 	struct hclge_config_mac_mode_cmd *req =
6194 		(struct hclge_config_mac_mode_cmd *)desc.data;
6195 	u32 loop_en = 0;
6196 	int ret;
6197 
6198 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6199 
6200 	if (enable) {
6201 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6202 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6203 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6204 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6205 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6206 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6207 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6208 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6209 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6210 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6211 	}
6212 
6213 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6214 
6215 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6216 	if (ret)
6217 		dev_err(&hdev->pdev->dev,
6218 			"mac enable fail, ret =%d.\n", ret);
6219 }
6220 
6221 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6222 				     u8 switch_param, u8 param_mask)
6223 {
6224 	struct hclge_mac_vlan_switch_cmd *req;
6225 	struct hclge_desc desc;
6226 	u32 func_id;
6227 	int ret;
6228 
6229 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6230 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6231 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6232 				   false);
6233 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6234 	req->func_id = cpu_to_le32(func_id);
6235 	req->switch_param = switch_param;
6236 	req->param_mask = param_mask;
6237 
6238 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6239 	if (ret)
6240 		dev_err(&hdev->pdev->dev,
6241 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6242 	return ret;
6243 }
6244 
6245 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6246 				       int link_ret)
6247 {
6248 #define HCLGE_PHY_LINK_STATUS_NUM  200
6249 
6250 	struct phy_device *phydev = hdev->hw.mac.phydev;
6251 	int i = 0;
6252 	int ret;
6253 
6254 	do {
6255 		ret = phy_read_status(phydev);
6256 		if (ret) {
6257 			dev_err(&hdev->pdev->dev,
6258 				"phy update link status fail, ret = %d\n", ret);
6259 			return;
6260 		}
6261 
6262 		if (phydev->link == link_ret)
6263 			break;
6264 
6265 		msleep(HCLGE_LINK_STATUS_MS);
6266 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6267 }
6268 
6269 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6270 {
6271 #define HCLGE_MAC_LINK_STATUS_NUM  100
6272 
6273 	int i = 0;
6274 	int ret;
6275 
6276 	do {
6277 		ret = hclge_get_mac_link_status(hdev);
6278 		if (ret < 0)
6279 			return ret;
6280 		else if (ret == link_ret)
6281 			return 0;
6282 
6283 		msleep(HCLGE_LINK_STATUS_MS);
6284 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6285 	return -EBUSY;
6286 }
6287 
6288 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6289 					  bool is_phy)
6290 {
6291 #define HCLGE_LINK_STATUS_DOWN 0
6292 #define HCLGE_LINK_STATUS_UP   1
6293 
6294 	int link_ret;
6295 
6296 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6297 
6298 	if (is_phy)
6299 		hclge_phy_link_status_wait(hdev, link_ret);
6300 
6301 	return hclge_mac_link_status_wait(hdev, link_ret);
6302 }
6303 
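/* app loopback is configured by a read-modify-write of the MAC mode
 * config, toggling the loopback, TX and RX enable bits together.
 */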
6304 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6305 {
6306 	struct hclge_config_mac_mode_cmd *req;
6307 	struct hclge_desc desc;
6308 	u32 loop_en;
6309 	int ret;
6310 
6311 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
6313 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6314 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6315 	if (ret) {
6316 		dev_err(&hdev->pdev->dev,
6317 			"mac loopback get fail, ret =%d.\n", ret);
6318 		return ret;
6319 	}
6320 
6321 	/* 2 Then setup the loopback flag */
6322 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6323 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6324 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6325 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6326 
6327 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6328 
	/* 3 Config MAC work mode with the loopback flag
	 * and its original configuration parameters
	 */
6332 	hclge_cmd_reuse_desc(&desc, false);
6333 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6334 	if (ret)
6335 		dev_err(&hdev->pdev->dev,
6336 			"mac loopback set fail, ret =%d.\n", ret);
6337 	return ret;
6338 }
6339 
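/* set or clear serdes loopback, then poll the command result until the
 * DONE bit is reported and check the SUCCESS bit.
 */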
6340 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6341 				     enum hnae3_loop loop_mode)
6342 {
6343 #define HCLGE_SERDES_RETRY_MS	10
6344 #define HCLGE_SERDES_RETRY_NUM	100
6345 
6346 	struct hclge_serdes_lb_cmd *req;
6347 	struct hclge_desc desc;
6348 	int ret, i = 0;
6349 	u8 loop_mode_b;
6350 
6351 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6352 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6353 
6354 	switch (loop_mode) {
6355 	case HNAE3_LOOP_SERIAL_SERDES:
6356 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6357 		break;
6358 	case HNAE3_LOOP_PARALLEL_SERDES:
6359 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6360 		break;
6361 	default:
6362 		dev_err(&hdev->pdev->dev,
6363 			"unsupported serdes loopback mode %d\n", loop_mode);
6364 		return -ENOTSUPP;
6365 	}
6366 
6367 	if (en) {
6368 		req->enable = loop_mode_b;
6369 		req->mask = loop_mode_b;
6370 	} else {
6371 		req->mask = loop_mode_b;
6372 	}
6373 
6374 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6375 	if (ret) {
6376 		dev_err(&hdev->pdev->dev,
6377 			"serdes loopback set fail, ret = %d\n", ret);
6378 		return ret;
6379 	}
6380 
6381 	do {
6382 		msleep(HCLGE_SERDES_RETRY_MS);
6383 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6384 					   true);
6385 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6386 		if (ret) {
6387 			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
6389 			return ret;
6390 		}
6391 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6392 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6393 
6394 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6395 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6396 		return -EBUSY;
6397 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6398 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6399 		return -EIO;
6400 	}
6401 	return ret;
6402 }
6403 
6404 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6405 				     enum hnae3_loop loop_mode)
6406 {
6407 	int ret;
6408 
6409 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6410 	if (ret)
6411 		return ret;
6412 
6413 	hclge_cfg_mac_mode(hdev, en);
6414 
	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6416 	if (ret)
6417 		dev_err(&hdev->pdev->dev,
6418 			"serdes loopback config mac mode timeout\n");
6419 
6420 	return ret;
6421 }
6422 
6423 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6424 				     struct phy_device *phydev)
6425 {
6426 	int ret;
6427 
6428 	if (!phydev->suspended) {
6429 		ret = phy_suspend(phydev);
6430 		if (ret)
6431 			return ret;
6432 	}
6433 
6434 	ret = phy_resume(phydev);
6435 	if (ret)
6436 		return ret;
6437 
6438 	return phy_loopback(phydev, true);
6439 }
6440 
6441 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6442 				      struct phy_device *phydev)
6443 {
6444 	int ret;
6445 
6446 	ret = phy_loopback(phydev, false);
6447 	if (ret)
6448 		return ret;
6449 
6450 	return phy_suspend(phydev);
6451 }
6452 
6453 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6454 {
6455 	struct phy_device *phydev = hdev->hw.mac.phydev;
6456 	int ret;
6457 
6458 	if (!phydev)
6459 		return -ENOTSUPP;
6460 
6461 	if (en)
6462 		ret = hclge_enable_phy_loopback(hdev, phydev);
6463 	else
6464 		ret = hclge_disable_phy_loopback(hdev, phydev);
6465 	if (ret) {
6466 		dev_err(&hdev->pdev->dev,
6467 			"set phy loopback fail, ret = %d\n", ret);
6468 		return ret;
6469 	}
6470 
6471 	hclge_cfg_mac_mode(hdev, en);
6472 
	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6474 	if (ret)
6475 		dev_err(&hdev->pdev->dev,
6476 			"phy loopback config mac mode timeout\n");
6477 
6478 	return ret;
6479 }
6480 
6481 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6482 			    int stream_id, bool enable)
6483 {
6484 	struct hclge_desc desc;
6485 	struct hclge_cfg_com_tqp_queue_cmd *req =
6486 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6487 	int ret;
6488 
6489 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6490 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6491 	req->stream_id = cpu_to_le16(stream_id);
6492 	if (enable)
6493 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6494 
6495 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6496 	if (ret)
6497 		dev_err(&hdev->pdev->dev,
6498 			"Tqp enable fail, status =%d.\n", ret);
6499 	return ret;
6500 }
6501 
6502 static int hclge_set_loopback(struct hnae3_handle *handle,
6503 			      enum hnae3_loop loop_mode, bool en)
6504 {
6505 	struct hclge_vport *vport = hclge_get_vport(handle);
6506 	struct hnae3_knic_private_info *kinfo;
6507 	struct hclge_dev *hdev = vport->back;
6508 	int i, ret;
6509 
6510 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6511 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6512 	 * the same, the packets are looped back in the SSU. If SSU loopback
6513 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6514 	 */
6515 	if (hdev->pdev->revision >= 0x21) {
6516 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6517 
6518 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6519 						HCLGE_SWITCH_ALW_LPBK_MASK);
6520 		if (ret)
6521 			return ret;
6522 	}
6523 
6524 	switch (loop_mode) {
6525 	case HNAE3_LOOP_APP:
6526 		ret = hclge_set_app_loopback(hdev, en);
6527 		break;
6528 	case HNAE3_LOOP_SERIAL_SERDES:
6529 	case HNAE3_LOOP_PARALLEL_SERDES:
6530 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6531 		break;
6532 	case HNAE3_LOOP_PHY:
6533 		ret = hclge_set_phy_loopback(hdev, en);
6534 		break;
6535 	default:
6536 		ret = -ENOTSUPP;
6537 		dev_err(&hdev->pdev->dev,
6538 			"loop_mode %d is not supported\n", loop_mode);
6539 		break;
6540 	}
6541 
6542 	if (ret)
6543 		return ret;
6544 
6545 	kinfo = &vport->nic.kinfo;
6546 	for (i = 0; i < kinfo->num_tqps; i++) {
6547 		ret = hclge_tqp_enable(hdev, i, 0, en);
6548 		if (ret)
6549 			return ret;
6550 	}
6551 
6552 	return 0;
6553 }
6554 
6555 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6556 {
6557 	int ret;
6558 
6559 	ret = hclge_set_app_loopback(hdev, false);
6560 	if (ret)
6561 		return ret;
6562 
6563 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6564 	if (ret)
6565 		return ret;
6566 
6567 	return hclge_cfg_serdes_loopback(hdev, false,
6568 					 HNAE3_LOOP_PARALLEL_SERDES);
6569 }
6570 
6571 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6572 {
6573 	struct hclge_vport *vport = hclge_get_vport(handle);
6574 	struct hnae3_knic_private_info *kinfo;
6575 	struct hnae3_queue *queue;
6576 	struct hclge_tqp *tqp;
6577 	int i;
6578 
6579 	kinfo = &vport->nic.kinfo;
6580 	for (i = 0; i < kinfo->num_tqps; i++) {
6581 		queue = handle->kinfo.tqp[i];
6582 		tqp = container_of(queue, struct hclge_tqp, q);
6583 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6584 	}
6585 }
6586 
6587 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6588 {
6589 	struct hclge_vport *vport = hclge_get_vport(handle);
6590 	struct hclge_dev *hdev = vport->back;
6591 
6592 	if (enable) {
6593 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6594 	} else {
		/* Set the DOWN flag here to prevent the service task from
		 * being scheduled again
		 */
6598 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6599 		cancel_delayed_work_sync(&hdev->service_task);
6600 		clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6601 	}
6602 }
6603 
6604 static int hclge_ae_start(struct hnae3_handle *handle)
6605 {
6606 	struct hclge_vport *vport = hclge_get_vport(handle);
6607 	struct hclge_dev *hdev = vport->back;
6608 
6609 	/* mac enable */
6610 	hclge_cfg_mac_mode(hdev, true);
6611 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6612 	hdev->hw.mac.link = 0;
6613 
6614 	/* reset tqp stats */
6615 	hclge_reset_tqp_stats(handle);
6616 
6617 	hclge_mac_start_phy(hdev);
6618 
6619 	return 0;
6620 }
6621 
6622 static void hclge_ae_stop(struct hnae3_handle *handle)
6623 {
6624 	struct hclge_vport *vport = hclge_get_vport(handle);
6625 	struct hclge_dev *hdev = vport->back;
6626 	int i;
6627 
6628 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6629 
6630 	hclge_clear_arfs_rules(handle);
6631 
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
6635 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6636 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6637 		hclge_mac_stop_phy(hdev);
6638 		hclge_update_link_status(hdev);
6639 		return;
6640 	}
6641 
6642 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6643 		hclge_reset_tqp(handle, i);
6644 
6645 	hclge_config_mac_tnl_int(hdev, false);
6646 
6647 	/* Mac disable */
6648 	hclge_cfg_mac_mode(hdev, false);
6649 
6650 	hclge_mac_stop_phy(hdev);
6651 
6652 	/* reset tqp stats */
6653 	hclge_reset_tqp_stats(handle);
6654 	hclge_update_link_status(hdev);
6655 }
6656 
6657 int hclge_vport_start(struct hclge_vport *vport)
6658 {
6659 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6660 	vport->last_active_jiffies = jiffies;
6661 	return 0;
6662 }
6663 
6664 void hclge_vport_stop(struct hclge_vport *vport)
6665 {
6666 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6667 }
6668 
6669 static int hclge_client_start(struct hnae3_handle *handle)
6670 {
6671 	struct hclge_vport *vport = hclge_get_vport(handle);
6672 
6673 	return hclge_vport_start(vport);
6674 }
6675 
6676 static void hclge_client_stop(struct hnae3_handle *handle)
6677 {
6678 	struct hclge_vport *vport = hclge_get_vport(handle);
6679 
6680 	hclge_vport_stop(vport);
6681 }
6682 
6683 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6684 					 u16 cmdq_resp, u8  resp_code,
6685 					 enum hclge_mac_vlan_tbl_opcode op)
6686 {
6687 	struct hclge_dev *hdev = vport->back;
6688 
6689 	if (cmdq_resp) {
6690 		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status, status=%d.\n",
6692 			cmdq_resp);
6693 		return -EIO;
6694 	}
6695 
6696 	if (op == HCLGE_MAC_VLAN_ADD) {
6697 		if ((!resp_code) || (resp_code == 1)) {
6698 			return 0;
6699 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6700 			dev_err(&hdev->pdev->dev,
6701 				"add mac addr failed for uc_overflow.\n");
6702 			return -ENOSPC;
6703 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6704 			dev_err(&hdev->pdev->dev,
6705 				"add mac addr failed for mc_overflow.\n");
6706 			return -ENOSPC;
6707 		}
6708 
6709 		dev_err(&hdev->pdev->dev,
6710 			"add mac addr failed for undefined, code=%u.\n",
6711 			resp_code);
6712 		return -EIO;
6713 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6714 		if (!resp_code) {
6715 			return 0;
6716 		} else if (resp_code == 1) {
6717 			dev_dbg(&hdev->pdev->dev,
6718 				"remove mac addr failed for miss.\n");
6719 			return -ENOENT;
6720 		}
6721 
6722 		dev_err(&hdev->pdev->dev,
6723 			"remove mac addr failed for undefined, code=%u.\n",
6724 			resp_code);
6725 		return -EIO;
6726 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6727 		if (!resp_code) {
6728 			return 0;
6729 		} else if (resp_code == 1) {
6730 			dev_dbg(&hdev->pdev->dev,
6731 				"lookup mac addr failed for miss.\n");
6732 			return -ENOENT;
6733 		}
6734 
6735 		dev_err(&hdev->pdev->dev,
6736 			"lookup mac addr failed for undefined, code=%u.\n",
6737 			resp_code);
6738 		return -EIO;
6739 	}
6740 
6741 	dev_err(&hdev->pdev->dev,
6742 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6743 
6744 	return -EINVAL;
6745 }
6746 
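/* set or clear the bit for @vfid in the per-function bitmap carried in
 * desc[1] and desc[2]: function ids 0~191 live in desc[1], the rest in
 * desc[2].
 */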
6747 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6748 {
6749 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6750 
6751 	unsigned int word_num;
6752 	unsigned int bit_num;
6753 
6754 	if (vfid > 255 || vfid < 0)
6755 		return -EIO;
6756 
6757 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6758 		word_num = vfid / 32;
6759 		bit_num  = vfid % 32;
6760 		if (clr)
6761 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6762 		else
6763 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6764 	} else {
6765 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6766 		bit_num  = vfid % 32;
6767 		if (clr)
6768 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6769 		else
6770 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6771 	}
6772 
6773 	return 0;
6774 }
6775 
6776 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6777 {
6778 #define HCLGE_DESC_NUMBER 3
6779 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6780 	int i, j;
6781 
6782 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6783 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6784 			if (desc[i].data[j])
6785 				return false;
6786 
6787 	return true;
6788 }
6789 
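/* pack the 6-byte MAC address into the table entry: bytes 0~3 go into
 * mac_addr_hi32 and bytes 4~5 into mac_addr_lo16; multicast entries also
 * get the entry type and mc enable bits set.
 */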
6790 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6791 				   const u8 *addr, bool is_mc)
6792 {
6793 	const unsigned char *mac_addr = addr;
6794 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6795 		       (mac_addr[0]) | (mac_addr[1] << 8);
6796 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6797 
6798 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6799 	if (is_mc) {
6800 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6801 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6802 	}
6803 
6804 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6805 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6806 }
6807 
6808 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6809 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6810 {
6811 	struct hclge_dev *hdev = vport->back;
6812 	struct hclge_desc desc;
6813 	u8 resp_code;
6814 	u16 retval;
6815 	int ret;
6816 
6817 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6818 
6819 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6820 
6821 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6822 	if (ret) {
6823 		dev_err(&hdev->pdev->dev,
6824 			"del mac addr failed for cmd_send, ret =%d.\n",
6825 			ret);
6826 		return ret;
6827 	}
6828 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6829 	retval = le16_to_cpu(desc.retval);
6830 
6831 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6832 					     HCLGE_MAC_VLAN_REMOVE);
6833 }
6834 
6835 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6836 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
6837 				     struct hclge_desc *desc,
6838 				     bool is_mc)
6839 {
6840 	struct hclge_dev *hdev = vport->back;
6841 	u8 resp_code;
6842 	u16 retval;
6843 	int ret;
6844 
6845 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6846 	if (is_mc) {
6847 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6848 		memcpy(desc[0].data,
6849 		       req,
6850 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6851 		hclge_cmd_setup_basic_desc(&desc[1],
6852 					   HCLGE_OPC_MAC_VLAN_ADD,
6853 					   true);
6854 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6855 		hclge_cmd_setup_basic_desc(&desc[2],
6856 					   HCLGE_OPC_MAC_VLAN_ADD,
6857 					   true);
6858 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
6859 	} else {
6860 		memcpy(desc[0].data,
6861 		       req,
6862 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6863 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
6864 	}
6865 	if (ret) {
6866 		dev_err(&hdev->pdev->dev,
6867 			"lookup mac addr failed for cmd_send, ret =%d.\n",
6868 			ret);
6869 		return ret;
6870 	}
6871 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6872 	retval = le16_to_cpu(desc[0].retval);
6873 
6874 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6875 					     HCLGE_MAC_VLAN_LKUP);
6876 }
6877 
6878 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6879 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
6880 				  struct hclge_desc *mc_desc)
6881 {
6882 	struct hclge_dev *hdev = vport->back;
6883 	int cfg_status;
6884 	u8 resp_code;
6885 	u16 retval;
6886 	int ret;
6887 
6888 	if (!mc_desc) {
6889 		struct hclge_desc desc;
6890 
6891 		hclge_cmd_setup_basic_desc(&desc,
6892 					   HCLGE_OPC_MAC_VLAN_ADD,
6893 					   false);
6894 		memcpy(desc.data, req,
6895 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6896 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6897 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6898 		retval = le16_to_cpu(desc.retval);
6899 
6900 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6901 							   resp_code,
6902 							   HCLGE_MAC_VLAN_ADD);
6903 	} else {
6904 		hclge_cmd_reuse_desc(&mc_desc[0], false);
6905 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6906 		hclge_cmd_reuse_desc(&mc_desc[1], false);
6907 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6908 		hclge_cmd_reuse_desc(&mc_desc[2], false);
6909 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6910 		memcpy(mc_desc[0].data, req,
6911 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6912 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6913 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6914 		retval = le16_to_cpu(mc_desc[0].retval);
6915 
6916 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6917 							   resp_code,
6918 							   HCLGE_MAC_VLAN_ADD);
6919 	}
6920 
6921 	if (ret) {
6922 		dev_err(&hdev->pdev->dev,
6923 			"add mac addr failed for cmd_send, ret =%d.\n",
6924 			ret);
6925 		return ret;
6926 	}
6927 
6928 	return cfg_status;
6929 }
6930 
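/* request unicast MAC-VLAN (UMV) space and split the allocated size into
 * a private quota per function plus a shared pool for the PF and its VFs.
 */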
6931 static int hclge_init_umv_space(struct hclge_dev *hdev)
6932 {
6933 	u16 allocated_size = 0;
6934 	int ret;
6935 
6936 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6937 				  true);
6938 	if (ret)
6939 		return ret;
6940 
6941 	if (allocated_size < hdev->wanted_umv_size)
6942 		dev_warn(&hdev->pdev->dev,
6943 			 "Alloc umv space failed, want %d, get %d\n",
6944 			 hdev->wanted_umv_size, allocated_size);
6945 
6946 	mutex_init(&hdev->umv_mutex);
6947 	hdev->max_umv_size = allocated_size;
	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
	 * reserve some unicast MAC-VLAN table entries to be shared by
	 * the PF and its VFs.
	 */
6952 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6953 	hdev->share_umv_size = hdev->priv_umv_size +
6954 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
6955 
6956 	return 0;
6957 }
6958 
6959 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6960 {
6961 	int ret;
6962 
6963 	if (hdev->max_umv_size > 0) {
6964 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6965 					  false);
6966 		if (ret)
6967 			return ret;
6968 		hdev->max_umv_size = 0;
6969 	}
6970 	mutex_destroy(&hdev->umv_mutex);
6971 
6972 	return 0;
6973 }
6974 
6975 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6976 			       u16 *allocated_size, bool is_alloc)
6977 {
6978 	struct hclge_umv_spc_alc_cmd *req;
6979 	struct hclge_desc desc;
6980 	int ret;
6981 
6982 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6983 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6984 	if (!is_alloc)
6985 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6986 
6987 	req->space_size = cpu_to_le32(space_size);
6988 
6989 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6990 	if (ret) {
6991 		dev_err(&hdev->pdev->dev,
6992 			"%s umv space failed for cmd_send, ret =%d\n",
6993 			is_alloc ? "allocate" : "free", ret);
6994 		return ret;
6995 	}
6996 
6997 	if (is_alloc && allocated_size)
6998 		*allocated_size = le32_to_cpu(desc.data[1]);
6999 
7000 	return 0;
7001 }
7002 
7003 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7004 {
7005 	struct hclge_vport *vport;
7006 	int i;
7007 
7008 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7009 		vport = &hdev->vport[i];
7010 		vport->used_umv_num = 0;
7011 	}
7012 
7013 	mutex_lock(&hdev->umv_mutex);
7014 	hdev->share_umv_size = hdev->priv_umv_size +
7015 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7016 	mutex_unlock(&hdev->umv_mutex);
7017 }
7018 
7019 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7020 {
7021 	struct hclge_dev *hdev = vport->back;
7022 	bool is_full;
7023 
7024 	mutex_lock(&hdev->umv_mutex);
7025 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7026 		   hdev->share_umv_size == 0);
7027 	mutex_unlock(&hdev->umv_mutex);
7028 
7029 	return is_full;
7030 }
7031 
7032 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7033 {
7034 	struct hclge_dev *hdev = vport->back;
7035 
7036 	mutex_lock(&hdev->umv_mutex);
7037 	if (is_free) {
7038 		if (vport->used_umv_num > hdev->priv_umv_size)
7039 			hdev->share_umv_size++;
7040 
7041 		if (vport->used_umv_num > 0)
7042 			vport->used_umv_num--;
7043 	} else {
7044 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7045 		    hdev->share_umv_size > 0)
7046 			hdev->share_umv_size--;
7047 		vport->used_umv_num++;
7048 	}
7049 	mutex_unlock(&hdev->umv_mutex);
7050 }
7051 
7052 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7053 			     const unsigned char *addr)
7054 {
7055 	struct hclge_vport *vport = hclge_get_vport(handle);
7056 
7057 	return hclge_add_uc_addr_common(vport, addr);
7058 }
7059 
7060 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7061 			     const unsigned char *addr)
7062 {
7063 	struct hclge_dev *hdev = vport->back;
7064 	struct hclge_mac_vlan_tbl_entry_cmd req;
7065 	struct hclge_desc desc;
7066 	u16 egress_port = 0;
7067 	int ret;
7068 
7069 	/* mac addr check */
7070 	if (is_zero_ether_addr(addr) ||
7071 	    is_broadcast_ether_addr(addr) ||
7072 	    is_multicast_ether_addr(addr)) {
7073 		dev_err(&hdev->pdev->dev,
7074 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7075 			 addr, is_zero_ether_addr(addr),
7076 			 is_broadcast_ether_addr(addr),
7077 			 is_multicast_ether_addr(addr));
7078 		return -EINVAL;
7079 	}
7080 
7081 	memset(&req, 0, sizeof(req));
7082 
7083 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7084 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7085 
7086 	req.egress_port = cpu_to_le16(egress_port);
7087 
7088 	hclge_prepare_mac_addr(&req, addr, false);
7089 
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
7094 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7095 	if (ret == -ENOENT) {
7096 		if (!hclge_is_umv_space_full(vport)) {
7097 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7098 			if (!ret)
7099 				hclge_update_umv_space(vport, false);
7100 			return ret;
7101 		}
7102 
7103 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7104 			hdev->priv_umv_size);
7105 
7106 		return -ENOSPC;
7107 	}
7108 
7109 	/* check if we just hit the duplicate */
7110 	if (!ret) {
7111 		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
7112 			 vport->vport_id, addr);
7113 		return 0;
7114 	}
7115 
7116 	dev_err(&hdev->pdev->dev,
7117 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7118 		addr);
7119 
7120 	return ret;
7121 }
7122 
7123 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7124 			    const unsigned char *addr)
7125 {
7126 	struct hclge_vport *vport = hclge_get_vport(handle);
7127 
7128 	return hclge_rm_uc_addr_common(vport, addr);
7129 }
7130 
7131 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7132 			    const unsigned char *addr)
7133 {
7134 	struct hclge_dev *hdev = vport->back;
7135 	struct hclge_mac_vlan_tbl_entry_cmd req;
7136 	int ret;
7137 
7138 	/* mac addr check */
7139 	if (is_zero_ether_addr(addr) ||
7140 	    is_broadcast_ether_addr(addr) ||
7141 	    is_multicast_ether_addr(addr)) {
7142 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7143 			addr);
7144 		return -EINVAL;
7145 	}
7146 
7147 	memset(&req, 0, sizeof(req));
7148 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7149 	hclge_prepare_mac_addr(&req, addr, false);
7150 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7151 	if (!ret)
7152 		hclge_update_umv_space(vport, true);
7153 
7154 	return ret;
7155 }
7156 
7157 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7158 			     const unsigned char *addr)
7159 {
7160 	struct hclge_vport *vport = hclge_get_vport(handle);
7161 
7162 	return hclge_add_mc_addr_common(vport, addr);
7163 }
7164 
7165 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7166 			     const unsigned char *addr)
7167 {
7168 	struct hclge_dev *hdev = vport->back;
7169 	struct hclge_mac_vlan_tbl_entry_cmd req;
7170 	struct hclge_desc desc[3];
7171 	int status;
7172 
7173 	/* mac addr check */
7174 	if (!is_multicast_ether_addr(addr)) {
7175 		dev_err(&hdev->pdev->dev,
7176 			"Add mc mac err! invalid mac:%pM.\n",
7177 			 addr);
7178 		return -EINVAL;
7179 	}
7180 	memset(&req, 0, sizeof(req));
7181 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7182 	hclge_prepare_mac_addr(&req, addr, true);
7183 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7184 	if (status) {
7185 		/* This mac addr does not exist, add a new entry for it */
7186 		memset(desc[0].data, 0, sizeof(desc[0].data));
7187 		memset(desc[1].data, 0, sizeof(desc[0].data));
7188 		memset(desc[2].data, 0, sizeof(desc[0].data));
7189 	}
7190 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7191 	if (status)
7192 		return status;
7193 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7194 
7195 	if (status == -ENOSPC)
7196 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7197 
7198 	return status;
7199 }
7200 
7201 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7202 			    const unsigned char *addr)
7203 {
7204 	struct hclge_vport *vport = hclge_get_vport(handle);
7205 
7206 	return hclge_rm_mc_addr_common(vport, addr);
7207 }
7208 
7209 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7210 			    const unsigned char *addr)
7211 {
7212 	struct hclge_dev *hdev = vport->back;
7213 	struct hclge_mac_vlan_tbl_entry_cmd req;
7214 	enum hclge_cmd_status status;
7215 	struct hclge_desc desc[3];
7216 
7217 	/* mac addr check */
7218 	if (!is_multicast_ether_addr(addr)) {
7219 		dev_dbg(&hdev->pdev->dev,
7220 			"Remove mc mac err! invalid mac:%pM.\n",
7221 			 addr);
7222 		return -EINVAL;
7223 	}
7224 
7225 	memset(&req, 0, sizeof(req));
7226 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7227 	hclge_prepare_mac_addr(&req, addr, true);
7228 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7229 	if (!status) {
7230 		/* This mac addr exists, remove this handle's VFID for it */
7231 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7232 		if (status)
7233 			return status;
7234 
7235 		if (hclge_is_all_function_id_zero(desc))
7236 			/* All the vfids are zero, so this entry needs to be deleted */
7237 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7238 		else
7239 			/* Not all the vfids are zero, just update the vfid bitmap */
7240 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7241 
7242 	} else {
7243 		/* This mac address may be in the mta table, but it cannot be
7244 		 * deleted here because an mta entry represents an address
7245 		 * range rather than a specific address. The delete action for
7246 		 * all entries will take effect in update_mta_status, called by
7247 		 * hns3_nic_set_rx_mode.
7248 		 */
7249 		status = 0;
7250 	}
7251 
7252 	return status;
7253 }
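
/*
 * Multicast entries in the MAC_VLAN table are shared between vports: each
 * entry carries a VFID bitmap. Adding the same address from another vport
 * only sets that vport's bit in the existing entry, and removing it clears
 * the bit; the entry itself is deleted only once every VFID bit is zero,
 * as handled above.
 */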
7254 
7255 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7256 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7257 {
7258 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7259 	struct list_head *list;
7260 
7261 	if (!vport->vport_id)
7262 		return;
7263 
7264 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7265 	if (!mac_cfg)
7266 		return;
7267 
7268 	mac_cfg->hd_tbl_status = true;
7269 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7270 
7271 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7272 	       &vport->uc_mac_list : &vport->mc_mac_list;
7273 
7274 	list_add_tail(&mac_cfg->node, list);
7275 }
7276 
7277 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7278 			      bool is_write_tbl,
7279 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7280 {
7281 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7282 	struct list_head *list;
7283 	bool uc_flag, mc_flag;
7284 
7285 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7286 	       &vport->uc_mac_list : &vport->mc_mac_list;
7287 
7288 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7289 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7290 
7291 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7292 		if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) {
7293 			if (uc_flag && mac_cfg->hd_tbl_status)
7294 				hclge_rm_uc_addr_common(vport, mac_addr);
7295 
7296 			if (mc_flag && mac_cfg->hd_tbl_status)
7297 				hclge_rm_mc_addr_common(vport, mac_addr);
7298 
7299 			list_del(&mac_cfg->node);
7300 			kfree(mac_cfg);
7301 			break;
7302 		}
7303 	}
7304 }
7305 
7306 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7307 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7308 {
7309 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7310 	struct list_head *list;
7311 
7312 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7313 	       &vport->uc_mac_list : &vport->mc_mac_list;
7314 
7315 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7316 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7317 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7318 
7319 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7320 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7321 
7322 		mac_cfg->hd_tbl_status = false;
7323 		if (is_del_list) {
7324 			list_del(&mac_cfg->node);
7325 			kfree(mac_cfg);
7326 		}
7327 	}
7328 }
7329 
7330 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7331 {
7332 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7333 	struct hclge_vport *vport;
7334 	int i;
7335 
7336 	mutex_lock(&hdev->vport_cfg_mutex);
7337 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7338 		vport = &hdev->vport[i];
7339 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7340 			list_del(&mac->node);
7341 			kfree(mac);
7342 		}
7343 
7344 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7345 			list_del(&mac->node);
7346 			kfree(mac);
7347 		}
7348 	}
7349 	mutex_unlock(&hdev->vport_cfg_mutex);
7350 }
7351 
7352 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7353 					      u16 cmdq_resp, u8 resp_code)
7354 {
7355 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7356 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7357 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7358 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7359 
7360 	int return_status;
7361 
7362 	if (cmdq_resp) {
7363 		dev_err(&hdev->pdev->dev,
7364 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
7365 			cmdq_resp);
7366 		return -EIO;
7367 	}
7368 
7369 	switch (resp_code) {
7370 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7371 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7372 		return_status = 0;
7373 		break;
7374 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7375 		dev_err(&hdev->pdev->dev,
7376 			"add mac ethertype failed for manager table overflow.\n");
7377 		return_status = -EIO;
7378 		break;
7379 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7380 		dev_err(&hdev->pdev->dev,
7381 			"add mac ethertype failed for key conflict.\n");
7382 		return_status = -EIO;
7383 		break;
7384 	default:
7385 		dev_err(&hdev->pdev->dev,
7386 			"add mac ethertype failed for undefined, code=%d.\n",
7387 			resp_code);
7388 		return_status = -EIO;
7389 	}
7390 
7391 	return return_status;
7392 }
7393 
7394 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7395 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7396 {
7397 	struct hclge_desc desc;
7398 	u8 resp_code;
7399 	u16 retval;
7400 	int ret;
7401 
7402 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7403 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7404 
7405 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7406 	if (ret) {
7407 		dev_err(&hdev->pdev->dev,
7408 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7409 			ret);
7410 		return ret;
7411 	}
7412 
7413 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7414 	retval = le16_to_cpu(desc.retval);
7415 
7416 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7417 }
7418 
7419 static int init_mgr_tbl(struct hclge_dev *hdev)
7420 {
7421 	int ret;
7422 	int i;
7423 
7424 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7425 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7426 		if (ret) {
7427 			dev_err(&hdev->pdev->dev,
7428 				"add mac ethertype failed, ret =%d.\n",
7429 				ret);
7430 			return ret;
7431 		}
7432 	}
7433 
7434 	return 0;
7435 }
7436 
7437 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7438 {
7439 	struct hclge_vport *vport = hclge_get_vport(handle);
7440 	struct hclge_dev *hdev = vport->back;
7441 
7442 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7443 }
7444 
7445 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7446 			      bool is_first)
7447 {
7448 	const unsigned char *new_addr = (const unsigned char *)p;
7449 	struct hclge_vport *vport = hclge_get_vport(handle);
7450 	struct hclge_dev *hdev = vport->back;
7451 	int ret;
7452 
7453 	/* mac addr check */
7454 	if (is_zero_ether_addr(new_addr) ||
7455 	    is_broadcast_ether_addr(new_addr) ||
7456 	    is_multicast_ether_addr(new_addr)) {
7457 		dev_err(&hdev->pdev->dev,
7458 			"Change uc mac err! invalid mac:%pM.\n",
7459 			 new_addr);
7460 		return -EINVAL;
7461 	}
7462 
7463 	if ((!is_first || is_kdump_kernel()) &&
7464 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7465 		dev_warn(&hdev->pdev->dev,
7466 			 "remove old uc mac address fail.\n");
7467 
7468 	ret = hclge_add_uc_addr(handle, new_addr);
7469 	if (ret) {
7470 		dev_err(&hdev->pdev->dev,
7471 			"add uc mac address fail, ret =%d.\n",
7472 			ret);
7473 
7474 		if (!is_first &&
7475 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7476 			dev_err(&hdev->pdev->dev,
7477 				"restore uc mac address fail.\n");
7478 
7479 		return -EIO;
7480 	}
7481 
7482 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7483 	if (ret) {
7484 		dev_err(&hdev->pdev->dev,
7485 			"configure mac pause address fail, ret =%d.\n",
7486 			ret);
7487 		return -EIO;
7488 	}
7489 
7490 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7491 
7492 	return 0;
7493 }
7494 
7495 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7496 			  int cmd)
7497 {
7498 	struct hclge_vport *vport = hclge_get_vport(handle);
7499 	struct hclge_dev *hdev = vport->back;
7500 
7501 	if (!hdev->hw.mac.phydev)
7502 		return -EOPNOTSUPP;
7503 
7504 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7505 }
7506 
7507 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7508 				      u8 fe_type, bool filter_en, u8 vf_id)
7509 {
7510 	struct hclge_vlan_filter_ctrl_cmd *req;
7511 	struct hclge_desc desc;
7512 	int ret;
7513 
7514 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7515 
7516 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7517 	req->vlan_type = vlan_type;
7518 	req->vlan_fe = filter_en ? fe_type : 0;
7519 	req->vf_id = vf_id;
7520 
7521 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7522 	if (ret)
7523 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7524 			ret);
7525 
7526 	return ret;
7527 }
7528 
7529 #define HCLGE_FILTER_TYPE_VF		0
7530 #define HCLGE_FILTER_TYPE_PORT		1
7531 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7532 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7533 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7534 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7535 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7536 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7537 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7538 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7539 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7540 
7541 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7542 {
7543 	struct hclge_vport *vport = hclge_get_vport(handle);
7544 	struct hclge_dev *hdev = vport->back;
7545 
7546 	if (hdev->pdev->revision >= 0x21) {
7547 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7548 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7549 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7550 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7551 	} else {
7552 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7553 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7554 					   0);
7555 	}
7556 	if (enable)
7557 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7558 	else
7559 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7560 }
7561 
7562 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7563 				    bool is_kill, u16 vlan,
7564 				    __be16 proto)
7565 {
7566 #define HCLGE_MAX_VF_BYTES  16
7567 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7568 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7569 	struct hclge_desc desc[2];
7570 	u8 vf_byte_val;
7571 	u8 vf_byte_off;
7572 	int ret;
7573 
7574 	/* If the vf vlan table is full, firmware will disable the vf vlan
7575 	 * filter; adding a new vlan id is then neither possible nor needed.
7576 	 */
7577 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7578 		return 0;
7579 
7580 	hclge_cmd_setup_basic_desc(&desc[0],
7581 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7582 	hclge_cmd_setup_basic_desc(&desc[1],
7583 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7584 
7585 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7586 
7587 	vf_byte_off = vfid / 8;
7588 	vf_byte_val = 1 << (vfid % 8);
7589 
7590 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7591 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7592 
7593 	req0->vlan_id  = cpu_to_le16(vlan);
7594 	req0->vlan_cfg = is_kill;
7595 
7596 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7597 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7598 	else
7599 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7600 
7601 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7602 	if (ret) {
7603 		dev_err(&hdev->pdev->dev,
7604 			"Send vf vlan command fail, ret =%d.\n",
7605 			ret);
7606 		return ret;
7607 	}
7608 
7609 	if (!is_kill) {
7610 #define HCLGE_VF_VLAN_NO_ENTRY	2
7611 		if (!req0->resp_code || req0->resp_code == 1)
7612 			return 0;
7613 
7614 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7615 			set_bit(vfid, hdev->vf_vlan_full);
7616 			dev_warn(&hdev->pdev->dev,
7617 				 "vf vlan table is full, vf vlan filter is disabled\n");
7618 			return 0;
7619 		}
7620 
7621 		dev_err(&hdev->pdev->dev,
7622 			"Add vf vlan filter fail, ret =%d.\n",
7623 			req0->resp_code);
7624 	} else {
7625 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7626 		if (!req0->resp_code)
7627 			return 0;
7628 
7629 		/* The vf vlan filter is disabled when the vf vlan table is
7630 		 * full, so new vlan ids are not added to the vf vlan table.
7631 		 * Just return 0 without a warning, to avoid massively
7632 		 * verbose logs on unload.
7633 		 */
7634 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7635 			return 0;
7636 
7637 		dev_err(&hdev->pdev->dev,
7638 			"Kill vf vlan filter fail, ret =%d.\n",
7639 			req0->resp_code);
7640 	}
7641 
7642 	return -EIO;
7643 }
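
/*
 * Worked sketch of the VF bitmap addressing used above (illustrative only;
 * the helper name is assumed and not used by the driver): each descriptor
 * carries HCLGE_MAX_VF_BYTES (16) bitmap bytes, so VF ids 0..127 are set
 * in desc[0] and higher ids spill into desc[1]. For vfid 130:
 * byte offset = 130 / 8 = 16, i.e. desc[1] byte 0, and the bit value is
 * 1 << (130 % 8) = 0x04.
 */
static inline void hclge_vf_vlan_bitmap_pos(u16 vfid, u8 *byte_off,
					    u8 *bit_val)
{
	*byte_off = vfid / 8;		/* byte index across both descriptors */
	*bit_val = 1U << (vfid % 8);	/* bit within that byte */
}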
7644 
7645 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7646 				      u16 vlan_id, bool is_kill)
7647 {
7648 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7649 	struct hclge_desc desc;
7650 	u8 vlan_offset_byte_val;
7651 	u8 vlan_offset_byte;
7652 	u8 vlan_offset_160;
7653 	int ret;
7654 
7655 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7656 
7657 	vlan_offset_160 = vlan_id / 160;
7658 	vlan_offset_byte = (vlan_id % 160) / 8;
7659 	vlan_offset_byte_val = 1 << (vlan_id % 8);
7660 
7661 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7662 	req->vlan_offset = vlan_offset_160;
7663 	req->vlan_cfg = is_kill;
7664 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7665 
7666 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7667 	if (ret)
7668 		dev_err(&hdev->pdev->dev,
7669 			"port vlan command, send fail, ret =%d.\n", ret);
7670 	return ret;
7671 }
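
/*
 * Worked sketch of the PF VLAN bitmap addressing used above (illustrative
 * only; the helper name is assumed and not used by the driver): VLAN ids
 * are handled in groups of 160, and each command carries the bitmap of one
 * group. For vlan_id 1000: vlan_offset_160 = 1000 / 160 = 6,
 * vlan_offset_byte = (1000 % 160) / 8 = 5, and the bit value is
 * 1 << (1000 % 8) = 0x01.
 */
static inline void hclge_pf_vlan_bitmap_pos(u16 vlan_id, u8 *offset_160,
					    u8 *byte_off, u8 *bit_val)
{
	*offset_160 = vlan_id / 160;
	*byte_off = (vlan_id % 160) / 8;
	*bit_val = 1U << (vlan_id % 8);
}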
7672 
7673 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7674 				    u16 vport_id, u16 vlan_id,
7675 				    bool is_kill)
7676 {
7677 	u16 vport_idx, vport_num = 0;
7678 	int ret;
7679 
7680 	if (is_kill && !vlan_id)
7681 		return 0;
7682 
7683 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7684 				       proto);
7685 	if (ret) {
7686 		dev_err(&hdev->pdev->dev,
7687 			"Set %d vport vlan filter config fail, ret =%d.\n",
7688 			vport_id, ret);
7689 		return ret;
7690 	}
7691 
7692 	/* vlan 0 may be added twice when 8021q module is enabled */
7693 	if (!is_kill && !vlan_id &&
7694 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7695 		return 0;
7696 
7697 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7698 		dev_err(&hdev->pdev->dev,
7699 			"Add port vlan failed, vport %d is already in vlan %d\n",
7700 			vport_id, vlan_id);
7701 		return -EINVAL;
7702 	}
7703 
7704 	if (is_kill &&
7705 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7706 		dev_err(&hdev->pdev->dev,
7707 			"Delete port vlan failed, vport %d is not in vlan %d\n",
7708 			vport_id, vlan_id);
7709 		return -EINVAL;
7710 	}
7711 
7712 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7713 		vport_num++;
7714 
7715 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7716 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7717 						 is_kill);
7718 
7719 	return ret;
7720 }
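
/*
 * Illustrative sketch (the helper name is assumed and not used by the
 * driver): hclge_set_vlan_filter_hw() above only touches the port-level
 * VLAN filter on the first add or the last kill for a given VLAN id; in
 * between, only the per-VLAN vport bitmap in hdev->vlan_table[] changes.
 * The condition it computes with for_each_set_bit() is equivalent to:
 */
static inline bool hclge_port_vlan_update_needed(struct hclge_dev *hdev,
						 u16 vlan_id, bool is_kill)
{
	unsigned int vport_num = bitmap_weight(hdev->vlan_table[vlan_id],
					       HCLGE_VPORT_NUM);

	return (is_kill && vport_num == 0) || (!is_kill && vport_num == 1);
}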
7721 
7722 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7723 {
7724 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7725 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7726 	struct hclge_dev *hdev = vport->back;
7727 	struct hclge_desc desc;
7728 	u16 bmap_index;
7729 	int status;
7730 
7731 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7732 
7733 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7734 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7735 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7736 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7737 		      vcfg->accept_tag1 ? 1 : 0);
7738 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7739 		      vcfg->accept_untag1 ? 1 : 0);
7740 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7741 		      vcfg->accept_tag2 ? 1 : 0);
7742 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7743 		      vcfg->accept_untag2 ? 1 : 0);
7744 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7745 		      vcfg->insert_tag1_en ? 1 : 0);
7746 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7747 		      vcfg->insert_tag2_en ? 1 : 0);
7748 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7749 
7750 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7751 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7752 			HCLGE_VF_NUM_PER_BYTE;
7753 	req->vf_bitmap[bmap_index] =
7754 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7755 
7756 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7757 	if (status)
7758 		dev_err(&hdev->pdev->dev,
7759 			"Send port txvlan cfg command fail, ret =%d\n",
7760 			status);
7761 
7762 	return status;
7763 }
7764 
7765 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7766 {
7767 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7768 	struct hclge_vport_vtag_rx_cfg_cmd *req;
7769 	struct hclge_dev *hdev = vport->back;
7770 	struct hclge_desc desc;
7771 	u16 bmap_index;
7772 	int status;
7773 
7774 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7775 
7776 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7777 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7778 		      vcfg->strip_tag1_en ? 1 : 0);
7779 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7780 		      vcfg->strip_tag2_en ? 1 : 0);
7781 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7782 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
7783 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7784 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
7785 
7786 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7787 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7788 			HCLGE_VF_NUM_PER_BYTE;
7789 	req->vf_bitmap[bmap_index] =
7790 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7791 
7792 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7793 	if (status)
7794 		dev_err(&hdev->pdev->dev,
7795 			"Send port rxvlan cfg command fail, ret =%d\n",
7796 			status);
7797 
7798 	return status;
7799 }
7800 
7801 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7802 				  u16 port_base_vlan_state,
7803 				  u16 vlan_tag)
7804 {
7805 	int ret;
7806 
7807 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7808 		vport->txvlan_cfg.accept_tag1 = true;
7809 		vport->txvlan_cfg.insert_tag1_en = false;
7810 		vport->txvlan_cfg.default_tag1 = 0;
7811 	} else {
7812 		vport->txvlan_cfg.accept_tag1 = false;
7813 		vport->txvlan_cfg.insert_tag1_en = true;
7814 		vport->txvlan_cfg.default_tag1 = vlan_tag;
7815 	}
7816 
7817 	vport->txvlan_cfg.accept_untag1 = true;
7818 
7819 	/* accept_tag2 and accept_untag2 are not supported on
7820 	 * pdev revision 0x20; newer revisions support them, but
7821 	 * these two fields cannot be configured by the user.
7822 	 */
7823 	vport->txvlan_cfg.accept_tag2 = true;
7824 	vport->txvlan_cfg.accept_untag2 = true;
7825 	vport->txvlan_cfg.insert_tag2_en = false;
7826 	vport->txvlan_cfg.default_tag2 = 0;
7827 
7828 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7829 		vport->rxvlan_cfg.strip_tag1_en = false;
7830 		vport->rxvlan_cfg.strip_tag2_en =
7831 				vport->rxvlan_cfg.rx_vlan_offload_en;
7832 	} else {
7833 		vport->rxvlan_cfg.strip_tag1_en =
7834 				vport->rxvlan_cfg.rx_vlan_offload_en;
7835 		vport->rxvlan_cfg.strip_tag2_en = true;
7836 	}
7837 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7838 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7839 
7840 	ret = hclge_set_vlan_tx_offload_cfg(vport);
7841 	if (ret)
7842 		return ret;
7843 
7844 	return hclge_set_vlan_rx_offload_cfg(vport);
7845 }
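
/*
 * Summary of the configuration above: with port based VLAN disabled,
 * accept_tag1 is set and no default tag1 is inserted on TX, while RX
 * strips only tag2 (following the RX VLAN offload setting); with port
 * based VLAN enabled, TX inserts the port based tag as default tag1
 * (accept_tag1 is cleared), RX always strips tag2, and tag1 stripping
 * follows the RX VLAN offload setting.
 */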
7846 
7847 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7848 {
7849 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7850 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7851 	struct hclge_desc desc;
7852 	int status;
7853 
7854 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7855 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7856 	rx_req->ot_fst_vlan_type =
7857 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7858 	rx_req->ot_sec_vlan_type =
7859 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7860 	rx_req->in_fst_vlan_type =
7861 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7862 	rx_req->in_sec_vlan_type =
7863 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7864 
7865 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7866 	if (status) {
7867 		dev_err(&hdev->pdev->dev,
7868 			"Send rxvlan protocol type command fail, ret =%d\n",
7869 			status);
7870 		return status;
7871 	}
7872 
7873 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7874 
7875 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7876 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7877 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7878 
7879 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7880 	if (status)
7881 		dev_err(&hdev->pdev->dev,
7882 			"Send txvlan protocol type command fail, ret =%d\n",
7883 			status);
7884 
7885 	return status;
7886 }
7887 
7888 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7889 {
7890 #define HCLGE_DEF_VLAN_TYPE		0x8100
7891 
7892 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7893 	struct hclge_vport *vport;
7894 	int ret;
7895 	int i;
7896 
7897 	if (hdev->pdev->revision >= 0x21) {
7898 		/* for revision 0x21, vf vlan filter is per function */
7899 		for (i = 0; i < hdev->num_alloc_vport; i++) {
7900 			vport = &hdev->vport[i];
7901 			ret = hclge_set_vlan_filter_ctrl(hdev,
7902 							 HCLGE_FILTER_TYPE_VF,
7903 							 HCLGE_FILTER_FE_EGRESS,
7904 							 true,
7905 							 vport->vport_id);
7906 			if (ret)
7907 				return ret;
7908 		}
7909 
7910 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7911 						 HCLGE_FILTER_FE_INGRESS, true,
7912 						 0);
7913 		if (ret)
7914 			return ret;
7915 	} else {
7916 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7917 						 HCLGE_FILTER_FE_EGRESS_V1_B,
7918 						 true, 0);
7919 		if (ret)
7920 			return ret;
7921 	}
7922 
7923 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
7924 
7925 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7926 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7927 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7928 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7929 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7930 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7931 
7932 	ret = hclge_set_vlan_protocol_type(hdev);
7933 	if (ret)
7934 		return ret;
7935 
7936 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7937 		u16 vlan_tag;
7938 
7939 		vport = &hdev->vport[i];
7940 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7941 
7942 		ret = hclge_vlan_offload_cfg(vport,
7943 					     vport->port_base_vlan_cfg.state,
7944 					     vlan_tag);
7945 		if (ret)
7946 			return ret;
7947 	}
7948 
7949 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7950 }
7951 
7952 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7953 				       bool writen_to_tbl)
7954 {
7955 	struct hclge_vport_vlan_cfg *vlan;
7956 
7957 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7958 	if (!vlan)
7959 		return;
7960 
7961 	vlan->hd_tbl_status = writen_to_tbl;
7962 	vlan->vlan_id = vlan_id;
7963 
7964 	list_add_tail(&vlan->node, &vport->vlan_list);
7965 }
7966 
7967 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7968 {
7969 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7970 	struct hclge_dev *hdev = vport->back;
7971 	int ret;
7972 
7973 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7974 		if (!vlan->hd_tbl_status) {
7975 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7976 						       vport->vport_id,
7977 						       vlan->vlan_id, false);
7978 			if (ret) {
7979 				dev_err(&hdev->pdev->dev,
7980 					"restore vport vlan list failed, ret=%d\n",
7981 					ret);
7982 				return ret;
7983 			}
7984 		}
7985 		vlan->hd_tbl_status = true;
7986 	}
7987 
7988 	return 0;
7989 }
7990 
7991 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7992 				      bool is_write_tbl)
7993 {
7994 	struct hclge_vport_vlan_cfg *vlan, *tmp;
7995 	struct hclge_dev *hdev = vport->back;
7996 
7997 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7998 		if (vlan->vlan_id == vlan_id) {
7999 			if (is_write_tbl && vlan->hd_tbl_status)
8000 				hclge_set_vlan_filter_hw(hdev,
8001 							 htons(ETH_P_8021Q),
8002 							 vport->vport_id,
8003 							 vlan_id,
8004 							 true);
8005 
8006 			list_del(&vlan->node);
8007 			kfree(vlan);
8008 			break;
8009 		}
8010 	}
8011 }
8012 
8013 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8014 {
8015 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8016 	struct hclge_dev *hdev = vport->back;
8017 
8018 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8019 		if (vlan->hd_tbl_status)
8020 			hclge_set_vlan_filter_hw(hdev,
8021 						 htons(ETH_P_8021Q),
8022 						 vport->vport_id,
8023 						 vlan->vlan_id,
8024 						 true);
8025 
8026 		vlan->hd_tbl_status = false;
8027 		if (is_del_list) {
8028 			list_del(&vlan->node);
8029 			kfree(vlan);
8030 		}
8031 	}
8032 }
8033 
8034 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8035 {
8036 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8037 	struct hclge_vport *vport;
8038 	int i;
8039 
8040 	mutex_lock(&hdev->vport_cfg_mutex);
8041 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8042 		vport = &hdev->vport[i];
8043 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8044 			list_del(&vlan->node);
8045 			kfree(vlan);
8046 		}
8047 	}
8048 	mutex_unlock(&hdev->vport_cfg_mutex);
8049 }
8050 
8051 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8052 {
8053 	struct hclge_vport *vport = hclge_get_vport(handle);
8054 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8055 	struct hclge_dev *hdev = vport->back;
8056 	u16 vlan_proto;
8057 	u16 state, vlan_id;
8058 	int i;
8059 
8060 	mutex_lock(&hdev->vport_cfg_mutex);
8061 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8062 		vport = &hdev->vport[i];
8063 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8064 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8065 		state = vport->port_base_vlan_cfg.state;
8066 
8067 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8068 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8069 						 vport->vport_id, vlan_id,
8070 						 false);
8071 			continue;
8072 		}
8073 
8074 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8075 			if (vlan->hd_tbl_status)
8076 				hclge_set_vlan_filter_hw(hdev,
8077 							 htons(ETH_P_8021Q),
8078 							 vport->vport_id,
8079 							 vlan->vlan_id,
8080 							 false);
8081 		}
8082 	}
8083 
8084 	mutex_unlock(&hdev->vport_cfg_mutex);
8085 }
8086 
8087 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8088 {
8089 	struct hclge_vport *vport = hclge_get_vport(handle);
8090 
8091 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8092 		vport->rxvlan_cfg.strip_tag1_en = false;
8093 		vport->rxvlan_cfg.strip_tag2_en = enable;
8094 	} else {
8095 		vport->rxvlan_cfg.strip_tag1_en = enable;
8096 		vport->rxvlan_cfg.strip_tag2_en = true;
8097 	}
8098 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8099 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8100 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8101 
8102 	return hclge_set_vlan_rx_offload_cfg(vport);
8103 }
8104 
8105 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8106 					    u16 port_base_vlan_state,
8107 					    struct hclge_vlan_info *new_info,
8108 					    struct hclge_vlan_info *old_info)
8109 {
8110 	struct hclge_dev *hdev = vport->back;
8111 	int ret;
8112 
8113 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8114 		hclge_rm_vport_all_vlan_table(vport, false);
8115 		return hclge_set_vlan_filter_hw(hdev,
8116 						 htons(new_info->vlan_proto),
8117 						 vport->vport_id,
8118 						 new_info->vlan_tag,
8119 						 false);
8120 	}
8121 
8122 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8123 				       vport->vport_id, old_info->vlan_tag,
8124 				       true);
8125 	if (ret)
8126 		return ret;
8127 
8128 	return hclge_add_vport_all_vlan_table(vport);
8129 }
8130 
8131 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8132 				    struct hclge_vlan_info *vlan_info)
8133 {
8134 	struct hnae3_handle *nic = &vport->nic;
8135 	struct hclge_vlan_info *old_vlan_info;
8136 	struct hclge_dev *hdev = vport->back;
8137 	int ret;
8138 
8139 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8140 
8141 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8142 	if (ret)
8143 		return ret;
8144 
8145 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8146 		/* add new VLAN tag */
8147 		ret = hclge_set_vlan_filter_hw(hdev,
8148 					       htons(vlan_info->vlan_proto),
8149 					       vport->vport_id,
8150 					       vlan_info->vlan_tag,
8151 					       false);
8152 		if (ret)
8153 			return ret;
8154 
8155 		/* remove old VLAN tag */
8156 		ret = hclge_set_vlan_filter_hw(hdev,
8157 					       htons(old_vlan_info->vlan_proto),
8158 					       vport->vport_id,
8159 					       old_vlan_info->vlan_tag,
8160 					       true);
8161 		if (ret)
8162 			return ret;
8163 
8164 		goto update;
8165 	}
8166 
8167 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8168 					       old_vlan_info);
8169 	if (ret)
8170 		return ret;
8171 
8172 	/* update state only when disable/enable port based VLAN */
8173 	vport->port_base_vlan_cfg.state = state;
8174 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8175 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8176 	else
8177 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8178 
8179 update:
8180 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8181 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8182 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8183 
8184 	return 0;
8185 }
8186 
8187 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8188 					  enum hnae3_port_base_vlan_state state,
8189 					  u16 vlan)
8190 {
8191 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8192 		if (!vlan)
8193 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8194 		else
8195 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8196 	} else {
8197 		if (!vlan)
8198 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8199 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8200 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8201 		else
8202 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8203 	}
8204 }
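
/*
 * The helper above maps the current port based VLAN state plus the
 * requested tag onto the action to take. For example, with the feature
 * currently disabled, requesting a non-zero VLAN returns
 * HNAE3_PORT_BASE_VLAN_ENABLE; with it enabled, requesting VLAN 0 returns
 * HNAE3_PORT_BASE_VLAN_DISABLE, requesting the already configured tag
 * returns HNAE3_PORT_BASE_VLAN_NOCHANGE, and any other tag returns
 * HNAE3_PORT_BASE_VLAN_MODIFY.
 */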
8205 
8206 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8207 				    u16 vlan, u8 qos, __be16 proto)
8208 {
8209 	struct hclge_vport *vport = hclge_get_vport(handle);
8210 	struct hclge_dev *hdev = vport->back;
8211 	struct hclge_vlan_info vlan_info;
8212 	u16 state;
8213 	int ret;
8214 
8215 	if (hdev->pdev->revision == 0x20)
8216 		return -EOPNOTSUPP;
8217 
8218 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8219 	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
8220 		return -EINVAL;
8221 	if (proto != htons(ETH_P_8021Q))
8222 		return -EPROTONOSUPPORT;
8223 
8224 	vport = &hdev->vport[vfid];
8225 	state = hclge_get_port_base_vlan_state(vport,
8226 					       vport->port_base_vlan_cfg.state,
8227 					       vlan);
8228 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8229 		return 0;
8230 
8231 	vlan_info.vlan_tag = vlan;
8232 	vlan_info.qos = qos;
8233 	vlan_info.vlan_proto = ntohs(proto);
8234 
8235 	/* update port based VLAN for PF */
8236 	if (!vfid) {
8237 		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8238 		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
8239 		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8240 
8241 		return ret;
8242 	}
8243 
8244 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8245 		return hclge_update_port_base_vlan_cfg(vport, state,
8246 						       &vlan_info);
8247 	} else {
8248 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8249 							(u8)vfid, state,
8250 							vlan, qos,
8251 							ntohs(proto));
8252 		return ret;
8253 	}
8254 }
8255 
8256 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8257 			  u16 vlan_id, bool is_kill)
8258 {
8259 	struct hclge_vport *vport = hclge_get_vport(handle);
8260 	struct hclge_dev *hdev = vport->back;
8261 	bool writen_to_tbl = false;
8262 	int ret = 0;
8263 
8264 	/* When the device is resetting, firmware is unable to handle
8265 	 * the mailbox. Just record the vlan id, and remove it after
8266 	 * the reset has finished.
8267 	 */
8268 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8269 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8270 		return -EBUSY;
8271 	}
8272 
8273 	/* When port based vlan is enabled, the port based vlan is used as
8274 	 * the vlan filter entry. In this case, the vlan filter table is not
8275 	 * updated when the user adds or removes a vlan; only the vport vlan
8276 	 * list is updated. The vlan ids in the list are not written to the
8277 	 * vlan filter table until port based vlan is disabled.
8278 	 */
8279 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8280 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8281 					       vlan_id, is_kill);
8282 		writen_to_tbl = true;
8283 	}
8284 
8285 	if (!ret) {
8286 		if (is_kill)
8287 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8288 		else
8289 			hclge_add_vport_vlan_table(vport, vlan_id,
8290 						   writen_to_tbl);
8291 	} else if (is_kill) {
8292 		/* When removing the hw vlan filter failed, record the vlan
8293 		 * id and try to remove it from hw later, to stay consistent
8294 		 * with the stack.
8295 		 */
8296 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8297 	}
8298 	return ret;
8299 }
8300 
8301 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8302 {
8303 #define HCLGE_MAX_SYNC_COUNT	60
8304 
8305 	int i, ret, sync_cnt = 0;
8306 	u16 vlan_id;
8307 
8308 	/* sync each vport; the PF (vport 0) is always alive */
8309 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8310 		struct hclge_vport *vport = &hdev->vport[i];
8311 
8312 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8313 					 VLAN_N_VID);
8314 		while (vlan_id != VLAN_N_VID) {
8315 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8316 						       vport->vport_id, vlan_id,
8317 						       true);
8318 			if (ret && ret != -EINVAL)
8319 				return;
8320 
8321 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8322 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8323 
8324 			sync_cnt++;
8325 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8326 				return;
8327 
8328 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8329 						 VLAN_N_VID);
8330 		}
8331 	}
8332 }
8333 
8334 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8335 {
8336 	struct hclge_config_max_frm_size_cmd *req;
8337 	struct hclge_desc desc;
8338 
8339 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8340 
8341 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8342 	req->max_frm_size = cpu_to_le16(new_mps);
8343 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8344 
8345 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8346 }
8347 
8348 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8349 {
8350 	struct hclge_vport *vport = hclge_get_vport(handle);
8351 
8352 	return hclge_set_vport_mtu(vport, new_mtu);
8353 }
8354 
8355 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8356 {
8357 	struct hclge_dev *hdev = vport->back;
8358 	int i, max_frm_size, ret;
8359 
8360 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8361 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8362 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8363 		return -EINVAL;
8364 
8365 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8366 	mutex_lock(&hdev->vport_lock);
8367 	/* VF's mps must fit within hdev->mps */
8368 	if (vport->vport_id && max_frm_size > hdev->mps) {
8369 		mutex_unlock(&hdev->vport_lock);
8370 		return -EINVAL;
8371 	} else if (vport->vport_id) {
8372 		vport->mps = max_frm_size;
8373 		mutex_unlock(&hdev->vport_lock);
8374 		return 0;
8375 	}
8376 
8377 	/* PF's mps must not be less than any VF's mps */
8378 	for (i = 1; i < hdev->num_alloc_vport; i++)
8379 		if (max_frm_size < hdev->vport[i].mps) {
8380 			mutex_unlock(&hdev->vport_lock);
8381 			return -EINVAL;
8382 		}
8383 
8384 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8385 
8386 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8387 	if (ret) {
8388 		dev_err(&hdev->pdev->dev,
8389 			"Change mtu fail, ret =%d\n", ret);
8390 		goto out;
8391 	}
8392 
8393 	hdev->mps = max_frm_size;
8394 	vport->mps = max_frm_size;
8395 
8396 	ret = hclge_buffer_alloc(hdev);
8397 	if (ret)
8398 		dev_err(&hdev->pdev->dev,
8399 			"Allocate buffer fail, ret =%d\n", ret);
8400 
8401 out:
8402 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8403 	mutex_unlock(&hdev->vport_lock);
8404 	return ret;
8405 }
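
/*
 * Illustrative sketch of the frame-size arithmetic used above (the helper
 * name is assumed and not used by the driver): the MPS programmed into
 * the MAC must cover the Ethernet header, the FCS and two VLAN tags on
 * top of the MTU, so a standard 1500-byte MTU gives
 * 1500 + 14 + 4 + 2 * 4 = 1526 bytes.
 */
static inline int hclge_mtu_to_frm_size(int new_mtu)
{
	return new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
}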
8406 
8407 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8408 				    bool enable)
8409 {
8410 	struct hclge_reset_tqp_queue_cmd *req;
8411 	struct hclge_desc desc;
8412 	int ret;
8413 
8414 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8415 
8416 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8417 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8418 	if (enable)
8419 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8420 
8421 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8422 	if (ret) {
8423 		dev_err(&hdev->pdev->dev,
8424 			"Send tqp reset cmd error, status =%d\n", ret);
8425 		return ret;
8426 	}
8427 
8428 	return 0;
8429 }
8430 
8431 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8432 {
8433 	struct hclge_reset_tqp_queue_cmd *req;
8434 	struct hclge_desc desc;
8435 	int ret;
8436 
8437 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8438 
8439 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8440 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8441 
8442 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8443 	if (ret) {
8444 		dev_err(&hdev->pdev->dev,
8445 			"Get reset status error, status =%d\n", ret);
8446 		return ret;
8447 	}
8448 
8449 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8450 }
8451 
8452 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8453 {
8454 	struct hnae3_queue *queue;
8455 	struct hclge_tqp *tqp;
8456 
8457 	queue = handle->kinfo.tqp[queue_id];
8458 	tqp = container_of(queue, struct hclge_tqp, q);
8459 
8460 	return tqp->index;
8461 }
8462 
8463 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8464 {
8465 	struct hclge_vport *vport = hclge_get_vport(handle);
8466 	struct hclge_dev *hdev = vport->back;
8467 	int reset_try_times = 0;
8468 	int reset_status;
8469 	u16 queue_gid;
8470 	int ret;
8471 
8472 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8473 
8474 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8475 	if (ret) {
8476 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8477 		return ret;
8478 	}
8479 
8480 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8481 	if (ret) {
8482 		dev_err(&hdev->pdev->dev,
8483 			"Send reset tqp cmd fail, ret = %d\n", ret);
8484 		return ret;
8485 	}
8486 
8487 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8488 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8489 		if (reset_status)
8490 			break;
8491 
8492 		/* Wait for tqp hw reset */
8493 		usleep_range(1000, 1200);
8494 	}
8495 
8496 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8497 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8498 		return ret;
8499 	}
8500 
8501 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8502 	if (ret)
8503 		dev_err(&hdev->pdev->dev,
8504 			"Deassert the soft reset fail, ret = %d\n", ret);
8505 
8506 	return ret;
8507 }
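
/*
 * TQP reset sequence used above: disable the queue, assert the per-queue
 * soft reset, poll the ready-to-reset status for up to
 * HCLGE_TQP_RESET_TRY_TIMES iterations (sleeping about 1ms per try), and
 * finally deassert the soft reset.
 */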
8508 
8509 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8510 {
8511 	struct hclge_dev *hdev = vport->back;
8512 	int reset_try_times = 0;
8513 	int reset_status;
8514 	u16 queue_gid;
8515 	int ret;
8516 
8517 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8518 
8519 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8520 	if (ret) {
8521 		dev_warn(&hdev->pdev->dev,
8522 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8523 		return;
8524 	}
8525 
8526 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8527 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8528 		if (reset_status)
8529 			break;
8530 
8531 		/* Wait for tqp hw reset */
8532 		usleep_range(1000, 1200);
8533 	}
8534 
8535 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8536 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8537 		return;
8538 	}
8539 
8540 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8541 	if (ret)
8542 		dev_warn(&hdev->pdev->dev,
8543 			 "Deassert the soft reset fail, ret = %d\n", ret);
8544 }
8545 
8546 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8547 {
8548 	struct hclge_vport *vport = hclge_get_vport(handle);
8549 	struct hclge_dev *hdev = vport->back;
8550 
8551 	return hdev->fw_version;
8552 }
8553 
8554 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8555 {
8556 	struct phy_device *phydev = hdev->hw.mac.phydev;
8557 
8558 	if (!phydev)
8559 		return;
8560 
8561 	phy_set_asym_pause(phydev, rx_en, tx_en);
8562 }
8563 
8564 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8565 {
8566 	int ret;
8567 
8568 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8569 		return 0;
8570 
8571 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8572 	if (ret)
8573 		dev_err(&hdev->pdev->dev,
8574 			"configure pauseparam error, ret = %d.\n", ret);
8575 
8576 	return ret;
8577 }
8578 
8579 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8580 {
8581 	struct phy_device *phydev = hdev->hw.mac.phydev;
8582 	u16 remote_advertising = 0;
8583 	u16 local_advertising;
8584 	u32 rx_pause, tx_pause;
8585 	u8 flowctl;
8586 
8587 	if (!phydev->link || !phydev->autoneg)
8588 		return 0;
8589 
8590 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8591 
8592 	if (phydev->pause)
8593 		remote_advertising = LPA_PAUSE_CAP;
8594 
8595 	if (phydev->asym_pause)
8596 		remote_advertising |= LPA_PAUSE_ASYM;
8597 
8598 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8599 					   remote_advertising);
8600 	tx_pause = flowctl & FLOW_CTRL_TX;
8601 	rx_pause = flowctl & FLOW_CTRL_RX;
8602 
8603 	if (phydev->duplex == HCLGE_MAC_HALF) {
8604 		tx_pause = 0;
8605 		rx_pause = 0;
8606 	}
8607 
8608 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8609 }
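
/*
 * Example of the resolution above: if both the local PHY and the link
 * partner advertise symmetric pause (ADVERTISE_PAUSE_CAP / LPA_PAUSE_CAP),
 * mii_resolve_flowctrl_fdx() reports FLOW_CTRL_TX | FLOW_CTRL_RX and both
 * directions of MAC pause are enabled; at half duplex the result is
 * overridden and pause is disabled in both directions.
 */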
8610 
8611 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8612 				 u32 *rx_en, u32 *tx_en)
8613 {
8614 	struct hclge_vport *vport = hclge_get_vport(handle);
8615 	struct hclge_dev *hdev = vport->back;
8616 	struct phy_device *phydev = hdev->hw.mac.phydev;
8617 
8618 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8619 
8620 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8621 		*rx_en = 0;
8622 		*tx_en = 0;
8623 		return;
8624 	}
8625 
8626 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8627 		*rx_en = 1;
8628 		*tx_en = 0;
8629 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8630 		*tx_en = 1;
8631 		*rx_en = 0;
8632 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8633 		*rx_en = 1;
8634 		*tx_en = 1;
8635 	} else {
8636 		*rx_en = 0;
8637 		*tx_en = 0;
8638 	}
8639 }
8640 
8641 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8642 					 u32 rx_en, u32 tx_en)
8643 {
8644 	if (rx_en && tx_en)
8645 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8646 	else if (rx_en && !tx_en)
8647 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8648 	else if (!rx_en && tx_en)
8649 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8650 	else
8651 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8652 
8653 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8654 }
8655 
8656 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8657 				u32 rx_en, u32 tx_en)
8658 {
8659 	struct hclge_vport *vport = hclge_get_vport(handle);
8660 	struct hclge_dev *hdev = vport->back;
8661 	struct phy_device *phydev = hdev->hw.mac.phydev;
8662 	u32 fc_autoneg;
8663 
8664 	if (phydev) {
8665 		fc_autoneg = hclge_get_autoneg(handle);
8666 		if (auto_neg != fc_autoneg) {
8667 			dev_info(&hdev->pdev->dev,
8668 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8669 			return -EOPNOTSUPP;
8670 		}
8671 	}
8672 
8673 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8674 		dev_info(&hdev->pdev->dev,
8675 			 "Priority flow control enabled. Cannot set link flow control.\n");
8676 		return -EOPNOTSUPP;
8677 	}
8678 
8679 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8680 
8681 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8682 
8683 	if (!auto_neg)
8684 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8685 
8686 	if (phydev)
8687 		return phy_start_aneg(phydev);
8688 
8689 	return -EOPNOTSUPP;
8690 }
8691 
8692 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8693 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8694 {
8695 	struct hclge_vport *vport = hclge_get_vport(handle);
8696 	struct hclge_dev *hdev = vport->back;
8697 
8698 	if (speed)
8699 		*speed = hdev->hw.mac.speed;
8700 	if (duplex)
8701 		*duplex = hdev->hw.mac.duplex;
8702 	if (auto_neg)
8703 		*auto_neg = hdev->hw.mac.autoneg;
8704 }
8705 
8706 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8707 				 u8 *module_type)
8708 {
8709 	struct hclge_vport *vport = hclge_get_vport(handle);
8710 	struct hclge_dev *hdev = vport->back;
8711 
8712 	if (media_type)
8713 		*media_type = hdev->hw.mac.media_type;
8714 
8715 	if (module_type)
8716 		*module_type = hdev->hw.mac.module_type;
8717 }
8718 
8719 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8720 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8721 {
8722 	struct hclge_vport *vport = hclge_get_vport(handle);
8723 	struct hclge_dev *hdev = vport->back;
8724 	struct phy_device *phydev = hdev->hw.mac.phydev;
8725 	int mdix_ctrl, mdix, is_resolved;
8726 	unsigned int retval;
8727 
8728 	if (!phydev) {
8729 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8730 		*tp_mdix = ETH_TP_MDI_INVALID;
8731 		return;
8732 	}
8733 
8734 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8735 
8736 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8737 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8738 				    HCLGE_PHY_MDIX_CTRL_S);
8739 
8740 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8741 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8742 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8743 
8744 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8745 
8746 	switch (mdix_ctrl) {
8747 	case 0x0:
8748 		*tp_mdix_ctrl = ETH_TP_MDI;
8749 		break;
8750 	case 0x1:
8751 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8752 		break;
8753 	case 0x3:
8754 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8755 		break;
8756 	default:
8757 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8758 		break;
8759 	}
8760 
8761 	if (!is_resolved)
8762 		*tp_mdix = ETH_TP_MDI_INVALID;
8763 	else if (mdix)
8764 		*tp_mdix = ETH_TP_MDI_X;
8765 	else
8766 		*tp_mdix = ETH_TP_MDI;
8767 }
8768 
8769 static void hclge_info_show(struct hclge_dev *hdev)
8770 {
8771 	struct device *dev = &hdev->pdev->dev;
8772 
8773 	dev_info(dev, "PF info begin:\n");
8774 
8775 	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
8776 	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8777 	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8778 	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
8779 	dev_info(dev, "Numbers of vmdq vports: %d\n", hdev->num_vmdq_vport);
8780 	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
8781 	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8782 	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8783 	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8784 	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8785 	dev_info(dev, "This is %s PF\n",
8786 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8787 	dev_info(dev, "DCB %s\n",
8788 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8789 	dev_info(dev, "MQPRIO %s\n",
8790 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8791 
8792 	dev_info(dev, "PF info end.\n");
8793 }
8794 
8795 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8796 					  struct hclge_vport *vport)
8797 {
8798 	struct hnae3_client *client = vport->nic.client;
8799 	struct hclge_dev *hdev = ae_dev->priv;
8800 	int rst_cnt;
8801 	int ret;
8802 
8803 	rst_cnt = hdev->rst_stats.reset_cnt;
8804 	ret = client->ops->init_instance(&vport->nic);
8805 	if (ret)
8806 		return ret;
8807 
8808 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8809 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8810 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8811 		ret = -EBUSY;
8812 		goto init_nic_err;
8813 	}
8814 
8815 	/* Enable nic hw error interrupts */
8816 	ret = hclge_config_nic_hw_error(hdev, true);
8817 	if (ret) {
8818 		dev_err(&ae_dev->pdev->dev,
8819 			"fail(%d) to enable hw error interrupts\n", ret);
8820 		goto init_nic_err;
8821 	}
8822 
8823 	hnae3_set_client_init_flag(client, ae_dev, 1);
8824 
8825 	if (netif_msg_drv(&hdev->vport->nic))
8826 		hclge_info_show(hdev);
8827 
8828 	return ret;
8829 
8830 init_nic_err:
8831 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8832 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8833 		msleep(HCLGE_WAIT_RESET_DONE);
8834 
8835 	client->ops->uninit_instance(&vport->nic, 0);
8836 
8837 	return ret;
8838 }
8839 
8840 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8841 					   struct hclge_vport *vport)
8842 {
8843 	struct hnae3_client *client = vport->roce.client;
8844 	struct hclge_dev *hdev = ae_dev->priv;
8845 	int rst_cnt;
8846 	int ret;
8847 
8848 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8849 	    !hdev->nic_client)
8850 		return 0;
8851 
8852 	client = hdev->roce_client;
8853 	ret = hclge_init_roce_base_info(vport);
8854 	if (ret)
8855 		return ret;
8856 
8857 	rst_cnt = hdev->rst_stats.reset_cnt;
8858 	ret = client->ops->init_instance(&vport->roce);
8859 	if (ret)
8860 		return ret;
8861 
8862 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8863 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8864 	    rst_cnt != hdev->rst_stats.reset_cnt) {
8865 		ret = -EBUSY;
8866 		goto init_roce_err;
8867 	}
8868 
8869 	/* Enable roce ras interrupts */
8870 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
8871 	if (ret) {
8872 		dev_err(&ae_dev->pdev->dev,
8873 			"fail(%d) to enable roce ras interrupts\n", ret);
8874 		goto init_roce_err;
8875 	}
8876 
8877 	hnae3_set_client_init_flag(client, ae_dev, 1);
8878 
8879 	return 0;
8880 
8881 init_roce_err:
8882 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8883 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8884 		msleep(HCLGE_WAIT_RESET_DONE);
8885 
8886 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8887 
8888 	return ret;
8889 }
8890 
8891 static int hclge_init_client_instance(struct hnae3_client *client,
8892 				      struct hnae3_ae_dev *ae_dev)
8893 {
8894 	struct hclge_dev *hdev = ae_dev->priv;
8895 	struct hclge_vport *vport;
8896 	int i, ret;
8897 
8898 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
8899 		vport = &hdev->vport[i];
8900 
8901 		switch (client->type) {
8902 		case HNAE3_CLIENT_KNIC:
8903 
8904 			hdev->nic_client = client;
8905 			vport->nic.client = client;
8906 			ret = hclge_init_nic_client_instance(ae_dev, vport);
8907 			if (ret)
8908 				goto clear_nic;
8909 
8910 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8911 			if (ret)
8912 				goto clear_roce;
8913 
8914 			break;
8915 		case HNAE3_CLIENT_ROCE:
8916 			if (hnae3_dev_roce_supported(hdev)) {
8917 				hdev->roce_client = client;
8918 				vport->roce.client = client;
8919 			}
8920 
8921 			ret = hclge_init_roce_client_instance(ae_dev, vport);
8922 			if (ret)
8923 				goto clear_roce;
8924 
8925 			break;
8926 		default:
8927 			return -EINVAL;
8928 		}
8929 	}
8930 
8931 	return 0;
8932 
8933 clear_nic:
8934 	hdev->nic_client = NULL;
8935 	vport->nic.client = NULL;
8936 	return ret;
8937 clear_roce:
8938 	hdev->roce_client = NULL;
8939 	vport->roce.client = NULL;
8940 	return ret;
8941 }
8942 
8943 static void hclge_uninit_client_instance(struct hnae3_client *client,
8944 					 struct hnae3_ae_dev *ae_dev)
8945 {
8946 	struct hclge_dev *hdev = ae_dev->priv;
8947 	struct hclge_vport *vport;
8948 	int i;
8949 
8950 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8951 		vport = &hdev->vport[i];
8952 		if (hdev->roce_client) {
8953 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8954 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8955 				msleep(HCLGE_WAIT_RESET_DONE);
8956 
8957 			hdev->roce_client->ops->uninit_instance(&vport->roce,
8958 								0);
8959 			hdev->roce_client = NULL;
8960 			vport->roce.client = NULL;
8961 		}
8962 		if (client->type == HNAE3_CLIENT_ROCE)
8963 			return;
8964 		if (hdev->nic_client && client->ops->uninit_instance) {
8965 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8966 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8967 				msleep(HCLGE_WAIT_RESET_DONE);
8968 
8969 			client->ops->uninit_instance(&vport->nic, 0);
8970 			hdev->nic_client = NULL;
8971 			vport->nic.client = NULL;
8972 		}
8973 	}
8974 }
8975 
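/* Enable the PCI device, set the DMA mask (64-bit with a 32-bit fallback),
 * request the BARs and map BAR 2 as the configuration register space.
 */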
8976 static int hclge_pci_init(struct hclge_dev *hdev)
8977 {
8978 	struct pci_dev *pdev = hdev->pdev;
8979 	struct hclge_hw *hw;
8980 	int ret;
8981 
8982 	ret = pci_enable_device(pdev);
8983 	if (ret) {
8984 		dev_err(&pdev->dev, "failed to enable PCI device\n");
8985 		return ret;
8986 	}
8987 
8988 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8989 	if (ret) {
8990 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8991 		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA\n");
8994 			goto err_disable_device;
8995 		}
8996 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8997 	}
8998 
8999 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9000 	if (ret) {
9001 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9002 		goto err_disable_device;
9003 	}
9004 
9005 	pci_set_master(pdev);
9006 	hw = &hdev->hw;
9007 	hw->io_base = pcim_iomap(pdev, 2, 0);
9008 	if (!hw->io_base) {
9009 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9010 		ret = -ENOMEM;
9011 		goto err_clr_master;
9012 	}
9013 
9014 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9015 
9016 	return 0;
9017 err_clr_master:
9018 	pci_clear_master(pdev);
9019 	pci_release_regions(pdev);
9020 err_disable_device:
9021 	pci_disable_device(pdev);
9022 
9023 	return ret;
9024 }
9025 
9026 static void hclge_pci_uninit(struct hclge_dev *hdev)
9027 {
9028 	struct pci_dev *pdev = hdev->pdev;
9029 
9030 	pcim_iounmap(pdev, hdev->hw.io_base);
9031 	pci_free_irq_vectors(pdev);
9032 	pci_clear_master(pdev);
9033 	pci_release_mem_regions(pdev);
9034 	pci_disable_device(pdev);
9035 }
9036 
9037 static void hclge_state_init(struct hclge_dev *hdev)
9038 {
9039 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9040 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9041 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9042 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9043 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9044 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9045 }
9046 
9047 static void hclge_state_uninit(struct hclge_dev *hdev)
9048 {
9049 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9050 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9051 
9052 	if (hdev->reset_timer.function)
9053 		del_timer_sync(&hdev->reset_timer);
9054 	if (hdev->service_task.work.func)
9055 		cancel_delayed_work_sync(&hdev->service_task);
9056 	if (hdev->rst_service_task.func)
9057 		cancel_work_sync(&hdev->rst_service_task);
9058 	if (hdev->mbx_service_task.func)
9059 		cancel_work_sync(&hdev->mbx_service_task);
9060 }
9061 
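/* Request a function reset ahead of an FLR and poll until the reset task
 * sets HNAE3_FLR_DOWN or the wait times out.
 */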
9062 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9063 {
9064 #define HCLGE_FLR_WAIT_MS	100
9065 #define HCLGE_FLR_WAIT_CNT	50
9066 	struct hclge_dev *hdev = ae_dev->priv;
9067 	int cnt = 0;
9068 
9069 	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9070 	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9071 	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9072 	hclge_reset_event(hdev->pdev, NULL);
9073 
9074 	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9075 	       cnt++ < HCLGE_FLR_WAIT_CNT)
9076 		msleep(HCLGE_FLR_WAIT_MS);
9077 
9078 	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9079 		dev_err(&hdev->pdev->dev,
9080 			"flr wait down timeout: %d\n", cnt);
9081 }
9082 
9083 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9084 {
9085 	struct hclge_dev *hdev = ae_dev->priv;
9086 
9087 	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9088 }
9089 
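/* Clear the FUNC_RST_ING flag of every VF so that VFs are not left marked
 * as resetting after the PF has (re)initialized.
 */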
9090 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9091 {
9092 	u16 i;
9093 
9094 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9095 		struct hclge_vport *vport = &hdev->vport[i];
9096 		int ret;
9097 
		/* Send cmd to clear VF's FUNC_RST_ING */
9099 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9100 		if (ret)
9101 			dev_warn(&hdev->pdev->dev,
9102 				 "clear vf(%d) rst failed %d!\n",
9103 				 vport->vport_id, ret);
9104 	}
9105 }
9106 
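/* PF probe path: allocate the hclge device, bring up the command queue,
 * MSI/MSI-X and the misc vector, TQPs and vports, MAC/MDIO, VLAN, TM, RSS
 * and the flow director tables, then arm the service tasks.
 */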
9107 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9108 {
9109 	struct pci_dev *pdev = ae_dev->pdev;
9110 	struct hclge_dev *hdev;
9111 	int ret;
9112 
9113 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9114 	if (!hdev) {
9115 		ret = -ENOMEM;
9116 		goto out;
9117 	}
9118 
9119 	hdev->pdev = pdev;
9120 	hdev->ae_dev = ae_dev;
9121 	hdev->reset_type = HNAE3_NONE_RESET;
9122 	hdev->reset_level = HNAE3_FUNC_RESET;
9123 	ae_dev->priv = hdev;
9124 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9125 
9126 	mutex_init(&hdev->vport_lock);
9127 	mutex_init(&hdev->vport_cfg_mutex);
9128 	spin_lock_init(&hdev->fd_rule_lock);
9129 
9130 	ret = hclge_pci_init(hdev);
9131 	if (ret) {
9132 		dev_err(&pdev->dev, "PCI init failed\n");
9133 		goto out;
9134 	}
9135 
	/* Initialize the firmware command queue */
9137 	ret = hclge_cmd_queue_init(hdev);
9138 	if (ret) {
9139 		dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9140 		goto err_pci_uninit;
9141 	}
9142 
	/* Initialize the firmware command interface */
9144 	ret = hclge_cmd_init(hdev);
9145 	if (ret)
9146 		goto err_cmd_uninit;
9147 
9148 	ret = hclge_get_cap(hdev);
9149 	if (ret) {
9150 		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9151 			ret);
9152 		goto err_cmd_uninit;
9153 	}
9154 
9155 	ret = hclge_configure(hdev);
9156 	if (ret) {
9157 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9158 		goto err_cmd_uninit;
9159 	}
9160 
9161 	ret = hclge_init_msi(hdev);
9162 	if (ret) {
9163 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9164 		goto err_cmd_uninit;
9165 	}
9166 
9167 	ret = hclge_misc_irq_init(hdev);
9168 	if (ret) {
9169 		dev_err(&pdev->dev,
9170 			"Misc IRQ(vector0) init error, ret = %d.\n",
9171 			ret);
9172 		goto err_msi_uninit;
9173 	}
9174 
9175 	ret = hclge_alloc_tqps(hdev);
9176 	if (ret) {
9177 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9178 		goto err_msi_irq_uninit;
9179 	}
9180 
9181 	ret = hclge_alloc_vport(hdev);
9182 	if (ret) {
9183 		dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9184 		goto err_msi_irq_uninit;
9185 	}
9186 
9187 	ret = hclge_map_tqp(hdev);
9188 	if (ret) {
9189 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9190 		goto err_msi_irq_uninit;
9191 	}
9192 
9193 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9194 		ret = hclge_mac_mdio_config(hdev);
9195 		if (ret) {
9196 			dev_err(&hdev->pdev->dev,
9197 				"mdio config fail ret=%d\n", ret);
9198 			goto err_msi_irq_uninit;
9199 		}
9200 	}
9201 
9202 	ret = hclge_init_umv_space(hdev);
9203 	if (ret) {
9204 		dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9205 		goto err_mdiobus_unreg;
9206 	}
9207 
9208 	ret = hclge_mac_init(hdev);
9209 	if (ret) {
9210 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9211 		goto err_mdiobus_unreg;
9212 	}
9213 
9214 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9215 	if (ret) {
9216 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9217 		goto err_mdiobus_unreg;
9218 	}
9219 
9220 	ret = hclge_config_gro(hdev, true);
9221 	if (ret)
9222 		goto err_mdiobus_unreg;
9223 
9224 	ret = hclge_init_vlan_config(hdev);
9225 	if (ret) {
9226 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9227 		goto err_mdiobus_unreg;
9228 	}
9229 
9230 	ret = hclge_tm_schd_init(hdev);
9231 	if (ret) {
9232 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9233 		goto err_mdiobus_unreg;
9234 	}
9235 
9236 	hclge_rss_init_cfg(hdev);
9237 	ret = hclge_rss_init_hw(hdev);
9238 	if (ret) {
9239 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9240 		goto err_mdiobus_unreg;
9241 	}
9242 
9243 	ret = init_mgr_tbl(hdev);
9244 	if (ret) {
9245 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9246 		goto err_mdiobus_unreg;
9247 	}
9248 
9249 	ret = hclge_init_fd_config(hdev);
9250 	if (ret) {
9251 		dev_err(&pdev->dev,
9252 			"fd table init fail, ret=%d\n", ret);
9253 		goto err_mdiobus_unreg;
9254 	}
9255 
9256 	INIT_KFIFO(hdev->mac_tnl_log);
9257 
9258 	hclge_dcb_ops_set(hdev);
9259 
9260 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9261 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9262 	INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
9263 	INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
9264 
	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify callback.
	 */
9268 	hclge_misc_affinity_setup(hdev);
9269 
9270 	hclge_clear_all_event_cause(hdev);
9271 	hclge_clear_resetting_state(hdev);
9272 
	/* Log and clear the hw errors that have already occurred */
9274 	hclge_handle_all_hns_hw_errors(ae_dev);
9275 
	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on this PF may affect the pending initialization of
	 * other PFs.
	 */
9279 	if (ae_dev->hw_err_reset_req) {
9280 		enum hnae3_reset_type reset_level;
9281 
9282 		reset_level = hclge_get_reset_level(ae_dev,
9283 						    &ae_dev->hw_err_reset_req);
9284 		hclge_set_def_reset_request(ae_dev, reset_level);
9285 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9286 	}
9287 
9288 	/* Enable MISC vector(vector0) */
9289 	hclge_enable_vector(&hdev->misc_vector, true);
9290 
9291 	hclge_state_init(hdev);
9292 	hdev->last_reset_time = jiffies;
9293 
9294 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9295 		 HCLGE_DRIVER_NAME);
9296 
9297 	return 0;
9298 
9299 err_mdiobus_unreg:
9300 	if (hdev->hw.mac.phydev)
9301 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9302 err_msi_irq_uninit:
9303 	hclge_misc_irq_uninit(hdev);
9304 err_msi_uninit:
9305 	pci_free_irq_vectors(pdev);
9306 err_cmd_uninit:
9307 	hclge_cmd_uninit(hdev);
9308 err_pci_uninit:
9309 	pcim_iounmap(pdev, hdev->hw.io_base);
9310 	pci_clear_master(pdev);
9311 	pci_release_regions(pdev);
9312 	pci_disable_device(pdev);
9313 out:
9314 	return ret;
9315 }
9316 
9317 static void hclge_stats_clear(struct hclge_dev *hdev)
9318 {
9319 	memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
9320 }
9321 
9322 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9323 {
9324 	struct hclge_vport *vport = hdev->vport;
9325 	int i;
9326 
9327 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9328 		hclge_vport_stop(vport);
9329 		vport++;
9330 	}
9331 }
9332 
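/* Rebuild the hardware configuration after a reset. Software resources
 * (vports, IRQs, work items) are kept; only the hardware state is
 * reprogrammed and the hw error interrupts are re-enabled.
 */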
9333 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9334 {
9335 	struct hclge_dev *hdev = ae_dev->priv;
9336 	struct pci_dev *pdev = ae_dev->pdev;
9337 	int ret;
9338 
9339 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9340 
9341 	hclge_stats_clear(hdev);
9342 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9343 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9344 
9345 	ret = hclge_cmd_init(hdev);
9346 	if (ret) {
9347 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9348 		return ret;
9349 	}
9350 
9351 	ret = hclge_map_tqp(hdev);
9352 	if (ret) {
9353 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9354 		return ret;
9355 	}
9356 
9357 	hclge_reset_umv_space(hdev);
9358 
9359 	ret = hclge_mac_init(hdev);
9360 	if (ret) {
9361 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9362 		return ret;
9363 	}
9364 
9365 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9366 	if (ret) {
9367 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9368 		return ret;
9369 	}
9370 
9371 	ret = hclge_config_gro(hdev, true);
9372 	if (ret)
9373 		return ret;
9374 
9375 	ret = hclge_init_vlan_config(hdev);
9376 	if (ret) {
9377 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9378 		return ret;
9379 	}
9380 
9381 	ret = hclge_tm_init_hw(hdev, true);
9382 	if (ret) {
9383 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9384 		return ret;
9385 	}
9386 
9387 	ret = hclge_rss_init_hw(hdev);
9388 	if (ret) {
9389 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9390 		return ret;
9391 	}
9392 
9393 	ret = hclge_init_fd_config(hdev);
9394 	if (ret) {
9395 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9396 		return ret;
9397 	}
9398 
9399 	/* Re-enable the hw error interrupts because
9400 	 * the interrupts get disabled on global reset.
9401 	 */
9402 	ret = hclge_config_nic_hw_error(hdev, true);
9403 	if (ret) {
9404 		dev_err(&pdev->dev,
9405 			"fail(%d) to re-enable NIC hw error interrupts\n",
9406 			ret);
9407 		return ret;
9408 	}
9409 
9410 	if (hdev->roce_client) {
9411 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9412 		if (ret) {
9413 			dev_err(&pdev->dev,
9414 				"fail(%d) to re-enable roce ras interrupts\n",
9415 				ret);
9416 			return ret;
9417 		}
9418 	}
9419 
9420 	hclge_reset_vport_state(hdev);
9421 
9422 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9423 		 HCLGE_DRIVER_NAME);
9424 
9425 	return 0;
9426 }
9427 
9428 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9429 {
9430 	struct hclge_dev *hdev = ae_dev->priv;
9431 	struct hclge_mac *mac = &hdev->hw.mac;
9432 
9433 	hclge_misc_affinity_teardown(hdev);
9434 	hclge_state_uninit(hdev);
9435 
9436 	if (mac->phydev)
9437 		mdiobus_unregister(mac->mdio_bus);
9438 
9439 	hclge_uninit_umv_space(hdev);
9440 
9441 	/* Disable MISC vector(vector0) */
9442 	hclge_enable_vector(&hdev->misc_vector, false);
9443 	synchronize_irq(hdev->misc_vector.vector_irq);
9444 
9445 	/* Disable all hw interrupts */
9446 	hclge_config_mac_tnl_int(hdev, false);
9447 	hclge_config_nic_hw_error(hdev, false);
9448 	hclge_config_rocee_ras_interrupt(hdev, false);
9449 
9450 	hclge_cmd_uninit(hdev);
9451 	hclge_misc_irq_uninit(hdev);
9452 	hclge_pci_uninit(hdev);
9453 	mutex_destroy(&hdev->vport_lock);
9454 	hclge_uninit_vport_mac_table(hdev);
9455 	hclge_uninit_vport_vlan_table(hdev);
9456 	mutex_destroy(&hdev->vport_cfg_mutex);
9457 	ae_dev->priv = NULL;
9458 }
9459 
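/* The maximum combined channel count is limited by both the device's RSS
 * size and the TQPs available per TC on this vport.
 */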
9460 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9461 {
9462 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9463 	struct hclge_vport *vport = hclge_get_vport(handle);
9464 	struct hclge_dev *hdev = vport->back;
9465 
9466 	return min_t(u32, hdev->rss_size_max,
9467 		     vport->alloc_tqps / kinfo->num_tc);
9468 }
9469 
9470 static void hclge_get_channels(struct hnae3_handle *handle,
9471 			       struct ethtool_channels *ch)
9472 {
9473 	ch->max_combined = hclge_get_max_channels(handle);
9474 	ch->other_count = 1;
9475 	ch->max_other = 1;
9476 	ch->combined_count = handle->kinfo.rss_size;
9477 }
9478 
9479 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9480 					u16 *alloc_tqps, u16 *max_rss_size)
9481 {
9482 	struct hclge_vport *vport = hclge_get_vport(handle);
9483 	struct hclge_dev *hdev = vport->back;
9484 
9485 	*alloc_tqps = vport->alloc_tqps;
9486 	*max_rss_size = hdev->rss_size_max;
9487 }
9488 
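/* Change the number of queues used per TC: update the TM vport mapping for
 * the requested rss_size, reprogram the RSS TC mode, and rebuild the RSS
 * indirection table unless the user has already configured one.
 */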
9489 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9490 			      bool rxfh_configured)
9491 {
9492 	struct hclge_vport *vport = hclge_get_vport(handle);
9493 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9494 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9495 	struct hclge_dev *hdev = vport->back;
9496 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9497 	int cur_rss_size = kinfo->rss_size;
9498 	int cur_tqps = kinfo->num_tqps;
9499 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9500 	u16 roundup_size;
9501 	u32 *rss_indir;
9502 	unsigned int i;
9503 	int ret;
9504 
9505 	kinfo->req_rss_size = new_tqps_num;
9506 
9507 	ret = hclge_tm_vport_map_update(hdev);
9508 	if (ret) {
9509 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9510 		return ret;
9511 	}
9512 
9513 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9514 	roundup_size = ilog2(roundup_size);
9515 	/* Set the RSS TC mode according to the new RSS size */
9516 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9517 		tc_valid[i] = 0;
9518 
9519 		if (!(hdev->hw_tc_map & BIT(i)))
9520 			continue;
9521 
9522 		tc_valid[i] = 1;
9523 		tc_size[i] = roundup_size;
9524 		tc_offset[i] = kinfo->rss_size * i;
9525 	}
9526 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9527 	if (ret)
9528 		return ret;
9529 
	/* RSS indirection table has been configured by the user */
9531 	if (rxfh_configured)
9532 		goto out;
9533 
	/* Reinitialize the RSS indirection table according to the new RSS
	 * size.
	 */
9535 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9536 	if (!rss_indir)
9537 		return -ENOMEM;
9538 
9539 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9540 		rss_indir[i] = i % kinfo->rss_size;
9541 
9542 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9543 	if (ret)
9544 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9545 			ret);
9546 
9547 	kfree(rss_indir);
9548 
9549 out:
9550 	if (!ret)
9551 		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
9553 			 cur_rss_size, kinfo->rss_size,
9554 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
9555 
9556 	return ret;
9557 }
9558 
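/* Query the firmware for the number of 32-bit and 64-bit registers that can
 * be dumped.
 */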
9559 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9560 			      u32 *regs_num_64_bit)
9561 {
9562 	struct hclge_desc desc;
9563 	u32 total_num;
9564 	int ret;
9565 
9566 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9567 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9568 	if (ret) {
9569 		dev_err(&hdev->pdev->dev,
9570 			"Query register number cmd failed, ret = %d.\n", ret);
9571 		return ret;
9572 	}
9573 
9574 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
9575 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
9576 
9577 	total_num = *regs_num_32_bit + *regs_num_64_bit;
9578 	if (!total_num)
9579 		return -EINVAL;
9580 
9581 	return 0;
9582 }
9583 
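/* Read the 32-bit register dump from firmware. The first descriptor carries
 * HCLGE_32_BIT_DESC_NODATA_LEN words of header, so it holds fewer register
 * values than the following descriptors.
 */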
9584 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9585 				 void *data)
9586 {
9587 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9588 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9589 
9590 	struct hclge_desc *desc;
9591 	u32 *reg_val = data;
9592 	__le32 *desc_data;
9593 	int nodata_num;
9594 	int cmd_num;
9595 	int i, k, n;
9596 	int ret;
9597 
9598 	if (regs_num == 0)
9599 		return 0;
9600 
9601 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9602 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9603 			       HCLGE_32_BIT_REG_RTN_DATANUM);
9604 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9605 	if (!desc)
9606 		return -ENOMEM;
9607 
9608 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9609 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9610 	if (ret) {
9611 		dev_err(&hdev->pdev->dev,
9612 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
9613 		kfree(desc);
9614 		return ret;
9615 	}
9616 
9617 	for (i = 0; i < cmd_num; i++) {
9618 		if (i == 0) {
9619 			desc_data = (__le32 *)(&desc[i].data[0]);
9620 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9621 		} else {
9622 			desc_data = (__le32 *)(&desc[i]);
9623 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
9624 		}
9625 		for (k = 0; k < n; k++) {
9626 			*reg_val++ = le32_to_cpu(*desc_data++);
9627 
9628 			regs_num--;
9629 			if (!regs_num)
9630 				break;
9631 		}
9632 	}
9633 
9634 	kfree(desc);
9635 	return 0;
9636 }
9637 
9638 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9639 				 void *data)
9640 {
9641 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9642 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9643 
9644 	struct hclge_desc *desc;
9645 	u64 *reg_val = data;
9646 	__le64 *desc_data;
9647 	int nodata_len;
9648 	int cmd_num;
9649 	int i, k, n;
9650 	int ret;
9651 
9652 	if (regs_num == 0)
9653 		return 0;
9654 
9655 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9656 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9657 			       HCLGE_64_BIT_REG_RTN_DATANUM);
9658 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9659 	if (!desc)
9660 		return -ENOMEM;
9661 
9662 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9663 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9664 	if (ret) {
9665 		dev_err(&hdev->pdev->dev,
9666 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
9667 		kfree(desc);
9668 		return ret;
9669 	}
9670 
9671 	for (i = 0; i < cmd_num; i++) {
9672 		if (i == 0) {
9673 			desc_data = (__le64 *)(&desc[i].data[0]);
9674 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9675 		} else {
9676 			desc_data = (__le64 *)(&desc[i]);
9677 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
9678 		}
9679 		for (k = 0; k < n; k++) {
9680 			*reg_val++ = le64_to_cpu(*desc_data++);
9681 
9682 			regs_num--;
9683 			if (!regs_num)
9684 				break;
9685 		}
9686 	}
9687 
9688 	kfree(desc);
9689 	return 0;
9690 }
9691 
9692 #define MAX_SEPARATE_NUM	4
9693 #define SEPARATOR_VALUE		0xFDFCFBFA
9694 #define REG_NUM_PER_LINE	4
9695 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
9696 #define REG_SEPARATOR_LINE	1
9697 #define REG_NUM_REMAIN_MASK	3
9698 #define BD_LIST_MAX_NUM		30
9699 
9700 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
9701 {
	/* prepare 4 commands to query DFX BD number */
9703 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
9704 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9705 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
9706 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9707 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
9708 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9709 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
9710 
9711 	return hclge_cmd_send(&hdev->hw, desc, 4);
9712 }
9713 
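/* Query the BD count of every DFX register block and extract each count from
 * the returned descriptors using the offsets in hclge_dfx_bd_offset_list.
 */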
9714 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
9715 				    int *bd_num_list,
9716 				    u32 type_num)
9717 {
9718 #define HCLGE_DFX_REG_BD_NUM	4
9719 
9720 	u32 entries_per_desc, desc_index, index, offset, i;
9721 	struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
9722 	int ret;
9723 
9724 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
9725 	if (ret) {
9726 		dev_err(&hdev->pdev->dev,
9727 			"Get dfx bd num fail, status is %d.\n", ret);
9728 		return ret;
9729 	}
9730 
9731 	entries_per_desc = ARRAY_SIZE(desc[0].data);
9732 	for (i = 0; i < type_num; i++) {
9733 		offset = hclge_dfx_bd_offset_list[i];
9734 		index = offset % entries_per_desc;
9735 		desc_index = offset / entries_per_desc;
9736 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
9737 	}
9738 
9739 	return ret;
9740 }
9741 
9742 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
9743 				  struct hclge_desc *desc_src, int bd_num,
9744 				  enum hclge_opcode_type cmd)
9745 {
9746 	struct hclge_desc *desc = desc_src;
9747 	int i, ret;
9748 
9749 	hclge_cmd_setup_basic_desc(desc, cmd, true);
9750 	for (i = 0; i < bd_num - 1; i++) {
9751 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9752 		desc++;
9753 		hclge_cmd_setup_basic_desc(desc, cmd, true);
9754 	}
9755 
9756 	desc = desc_src;
9757 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
9758 	if (ret)
9759 		dev_err(&hdev->pdev->dev,
9760 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
9761 			cmd, ret);
9762 
9763 	return ret;
9764 }
9765 
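/* Copy the register values from the descriptors into the dump buffer and pad
 * the block to a full line with separator values. Returns the number of u32
 * words written.
 */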
9766 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
9767 				    void *data)
9768 {
9769 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
9770 	struct hclge_desc *desc = desc_src;
9771 	u32 *reg = data;
9772 
9773 	entries_per_desc = ARRAY_SIZE(desc->data);
9774 	reg_num = entries_per_desc * bd_num;
9775 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
9776 	for (i = 0; i < reg_num; i++) {
9777 		index = i % entries_per_desc;
9778 		desc_index = i / entries_per_desc;
9779 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
9780 	}
9781 	for (i = 0; i < separator_num; i++)
9782 		*reg++ = SEPARATOR_VALUE;
9783 
9784 	return reg_num + separator_num;
9785 }
9786 
9787 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
9788 {
9789 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9790 	int data_len_per_desc, data_len, bd_num, i;
9791 	int bd_num_list[BD_LIST_MAX_NUM];
9792 	int ret;
9793 
9794 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9795 	if (ret) {
9796 		dev_err(&hdev->pdev->dev,
9797 			"Get dfx reg bd num fail, status is %d.\n", ret);
9798 		return ret;
9799 	}
9800 
9801 	data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
9802 	*len = 0;
9803 	for (i = 0; i < dfx_reg_type_num; i++) {
9804 		bd_num = bd_num_list[i];
9805 		data_len = data_len_per_desc * bd_num;
9806 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
9807 	}
9808 
9809 	return ret;
9810 }
9811 
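/* Dump every DFX register block: query the per-block BD counts, allocate one
 * descriptor buffer sized for the largest block and append each block's
 * registers to the dump buffer.
 */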
9812 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
9813 {
9814 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
9815 	int bd_num, bd_num_max, buf_len, i;
9816 	int bd_num_list[BD_LIST_MAX_NUM];
9817 	struct hclge_desc *desc_src;
9818 	u32 *reg = data;
9819 	int ret;
9820 
9821 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
9822 	if (ret) {
9823 		dev_err(&hdev->pdev->dev,
9824 			"Get dfx reg bd num fail, status is %d.\n", ret);
9825 		return ret;
9826 	}
9827 
9828 	bd_num_max = bd_num_list[0];
9829 	for (i = 1; i < dfx_reg_type_num; i++)
9830 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
9831 
9832 	buf_len = sizeof(*desc_src) * bd_num_max;
9833 	desc_src = kzalloc(buf_len, GFP_KERNEL);
9834 	if (!desc_src) {
9835 		dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
9836 		return -ENOMEM;
9837 	}
9838 
9839 	for (i = 0; i < dfx_reg_type_num; i++) {
9840 		bd_num = bd_num_list[i];
9841 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
9842 					     hclge_dfx_reg_opcode_list[i]);
9843 		if (ret) {
9844 			dev_err(&hdev->pdev->dev,
9845 				"Get dfx reg fail, status is %d.\n", ret);
9846 			break;
9847 		}
9848 
9849 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
9850 	}
9851 
9852 	kfree(desc_src);
9853 	return ret;
9854 }
9855 
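/* Read the directly accessible PF registers (command queue, common, per-ring
 * and per-vector blocks) from PCIe register space, padding each block with
 * separator values. Returns the number of u32 words written.
 */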
9856 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
9857 			      struct hnae3_knic_private_info *kinfo)
9858 {
9859 #define HCLGE_RING_REG_OFFSET		0x200
9860 #define HCLGE_RING_INT_REG_OFFSET	0x4
9861 
9862 	int i, j, reg_num, separator_num;
9863 	int data_num_sum;
9864 	u32 *reg = data;
9865 
	/* fetch per-PF register values from the PF PCIe register space */
9867 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
9868 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9869 	for (i = 0; i < reg_num; i++)
9870 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9871 	for (i = 0; i < separator_num; i++)
9872 		*reg++ = SEPARATOR_VALUE;
9873 	data_num_sum = reg_num + separator_num;
9874 
9875 	reg_num = ARRAY_SIZE(common_reg_addr_list);
9876 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9877 	for (i = 0; i < reg_num; i++)
9878 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9879 	for (i = 0; i < separator_num; i++)
9880 		*reg++ = SEPARATOR_VALUE;
9881 	data_num_sum += reg_num + separator_num;
9882 
9883 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
9884 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9885 	for (j = 0; j < kinfo->num_tqps; j++) {
9886 		for (i = 0; i < reg_num; i++)
9887 			*reg++ = hclge_read_dev(&hdev->hw,
9888 						ring_reg_addr_list[i] +
9889 						HCLGE_RING_REG_OFFSET * j);
9890 		for (i = 0; i < separator_num; i++)
9891 			*reg++ = SEPARATOR_VALUE;
9892 	}
9893 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
9894 
9895 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
9896 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9897 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
9898 		for (i = 0; i < reg_num; i++)
9899 			*reg++ = hclge_read_dev(&hdev->hw,
9900 						tqp_intr_reg_addr_list[i] +
9901 						HCLGE_RING_INT_REG_OFFSET * j);
9902 		for (i = 0; i < separator_num; i++)
9903 			*reg++ = SEPARATOR_VALUE;
9904 	}
9905 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
9906 
9907 	return data_num_sum;
9908 }
9909 
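/* Total register dump size in bytes: the directly read PF register blocks,
 * the 32-bit and 64-bit firmware dumps and the DFX registers, each padded to
 * whole separator-terminated lines.
 */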
9910 static int hclge_get_regs_len(struct hnae3_handle *handle)
9911 {
9912 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9913 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9914 	struct hclge_vport *vport = hclge_get_vport(handle);
9915 	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int dfx_regs_len;
9917 	int regs_lines_32_bit, regs_lines_64_bit;
9918 	int ret;
9919 
9920 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9921 	if (ret) {
9922 		dev_err(&hdev->pdev->dev,
9923 			"Get register number failed, ret = %d.\n", ret);
9924 		return ret;
9925 	}
9926 
9927 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
9928 	if (ret) {
9929 		dev_err(&hdev->pdev->dev,
9930 			"Get dfx reg len failed, ret = %d.\n", ret);
9931 		return ret;
9932 	}
9933 
9934 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
9935 		REG_SEPARATOR_LINE;
9936 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
9937 		REG_SEPARATOR_LINE;
9938 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
9939 		REG_SEPARATOR_LINE;
9940 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
9941 		REG_SEPARATOR_LINE;
9942 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
9943 		REG_SEPARATOR_LINE;
9944 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
9945 		REG_SEPARATOR_LINE;
9946 
9947 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9948 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
9949 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
9950 }
9951 
9952 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9953 			   void *data)
9954 {
9955 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9956 	struct hclge_vport *vport = hclge_get_vport(handle);
9957 	struct hclge_dev *hdev = vport->back;
9958 	u32 regs_num_32_bit, regs_num_64_bit;
9959 	int i, reg_num, separator_num, ret;
9960 	u32 *reg = data;
9961 
9962 	*version = hdev->fw_version;
9963 
9964 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9965 	if (ret) {
9966 		dev_err(&hdev->pdev->dev,
9967 			"Get register number failed, ret = %d.\n", ret);
9968 		return;
9969 	}
9970 
9971 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
9972 
9973 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9974 	if (ret) {
9975 		dev_err(&hdev->pdev->dev,
9976 			"Get 32 bit register failed, ret = %d.\n", ret);
9977 		return;
9978 	}
9979 	reg_num = regs_num_32_bit;
9980 	reg += reg_num;
9981 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9982 	for (i = 0; i < separator_num; i++)
9983 		*reg++ = SEPARATOR_VALUE;
9984 
9985 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9986 	if (ret) {
9987 		dev_err(&hdev->pdev->dev,
9988 			"Get 64 bit register failed, ret = %d.\n", ret);
9989 		return;
9990 	}
9991 	reg_num = regs_num_64_bit * 2;
9992 	reg += reg_num;
9993 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
9994 	for (i = 0; i < separator_num; i++)
9995 		*reg++ = SEPARATOR_VALUE;
9996 
9997 	ret = hclge_get_dfx_reg(hdev, reg);
9998 	if (ret)
9999 		dev_err(&hdev->pdev->dev,
10000 			"Get dfx register failed, ret = %d.\n", ret);
10001 }
10002 
10003 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10004 {
10005 	struct hclge_set_led_state_cmd *req;
10006 	struct hclge_desc desc;
10007 	int ret;
10008 
10009 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10010 
10011 	req = (struct hclge_set_led_state_cmd *)desc.data;
10012 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10013 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10014 
10015 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10016 	if (ret)
10017 		dev_err(&hdev->pdev->dev,
10018 			"Send set led state cmd error, ret =%d\n", ret);
10019 
10020 	return ret;
10021 }
10022 
10023 enum hclge_led_status {
10024 	HCLGE_LED_OFF,
10025 	HCLGE_LED_ON,
10026 	HCLGE_LED_NO_CHANGE = 0xFF,
10027 };
10028 
10029 static int hclge_set_led_id(struct hnae3_handle *handle,
10030 			    enum ethtool_phys_id_state status)
10031 {
10032 	struct hclge_vport *vport = hclge_get_vport(handle);
10033 	struct hclge_dev *hdev = vport->back;
10034 
10035 	switch (status) {
10036 	case ETHTOOL_ID_ACTIVE:
10037 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10038 	case ETHTOOL_ID_INACTIVE:
10039 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10040 	default:
10041 		return -EINVAL;
10042 	}
10043 }
10044 
10045 static void hclge_get_link_mode(struct hnae3_handle *handle,
10046 				unsigned long *supported,
10047 				unsigned long *advertising)
10048 {
10049 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10050 	struct hclge_vport *vport = hclge_get_vport(handle);
10051 	struct hclge_dev *hdev = vport->back;
10052 	unsigned int idx = 0;
10053 
10054 	for (; idx < size; idx++) {
10055 		supported[idx] = hdev->hw.mac.supported[idx];
10056 		advertising[idx] = hdev->hw.mac.advertising[idx];
10057 	}
10058 }
10059 
10060 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10061 {
10062 	struct hclge_vport *vport = hclge_get_vport(handle);
10063 	struct hclge_dev *hdev = vport->back;
10064 
10065 	return hclge_config_gro(hdev, enable);
10066 }
10067 
10068 static const struct hnae3_ae_ops hclge_ops = {
10069 	.init_ae_dev = hclge_init_ae_dev,
10070 	.uninit_ae_dev = hclge_uninit_ae_dev,
10071 	.flr_prepare = hclge_flr_prepare,
10072 	.flr_done = hclge_flr_done,
10073 	.init_client_instance = hclge_init_client_instance,
10074 	.uninit_client_instance = hclge_uninit_client_instance,
10075 	.map_ring_to_vector = hclge_map_ring_to_vector,
10076 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10077 	.get_vector = hclge_get_vector,
10078 	.put_vector = hclge_put_vector,
10079 	.set_promisc_mode = hclge_set_promisc_mode,
10080 	.set_loopback = hclge_set_loopback,
10081 	.start = hclge_ae_start,
10082 	.stop = hclge_ae_stop,
10083 	.client_start = hclge_client_start,
10084 	.client_stop = hclge_client_stop,
10085 	.get_status = hclge_get_status,
10086 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10087 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10088 	.get_media_type = hclge_get_media_type,
10089 	.check_port_speed = hclge_check_port_speed,
10090 	.get_fec = hclge_get_fec,
10091 	.set_fec = hclge_set_fec,
10092 	.get_rss_key_size = hclge_get_rss_key_size,
10093 	.get_rss_indir_size = hclge_get_rss_indir_size,
10094 	.get_rss = hclge_get_rss,
10095 	.set_rss = hclge_set_rss,
10096 	.set_rss_tuple = hclge_set_rss_tuple,
10097 	.get_rss_tuple = hclge_get_rss_tuple,
10098 	.get_tc_size = hclge_get_tc_size,
10099 	.get_mac_addr = hclge_get_mac_addr,
10100 	.set_mac_addr = hclge_set_mac_addr,
10101 	.do_ioctl = hclge_do_ioctl,
10102 	.add_uc_addr = hclge_add_uc_addr,
10103 	.rm_uc_addr = hclge_rm_uc_addr,
10104 	.add_mc_addr = hclge_add_mc_addr,
10105 	.rm_mc_addr = hclge_rm_mc_addr,
10106 	.set_autoneg = hclge_set_autoneg,
10107 	.get_autoneg = hclge_get_autoneg,
10108 	.restart_autoneg = hclge_restart_autoneg,
10109 	.halt_autoneg = hclge_halt_autoneg,
10110 	.get_pauseparam = hclge_get_pauseparam,
10111 	.set_pauseparam = hclge_set_pauseparam,
10112 	.set_mtu = hclge_set_mtu,
10113 	.reset_queue = hclge_reset_tqp,
10114 	.get_stats = hclge_get_stats,
10115 	.get_mac_stats = hclge_get_mac_stat,
10116 	.update_stats = hclge_update_stats,
10117 	.get_strings = hclge_get_strings,
10118 	.get_sset_count = hclge_get_sset_count,
10119 	.get_fw_version = hclge_get_fw_version,
10120 	.get_mdix_mode = hclge_get_mdix_mode,
10121 	.enable_vlan_filter = hclge_enable_vlan_filter,
10122 	.set_vlan_filter = hclge_set_vlan_filter,
10123 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10124 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10125 	.reset_event = hclge_reset_event,
10126 	.get_reset_level = hclge_get_reset_level,
10127 	.set_default_reset_request = hclge_set_def_reset_request,
10128 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10129 	.set_channels = hclge_set_channels,
10130 	.get_channels = hclge_get_channels,
10131 	.get_regs_len = hclge_get_regs_len,
10132 	.get_regs = hclge_get_regs,
10133 	.set_led_id = hclge_set_led_id,
10134 	.get_link_mode = hclge_get_link_mode,
10135 	.add_fd_entry = hclge_add_fd_entry,
10136 	.del_fd_entry = hclge_del_fd_entry,
10137 	.del_all_fd_entries = hclge_del_all_fd_entries,
10138 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10139 	.get_fd_rule_info = hclge_get_fd_rule_info,
10140 	.get_fd_all_rules = hclge_get_all_rules,
10141 	.restore_fd_rules = hclge_restore_fd_entries,
10142 	.enable_fd = hclge_enable_fd,
10143 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10144 	.dbg_run_cmd = hclge_dbg_run_cmd,
10145 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10146 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10147 	.ae_dev_resetting = hclge_ae_dev_resetting,
10148 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10149 	.set_gro_en = hclge_gro_en,
10150 	.get_global_queue_id = hclge_covert_handle_qid_global,
10151 	.set_timer_task = hclge_set_timer_task,
10152 	.mac_connect_phy = hclge_mac_connect_phy,
10153 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10154 	.restore_vlan_table = hclge_restore_vlan_table,
10155 };
10156 
10157 static struct hnae3_ae_algo ae_algo = {
10158 	.ops = &hclge_ops,
10159 	.pdev_id_table = ae_algo_pci_tbl,
10160 };
10161 
10162 static int hclge_init(void)
10163 {
10164 	pr_info("%s is initializing\n", HCLGE_NAME);
10165 
10166 	hnae3_register_ae_algo(&ae_algo);
10167 
10168 	return 0;
10169 }
10170 
10171 static void hclge_exit(void)
10172 {
10173 	hnae3_unregister_ae_algo(&ae_algo);
10174 }
10175 module_init(hclge_init);
10176 module_exit(hclge_exit);
10177 
10178 MODULE_LICENSE("GPL");
10179 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10180 MODULE_DESCRIPTION("HCLGE Driver");
10181 MODULE_VERSION(HCLGE_MOD_VERSION);
10182