1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66 			       u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70 						   unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72 
73 static struct hnae3_ae_algo ae_algo;
74 
75 static struct workqueue_struct *hclge_wq;
76 
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85 	/* required last entry */
86 	{0, }
87 };
88 
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90 
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92 					 HCLGE_CMDQ_TX_ADDR_H_REG,
93 					 HCLGE_CMDQ_TX_DEPTH_REG,
94 					 HCLGE_CMDQ_TX_TAIL_REG,
95 					 HCLGE_CMDQ_TX_HEAD_REG,
96 					 HCLGE_CMDQ_RX_ADDR_L_REG,
97 					 HCLGE_CMDQ_RX_ADDR_H_REG,
98 					 HCLGE_CMDQ_RX_DEPTH_REG,
99 					 HCLGE_CMDQ_RX_TAIL_REG,
100 					 HCLGE_CMDQ_RX_HEAD_REG,
101 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
102 					 HCLGE_CMDQ_INTR_STS_REG,
103 					 HCLGE_CMDQ_INTR_EN_REG,
104 					 HCLGE_CMDQ_INTR_GEN_REG};
105 
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107 					   HCLGE_VECTOR0_OTER_EN_REG,
108 					   HCLGE_MISC_RESET_STS_REG,
109 					   HCLGE_MISC_VECTOR_INT_STS,
110 					   HCLGE_GLOBAL_RESET_REG,
111 					   HCLGE_FUN_RST_ING,
112 					   HCLGE_GRO_EN_REG};
113 
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115 					 HCLGE_RING_RX_ADDR_H_REG,
116 					 HCLGE_RING_RX_BD_NUM_REG,
117 					 HCLGE_RING_RX_BD_LENGTH_REG,
118 					 HCLGE_RING_RX_MERGE_EN_REG,
119 					 HCLGE_RING_RX_TAIL_REG,
120 					 HCLGE_RING_RX_HEAD_REG,
121 					 HCLGE_RING_RX_FBD_NUM_REG,
122 					 HCLGE_RING_RX_OFFSET_REG,
123 					 HCLGE_RING_RX_FBD_OFFSET_REG,
124 					 HCLGE_RING_RX_STASH_REG,
125 					 HCLGE_RING_RX_BD_ERR_REG,
126 					 HCLGE_RING_TX_ADDR_L_REG,
127 					 HCLGE_RING_TX_ADDR_H_REG,
128 					 HCLGE_RING_TX_BD_NUM_REG,
129 					 HCLGE_RING_TX_PRIORITY_REG,
130 					 HCLGE_RING_TX_TC_REG,
131 					 HCLGE_RING_TX_MERGE_EN_REG,
132 					 HCLGE_RING_TX_TAIL_REG,
133 					 HCLGE_RING_TX_HEAD_REG,
134 					 HCLGE_RING_TX_FBD_NUM_REG,
135 					 HCLGE_RING_TX_OFFSET_REG,
136 					 HCLGE_RING_TX_EBD_NUM_REG,
137 					 HCLGE_RING_TX_EBD_OFFSET_REG,
138 					 HCLGE_RING_TX_BD_ERR_REG,
139 					 HCLGE_RING_EN_REG};
140 
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142 					     HCLGE_TQP_INTR_GL0_REG,
143 					     HCLGE_TQP_INTR_GL1_REG,
144 					     HCLGE_TQP_INTR_GL2_REG,
145 					     HCLGE_TQP_INTR_RL_REG};
146 
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148 	"App    Loopback test",
149 	"Serdes serial Loopback test",
150 	"Serdes parallel Loopback test",
151 	"Phy    Loopback test"
152 };
153 
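/* Map each ethtool statistics string to the byte offset of the
 * corresponding counter in struct hclge_mac_stats; the counters are
 * read generically via HCLGE_STATS_READ().
 */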
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155 	{"mac_tx_mac_pause_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157 	{"mac_rx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159 	{"mac_tx_control_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161 	{"mac_rx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163 	{"mac_tx_pfc_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165 	{"mac_tx_pfc_pri0_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167 	{"mac_tx_pfc_pri1_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169 	{"mac_tx_pfc_pri2_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171 	{"mac_tx_pfc_pri3_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173 	{"mac_tx_pfc_pri4_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175 	{"mac_tx_pfc_pri5_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177 	{"mac_tx_pfc_pri6_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179 	{"mac_tx_pfc_pri7_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181 	{"mac_rx_pfc_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183 	{"mac_rx_pfc_pri0_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185 	{"mac_rx_pfc_pri1_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187 	{"mac_rx_pfc_pri2_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189 	{"mac_rx_pfc_pri3_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191 	{"mac_rx_pfc_pri4_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193 	{"mac_rx_pfc_pri5_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195 	{"mac_rx_pfc_pri6_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197 	{"mac_rx_pfc_pri7_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199 	{"mac_tx_total_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201 	{"mac_tx_total_oct_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203 	{"mac_tx_good_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205 	{"mac_tx_bad_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207 	{"mac_tx_good_oct_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209 	{"mac_tx_bad_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211 	{"mac_tx_uni_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213 	{"mac_tx_multi_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215 	{"mac_tx_broad_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217 	{"mac_tx_undersize_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219 	{"mac_tx_oversize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221 	{"mac_tx_64_oct_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223 	{"mac_tx_65_127_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225 	{"mac_tx_128_255_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227 	{"mac_tx_256_511_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229 	{"mac_tx_512_1023_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231 	{"mac_tx_1024_1518_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233 	{"mac_tx_1519_2047_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235 	{"mac_tx_2048_4095_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237 	{"mac_tx_4096_8191_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239 	{"mac_tx_8192_9216_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241 	{"mac_tx_9217_12287_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243 	{"mac_tx_12288_16383_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245 	{"mac_tx_1519_max_good_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247 	{"mac_tx_1519_max_bad_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249 	{"mac_rx_total_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251 	{"mac_rx_total_oct_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253 	{"mac_rx_good_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255 	{"mac_rx_bad_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257 	{"mac_rx_good_oct_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259 	{"mac_rx_bad_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261 	{"mac_rx_uni_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263 	{"mac_rx_multi_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265 	{"mac_rx_broad_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267 	{"mac_rx_undersize_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269 	{"mac_rx_oversize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271 	{"mac_rx_64_oct_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273 	{"mac_rx_65_127_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275 	{"mac_rx_128_255_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277 	{"mac_rx_256_511_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279 	{"mac_rx_512_1023_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281 	{"mac_rx_1024_1518_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283 	{"mac_rx_1519_2047_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285 	{"mac_rx_2048_4095_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287 	{"mac_rx_4096_8191_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289 	{"mac_rx_8192_9216_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291 	{"mac_rx_9217_12287_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293 	{"mac_rx_12288_16383_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295 	{"mac_rx_1519_max_good_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297 	{"mac_rx_1519_max_bad_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299 
300 	{"mac_tx_fragment_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302 	{"mac_tx_undermin_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304 	{"mac_tx_jabber_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306 	{"mac_tx_err_all_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308 	{"mac_tx_from_app_good_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310 	{"mac_tx_from_app_bad_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312 	{"mac_rx_fragment_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314 	{"mac_rx_undermin_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316 	{"mac_rx_jabber_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318 	{"mac_rx_fcs_err_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320 	{"mac_rx_send_app_good_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322 	{"mac_rx_send_app_bad_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325 
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327 	{
328 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
330 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331 		.i_port_bitmap = 0x1,
332 	},
333 };
334 
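/* Default RSS (Toeplitz) hash key used when the RSS configuration is
 * initialised.
 */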
335 static const u8 hclge_hash_key[] = {
336 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342 
343 static const u32 hclge_dfx_bd_offset_list[] = {
344 	HCLGE_DFX_BIOS_BD_OFFSET,
345 	HCLGE_DFX_SSU_0_BD_OFFSET,
346 	HCLGE_DFX_SSU_1_BD_OFFSET,
347 	HCLGE_DFX_IGU_BD_OFFSET,
348 	HCLGE_DFX_RPU_0_BD_OFFSET,
349 	HCLGE_DFX_RPU_1_BD_OFFSET,
350 	HCLGE_DFX_NCSI_BD_OFFSET,
351 	HCLGE_DFX_RTC_BD_OFFSET,
352 	HCLGE_DFX_PPP_BD_OFFSET,
353 	HCLGE_DFX_RCB_BD_OFFSET,
354 	HCLGE_DFX_TQP_BD_OFFSET,
355 	HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357 
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
360 	HCLGE_OPC_DFX_SSU_REG_0,
361 	HCLGE_OPC_DFX_SSU_REG_1,
362 	HCLGE_OPC_DFX_IGU_EGU_REG,
363 	HCLGE_OPC_DFX_RPU_REG_0,
364 	HCLGE_OPC_DFX_RPU_REG_1,
365 	HCLGE_OPC_DFX_NCSI_REG,
366 	HCLGE_OPC_DFX_RTC_REG,
367 	HCLGE_OPC_DFX_PPP_REG,
368 	HCLGE_OPC_DFX_RCB_REG,
369 	HCLGE_OPC_DFX_TQP_REG,
370 	HCLGE_OPC_DFX_SSU_REG_2
371 };
372 
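/* Bit widths of the meta-data and tuple fields used when building the
 * flow director (FD) lookup keys.
 */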
373 static const struct key_info meta_data_key_info[] = {
374 	{ PACKET_TYPE_ID, 6},
375 	{ IP_FRAGEMENT, 1},
376 	{ ROCE_TYPE, 1},
377 	{ NEXT_KEY, 5},
378 	{ VLAN_NUMBER, 2},
379 	{ SRC_VPORT, 12},
380 	{ DST_VPORT, 12},
381 	{ TUNNEL_PACKET, 1},
382 };
383 
384 static const struct key_info tuple_key_info[] = {
385 	{ OUTER_DST_MAC, 48},
386 	{ OUTER_SRC_MAC, 48},
387 	{ OUTER_VLAN_TAG_FST, 16},
388 	{ OUTER_VLAN_TAG_SEC, 16},
389 	{ OUTER_ETH_TYPE, 16},
390 	{ OUTER_L2_RSV, 16},
391 	{ OUTER_IP_TOS, 8},
392 	{ OUTER_IP_PROTO, 8},
393 	{ OUTER_SRC_IP, 32},
394 	{ OUTER_DST_IP, 32},
395 	{ OUTER_L3_RSV, 16},
396 	{ OUTER_SRC_PORT, 16},
397 	{ OUTER_DST_PORT, 16},
398 	{ OUTER_L4_RSV, 32},
399 	{ OUTER_TUN_VNI, 24},
400 	{ OUTER_TUN_FLOW_ID, 8},
401 	{ INNER_DST_MAC, 48},
402 	{ INNER_SRC_MAC, 48},
403 	{ INNER_VLAN_TAG_FST, 16},
404 	{ INNER_VLAN_TAG_SEC, 16},
405 	{ INNER_ETH_TYPE, 16},
406 	{ INNER_L2_RSV, 16},
407 	{ INNER_IP_TOS, 8},
408 	{ INNER_IP_PROTO, 8},
409 	{ INNER_SRC_IP, 32},
410 	{ INNER_DST_IP, 32},
411 	{ INNER_L3_RSV, 16},
412 	{ INNER_SRC_PORT, 16},
413 	{ INNER_DST_PORT, 16},
414 	{ INNER_L4_RSV, 32},
415 };
416 
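/* Legacy statistics path: read the MAC counters with a fixed number of
 * descriptors (HCLGE_OPC_STATS_MAC) and accumulate each 64-bit value
 * into hdev->mac_stats.
 */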
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420 
421 	u64 *data = (u64 *)(&hdev->mac_stats);
422 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423 	__le64 *desc_data;
424 	int i, k, n;
425 	int ret;
426 
427 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429 	if (ret) {
430 		dev_err(&hdev->pdev->dev,
431 			"Get MAC pkt stats fail, status = %d.\n", ret);
432 
433 		return ret;
434 	}
435 
436 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437 		/* for special opcode 0032, only the first desc has the head */
438 		if (unlikely(i == 0)) {
439 			desc_data = (__le64 *)(&desc[i].data[0]);
440 			n = HCLGE_RD_FIRST_STATS_NUM;
441 		} else {
442 			desc_data = (__le64 *)(&desc[i]);
443 			n = HCLGE_RD_OTHER_STATS_NUM;
444 		}
445 
446 		for (k = 0; k < n; k++) {
447 			*data += le64_to_cpu(*desc_data);
448 			data++;
449 			desc_data++;
450 		}
451 	}
452 
453 	return 0;
454 }
455 
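/* Newer statistics path: the required descriptor count is queried from
 * firmware first (see hclge_mac_query_reg_num()) and all MAC counters
 * are read with HCLGE_OPC_STATS_MAC_ALL.
 */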
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458 	u64 *data = (u64 *)(&hdev->mac_stats);
459 	struct hclge_desc *desc;
460 	__le64 *desc_data;
461 	u16 i, k, n;
462 	int ret;
463 
464 	/* This may be called inside atomic sections,
465 	 * so GFP_ATOMIC is more suitable here
466 	 */
467 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468 	if (!desc)
469 		return -ENOMEM;
470 
471 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473 	if (ret) {
474 		kfree(desc);
475 		return ret;
476 	}
477 
478 	for (i = 0; i < desc_num; i++) {
479 		/* for special opcode 0034, only the first desc has the head */
480 		if (i == 0) {
481 			desc_data = (__le64 *)(&desc[i].data[0]);
482 			n = HCLGE_RD_FIRST_STATS_NUM;
483 		} else {
484 			desc_data = (__le64 *)(&desc[i]);
485 			n = HCLGE_RD_OTHER_STATS_NUM;
486 		}
487 
488 		for (k = 0; k < n; k++) {
489 			*data += le64_to_cpu(*desc_data);
490 			data++;
491 			desc_data++;
492 		}
493 	}
494 
495 	kfree(desc);
496 
497 	return 0;
498 }
499 
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502 	struct hclge_desc desc;
503 	__le32 *desc_data;
504 	u32 reg_num;
505 	int ret;
506 
507 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509 	if (ret)
510 		return ret;
511 
512 	desc_data = (__le32 *)(&desc.data[0]);
513 	reg_num = le32_to_cpu(*desc_data);
514 
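	/* One descriptor carries the first 3 statistics registers; every
	 * following descriptor carries 4, so round the remainder up.
	 */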
515 	*desc_num = 1 + ((reg_num - 3) >> 2) +
516 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517 
518 	return 0;
519 }
520 
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523 	u32 desc_num;
524 	int ret;
525 
526 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
527 
528 	/* The firmware supports the new statistics acquisition method */
529 	if (!ret)
530 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
531 	else if (ret == -EOPNOTSUPP)
532 		ret = hclge_mac_update_stats_defective(hdev);
533 	else
534 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535 
536 	return ret;
537 }
538 
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542 	struct hclge_vport *vport = hclge_get_vport(handle);
543 	struct hclge_dev *hdev = vport->back;
544 	struct hnae3_queue *queue;
545 	struct hclge_desc desc[1];
546 	struct hclge_tqp *tqp;
547 	int ret, i;
548 
549 	for (i = 0; i < kinfo->num_tqps; i++) {
550 		queue = handle->kinfo.tqp[i];
551 		tqp = container_of(queue, struct hclge_tqp, q);
552 		/* command : HCLGE_OPC_QUERY_RX_STATUS */
553 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554 					   true);
555 
556 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
558 		if (ret) {
559 			dev_err(&hdev->pdev->dev,
560 				"Query tqp stat fail, status = %d, queue = %d\n",
561 				ret, i);
562 			return ret;
563 		}
564 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565 			le32_to_cpu(desc[0].data[1]);
566 	}
567 
568 	for (i = 0; i < kinfo->num_tqps; i++) {
569 		queue = handle->kinfo.tqp[i];
570 		tqp = container_of(queue, struct hclge_tqp, q);
571 		/* command : HCLGE_OPC_QUERY_TX_STATUS */
572 		hclge_cmd_setup_basic_desc(&desc[0],
573 					   HCLGE_OPC_QUERY_TX_STATUS,
574 					   true);
575 
576 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
578 		if (ret) {
579 			dev_err(&hdev->pdev->dev,
580 				"Query tqp stat fail, status = %d, queue = %d\n",
581 				ret, i);
582 			return ret;
583 		}
584 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585 			le32_to_cpu(desc[0].data[1]);
586 	}
587 
588 	return 0;
589 }
590 
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594 	struct hclge_tqp *tqp;
595 	u64 *buff = data;
596 	int i;
597 
598 	for (i = 0; i < kinfo->num_tqps; i++) {
599 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601 	}
602 
603 	for (i = 0; i < kinfo->num_tqps; i++) {
604 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606 	}
607 
608 	return buff;
609 }
610 
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614 
615 	/* each tqp has both a TX and an RX queue */
616 	return kinfo->num_tqps * 2;
617 }
618 
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	u8 *buff = data;
623 	int i = 0;
624 
625 	for (i = 0; i < kinfo->num_tqps; i++) {
626 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627 			struct hclge_tqp, q);
628 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629 			 tqp->index);
630 		buff = buff + ETH_GSTRING_LEN;
631 	}
632 
633 	for (i = 0; i < kinfo->num_tqps; i++) {
634 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635 			struct hclge_tqp, q);
636 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637 			 tqp->index);
638 		buff = buff + ETH_GSTRING_LEN;
639 	}
640 
641 	return buff;
642 }
643 
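/* Copy each counter from the statistics structure, using the offsets in
 * strs[], into the ethtool data buffer and return the advanced pointer.
 */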
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645 				 const struct hclge_comm_stats_str strs[],
646 				 int size, u64 *data)
647 {
648 	u64 *buf = data;
649 	u32 i;
650 
651 	for (i = 0; i < size; i++)
652 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653 
654 	return buf + size;
655 }
656 
657 static u8 *hclge_comm_get_strings(u32 stringset,
658 				  const struct hclge_comm_stats_str strs[],
659 				  int size, u8 *data)
660 {
661 	char *buff = (char *)data;
662 	u32 i;
663 
664 	if (stringset != ETH_SS_STATS)
665 		return buff;
666 
667 	for (i = 0; i < size; i++) {
668 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669 		buff = buff + ETH_GSTRING_LEN;
670 	}
671 
672 	return (u8 *)buff;
673 }
674 
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677 	struct hnae3_handle *handle;
678 	int status;
679 
680 	handle = &hdev->vport[0].nic;
681 	if (handle->client) {
682 		status = hclge_tqps_update_stats(handle);
683 		if (status) {
684 			dev_err(&hdev->pdev->dev,
685 				"Update TQPS stats fail, status = %d.\n",
686 				status);
687 		}
688 	}
689 
690 	status = hclge_mac_update_stats(hdev);
691 	if (status)
692 		dev_err(&hdev->pdev->dev,
693 			"Update MAC stats fail, status = %d.\n", status);
694 }
695 
696 static void hclge_update_stats(struct hnae3_handle *handle,
697 			       struct net_device_stats *net_stats)
698 {
699 	struct hclge_vport *vport = hclge_get_vport(handle);
700 	struct hclge_dev *hdev = vport->back;
701 	int status;
702 
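	/* HCLGE_STATE_STATISTICS_UPDATING acts as a simple lock so that
	 * concurrent callers do not update the counters at the same time.
	 */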
703 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704 		return;
705 
706 	status = hclge_mac_update_stats(hdev);
707 	if (status)
708 		dev_err(&hdev->pdev->dev,
709 			"Update MAC stats fail, status = %d.\n",
710 			status);
711 
712 	status = hclge_tqps_update_stats(handle);
713 	if (status)
714 		dev_err(&hdev->pdev->dev,
715 			"Update TQPS stats fail, status = %d.\n",
716 			status);
717 
718 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720 
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724 		HNAE3_SUPPORT_PHY_LOOPBACK |\
725 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727 
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int count = 0;
731 
732 	/* Loopback test support rules:
733 	 * mac: only supported in GE mode
734 	 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
735 	 * phy: only supported when a PHY device is present on the board
736 	 */
737 	if (stringset == ETH_SS_TEST) {
738 		/* clear loopback bit flags at first */
739 		handle->flags &= ~HCLGE_LOOPBACK_TEST_FLAGS;
740 		if (hdev->pdev->revision >= 0x21 ||
741 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744 			count += 1;
745 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746 		}
747 
748 		count += 2;
749 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751 
752 		if (hdev->hw.mac.phydev) {
753 			count += 1;
754 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755 		}
756 
757 	} else if (stringset == ETH_SS_STATS) {
758 		count = ARRAY_SIZE(g_mac_stats_string) +
759 			hclge_tqps_get_sset_count(handle, stringset);
760 	}
761 
762 	return count;
763 }
764 
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766 			      u8 *data)
767 {
768 	u8 *p = data;
769 	int size;
770 
771 	if (stringset == ETH_SS_STATS) {
772 		size = ARRAY_SIZE(g_mac_stats_string);
773 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774 					   size, p);
775 		p = hclge_tqps_get_strings(handle, p);
776 	} else if (stringset == ETH_SS_TEST) {
777 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779 			       ETH_GSTRING_LEN);
780 			p += ETH_GSTRING_LEN;
781 		}
782 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784 			       ETH_GSTRING_LEN);
785 			p += ETH_GSTRING_LEN;
786 		}
787 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788 			memcpy(p,
789 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790 			       ETH_GSTRING_LEN);
791 			p += ETH_GSTRING_LEN;
792 		}
793 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795 			       ETH_GSTRING_LEN);
796 			p += ETH_GSTRING_LEN;
797 		}
798 	}
799 }
800 
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803 	struct hclge_vport *vport = hclge_get_vport(handle);
804 	struct hclge_dev *hdev = vport->back;
805 	u64 *p;
806 
807 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808 				 ARRAY_SIZE(g_mac_stats_string), data);
809 	p = hclge_tqps_get_stats(handle, p);
810 }
811 
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813 			       struct hns3_mac_stats *mac_stats)
814 {
815 	struct hclge_vport *vport = hclge_get_vport(handle);
816 	struct hclge_dev *hdev = vport->back;
817 
818 	hclge_update_stats(handle, NULL);
819 
820 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823 
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825 				   struct hclge_func_status_cmd *status)
826 {
827 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828 		return -EINVAL;
829 
830 	/* Record whether this PF is the main PF */
831 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
832 		hdev->flag |= HCLGE_FLAG_MAIN;
833 	else
834 		hdev->flag &= ~HCLGE_FLAG_MAIN;
835 
836 	return 0;
837 }
838 
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT	5
842 
843 	struct hclge_func_status_cmd *req;
844 	struct hclge_desc desc;
845 	int timeout = 0;
846 	int ret;
847 
848 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849 	req = (struct hclge_func_status_cmd *)desc.data;
850 
851 	do {
852 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853 		if (ret) {
854 			dev_err(&hdev->pdev->dev,
855 				"query function status failed %d.\n", ret);
856 			return ret;
857 		}
858 
859 		/* Check whether PF reset is done */
860 		if (req->pf_state)
861 			break;
862 		usleep_range(1000, 2000);
863 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
864 
865 	return hclge_parse_func_status(hdev, req);
866 }
867 
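/* Query the PF resources assigned by firmware: TQP count, packet/TX/DV
 * buffer sizes and the MSI-X vector layout (NIC vectors first, RoCE
 * vectors starting at roce_base_msix_offset).
 */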
868 static int hclge_query_pf_resource(struct hclge_dev *hdev)
869 {
870 	struct hclge_pf_res_cmd *req;
871 	struct hclge_desc desc;
872 	int ret;
873 
874 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
875 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
876 	if (ret) {
877 		dev_err(&hdev->pdev->dev,
878 			"query pf resource failed %d.\n", ret);
879 		return ret;
880 	}
881 
882 	req = (struct hclge_pf_res_cmd *)desc.data;
883 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
884 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
885 
886 	if (req->tx_buf_size)
887 		hdev->tx_buf_size =
888 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
889 	else
890 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
891 
892 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
893 
894 	if (req->dv_buf_size)
895 		hdev->dv_buf_size =
896 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
897 	else
898 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
899 
900 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
901 
902 	if (hnae3_dev_roce_supported(hdev)) {
903 		hdev->roce_base_msix_offset =
904 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
905 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
906 		hdev->num_roce_msi =
907 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
908 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
909 
910 		/* the NIC's MSI-X vector count always equals the RoCE's. */
911 		hdev->num_nic_msi = hdev->num_roce_msi;
912 
913 		/* The PF has both NIC and RoCE vectors;
914 		 * NIC vectors precede RoCE vectors in the MSI-X table.
915 		 */
916 		hdev->num_msi = hdev->num_roce_msi +
917 				hdev->roce_base_msix_offset;
918 	} else {
919 		hdev->num_msi =
920 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
921 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
922 
923 		hdev->num_nic_msi = hdev->num_msi;
924 	}
925 
926 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
927 		dev_err(&hdev->pdev->dev,
928 			"Just %u msi resources, not enough for pf(min:2).\n",
929 			hdev->num_nic_msi);
930 		return -EINVAL;
931 	}
932 
933 	return 0;
934 }
935 
936 static int hclge_parse_speed(int speed_cmd, int *speed)
937 {
938 	switch (speed_cmd) {
939 	case 6:
940 		*speed = HCLGE_MAC_SPEED_10M;
941 		break;
942 	case 7:
943 		*speed = HCLGE_MAC_SPEED_100M;
944 		break;
945 	case 0:
946 		*speed = HCLGE_MAC_SPEED_1G;
947 		break;
948 	case 1:
949 		*speed = HCLGE_MAC_SPEED_10G;
950 		break;
951 	case 2:
952 		*speed = HCLGE_MAC_SPEED_25G;
953 		break;
954 	case 3:
955 		*speed = HCLGE_MAC_SPEED_40G;
956 		break;
957 	case 4:
958 		*speed = HCLGE_MAC_SPEED_50G;
959 		break;
960 	case 5:
961 		*speed = HCLGE_MAC_SPEED_100G;
962 		break;
963 	default:
964 		return -EINVAL;
965 	}
966 
967 	return 0;
968 }
969 
970 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
971 {
972 	struct hclge_vport *vport = hclge_get_vport(handle);
973 	struct hclge_dev *hdev = vport->back;
974 	u32 speed_ability = hdev->hw.mac.speed_ability;
975 	u32 speed_bit = 0;
976 
977 	switch (speed) {
978 	case HCLGE_MAC_SPEED_10M:
979 		speed_bit = HCLGE_SUPPORT_10M_BIT;
980 		break;
981 	case HCLGE_MAC_SPEED_100M:
982 		speed_bit = HCLGE_SUPPORT_100M_BIT;
983 		break;
984 	case HCLGE_MAC_SPEED_1G:
985 		speed_bit = HCLGE_SUPPORT_1G_BIT;
986 		break;
987 	case HCLGE_MAC_SPEED_10G:
988 		speed_bit = HCLGE_SUPPORT_10G_BIT;
989 		break;
990 	case HCLGE_MAC_SPEED_25G:
991 		speed_bit = HCLGE_SUPPORT_25G_BIT;
992 		break;
993 	case HCLGE_MAC_SPEED_40G:
994 		speed_bit = HCLGE_SUPPORT_40G_BIT;
995 		break;
996 	case HCLGE_MAC_SPEED_50G:
997 		speed_bit = HCLGE_SUPPORT_50G_BIT;
998 		break;
999 	case HCLGE_MAC_SPEED_100G:
1000 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1001 		break;
1002 	default:
1003 		return -EINVAL;
1004 	}
1005 
1006 	if (speed_bit & speed_ability)
1007 		return 0;
1008 
1009 	return -EINVAL;
1010 }
1011 
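/* The helpers below translate the firmware speed_ability bitmap into
 * ethtool link modes for SR, LR, CR and KR media respectively.
 */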
1012 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1013 {
1014 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1015 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1016 				 mac->supported);
1017 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1018 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1019 				 mac->supported);
1020 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1021 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1022 				 mac->supported);
1023 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1024 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1025 				 mac->supported);
1026 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1027 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1028 				 mac->supported);
1029 }
1030 
1031 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1032 {
1033 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1034 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1035 				 mac->supported);
1036 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1037 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1038 				 mac->supported);
1039 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1040 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1041 				 mac->supported);
1042 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1047 				 mac->supported);
1048 }
1049 
1050 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1051 {
1052 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1053 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1054 				 mac->supported);
1055 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1056 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1057 				 mac->supported);
1058 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1059 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1060 				 mac->supported);
1061 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1066 				 mac->supported);
1067 }
1068 
1069 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1070 {
1071 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1072 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1073 				 mac->supported);
1074 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1075 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1076 				 mac->supported);
1077 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1078 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1079 				 mac->supported);
1080 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1081 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1082 				 mac->supported);
1083 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1088 				 mac->supported);
1089 }
1090 
1091 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1092 {
1093 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1094 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1095 
1096 	switch (mac->speed) {
1097 	case HCLGE_MAC_SPEED_10G:
1098 	case HCLGE_MAC_SPEED_40G:
1099 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1100 				 mac->supported);
1101 		mac->fec_ability =
1102 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1103 		break;
1104 	case HCLGE_MAC_SPEED_25G:
1105 	case HCLGE_MAC_SPEED_50G:
1106 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1107 				 mac->supported);
1108 		mac->fec_ability =
1109 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1110 			BIT(HNAE3_FEC_AUTO);
1111 		break;
1112 	case HCLGE_MAC_SPEED_100G:
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1114 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1115 		break;
1116 	default:
1117 		mac->fec_ability = 0;
1118 		break;
1119 	}
1120 }
1121 
1122 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1123 					u8 speed_ability)
1124 {
1125 	struct hclge_mac *mac = &hdev->hw.mac;
1126 
1127 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1129 				 mac->supported);
1130 
1131 	hclge_convert_setting_sr(mac, speed_ability);
1132 	hclge_convert_setting_lr(mac, speed_ability);
1133 	hclge_convert_setting_cr(mac, speed_ability);
1134 	if (hdev->pdev->revision >= 0x21)
1135 		hclge_convert_setting_fec(mac);
1136 
1137 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1138 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1139 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1140 }
1141 
1142 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1143 					    u8 speed_ability)
1144 {
1145 	struct hclge_mac *mac = &hdev->hw.mac;
1146 
1147 	hclge_convert_setting_kr(mac, speed_ability);
1148 	if (hdev->pdev->revision >= 0x21)
1149 		hclge_convert_setting_fec(mac);
1150 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1151 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1152 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1153 }
1154 
1155 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1156 					 u8 speed_ability)
1157 {
1158 	unsigned long *supported = hdev->hw.mac.supported;
1159 
1160 	/* default to supporting all speeds for a GE port */
1161 	if (!speed_ability)
1162 		speed_ability = HCLGE_SUPPORT_GE;
1163 
1164 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1165 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1166 				 supported);
1167 
1168 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1169 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1170 				 supported);
1171 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1172 				 supported);
1173 	}
1174 
1175 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1177 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1178 	}
1179 
1180 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1181 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1184 }
1185 
1186 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1187 {
1188 	u8 media_type = hdev->hw.mac.media_type;
1189 
1190 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1191 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1192 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1193 		hclge_parse_copper_link_mode(hdev, speed_ability);
1194 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1195 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1196 }
1197 
1198 static u32 hclge_get_max_speed(u8 speed_ability)
1199 {
1200 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1201 		return HCLGE_MAC_SPEED_100G;
1202 
1203 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1204 		return HCLGE_MAC_SPEED_50G;
1205 
1206 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1207 		return HCLGE_MAC_SPEED_40G;
1208 
1209 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1210 		return HCLGE_MAC_SPEED_25G;
1211 
1212 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1213 		return HCLGE_MAC_SPEED_10G;
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1216 		return HCLGE_MAC_SPEED_1G;
1217 
1218 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1219 		return HCLGE_MAC_SPEED_100M;
1220 
1221 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1222 		return HCLGE_MAC_SPEED_10M;
1223 
1224 	return HCLGE_MAC_SPEED_1G;
1225 }
1226 
1227 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1228 {
1229 	struct hclge_cfg_param_cmd *req;
1230 	u64 mac_addr_tmp_high;
1231 	u64 mac_addr_tmp;
1232 	unsigned int i;
1233 
1234 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1235 
1236 	/* get the configuration */
1237 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1238 					      HCLGE_CFG_VMDQ_M,
1239 					      HCLGE_CFG_VMDQ_S);
1240 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1242 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 					    HCLGE_CFG_TQP_DESC_N_M,
1244 					    HCLGE_CFG_TQP_DESC_N_S);
1245 
1246 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1247 					HCLGE_CFG_PHY_ADDR_M,
1248 					HCLGE_CFG_PHY_ADDR_S);
1249 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 					  HCLGE_CFG_MEDIA_TP_M,
1251 					  HCLGE_CFG_MEDIA_TP_S);
1252 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253 					  HCLGE_CFG_RX_BUF_LEN_M,
1254 					  HCLGE_CFG_RX_BUF_LEN_S);
1255 	/* get mac_address */
1256 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1257 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1258 					    HCLGE_CFG_MAC_ADDR_H_M,
1259 					    HCLGE_CFG_MAC_ADDR_H_S);
1260 
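	/* param[3] holds the upper 16 bits of the MAC address; the double
	 * shift below is equivalent to "<< 32" and merges them with the
	 * lower 32 bits taken from param[2].
	 */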
1261 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1262 
1263 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1264 					     HCLGE_CFG_DEFAULT_SPEED_M,
1265 					     HCLGE_CFG_DEFAULT_SPEED_S);
1266 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267 					    HCLGE_CFG_RSS_SIZE_M,
1268 					    HCLGE_CFG_RSS_SIZE_S);
1269 
1270 	for (i = 0; i < ETH_ALEN; i++)
1271 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1272 
1273 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1274 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1275 
1276 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277 					     HCLGE_CFG_SPEED_ABILITY_M,
1278 					     HCLGE_CFG_SPEED_ABILITY_S);
1279 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1281 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1282 	if (!cfg->umv_space)
1283 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1284 }
1285 
1286 /* hclge_get_cfg: query the static parameters from flash
1287  * @hdev: pointer to struct hclge_dev
1288  * @hcfg: the config structure to be filled
1289  */
1290 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1291 {
1292 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1293 	struct hclge_cfg_param_cmd *req;
1294 	unsigned int i;
1295 	int ret;
1296 
1297 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1298 		u32 offset = 0;
1299 
1300 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1301 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1302 					   true);
1303 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1304 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1305 		/* the read length sent to hardware is in units of 4 bytes */
1306 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1307 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1308 		req->offset = cpu_to_le32(offset);
1309 	}
1310 
1311 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1312 	if (ret) {
1313 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1314 		return ret;
1315 	}
1316 
1317 	hclge_parse_cfg(hcfg, desc);
1318 
1319 	return 0;
1320 }
1321 
1322 static int hclge_get_cap(struct hclge_dev *hdev)
1323 {
1324 	int ret;
1325 
1326 	ret = hclge_query_function_status(hdev);
1327 	if (ret) {
1328 		dev_err(&hdev->pdev->dev,
1329 			"query function status error %d.\n", ret);
1330 		return ret;
1331 	}
1332 
1333 	/* get pf resource */
1334 	return hclge_query_pf_resource(hdev);
1335 }
1336 
1337 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1338 {
1339 #define HCLGE_MIN_TX_DESC	64
1340 #define HCLGE_MIN_RX_DESC	64
1341 
1342 	if (!is_kdump_kernel())
1343 		return;
1344 
1345 	dev_info(&hdev->pdev->dev,
1346 		 "Running kdump kernel. Using minimal resources\n");
1347 
1348 	/* minimum number of queue pairs equals the number of vports */
1349 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1350 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1351 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1352 }
1353 
1354 static int hclge_configure(struct hclge_dev *hdev)
1355 {
1356 	struct hclge_cfg cfg;
1357 	unsigned int i;
1358 	int ret;
1359 
1360 	ret = hclge_get_cfg(hdev, &cfg);
1361 	if (ret) {
1362 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1363 		return ret;
1364 	}
1365 
1366 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1367 	hdev->base_tqp_pid = 0;
1368 	hdev->rss_size_max = cfg.rss_size_max;
1369 	hdev->rx_buf_len = cfg.rx_buf_len;
1370 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1371 	hdev->hw.mac.media_type = cfg.media_type;
1372 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1373 	hdev->num_tx_desc = cfg.tqp_desc_num;
1374 	hdev->num_rx_desc = cfg.tqp_desc_num;
1375 	hdev->tm_info.num_pg = 1;
1376 	hdev->tc_max = cfg.tc_num;
1377 	hdev->tm_info.hw_pfc_map = 0;
1378 	hdev->wanted_umv_size = cfg.umv_space;
1379 
1380 	if (hnae3_dev_fd_supported(hdev)) {
1381 		hdev->fd_en = true;
1382 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1383 	}
1384 
1385 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1386 	if (ret) {
1387 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1388 		return ret;
1389 	}
1390 
1391 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1392 
1393 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1394 
1395 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1396 	    (hdev->tc_max < 1)) {
1397 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1398 			 hdev->tc_max);
1399 		hdev->tc_max = 1;
1400 	}
1401 
1402 	/* Dev does not support DCB */
1403 	if (!hnae3_dev_dcb_supported(hdev)) {
1404 		hdev->tc_max = 1;
1405 		hdev->pfc_max = 0;
1406 	} else {
1407 		hdev->pfc_max = hdev->tc_max;
1408 	}
1409 
1410 	hdev->tm_info.num_tc = 1;
1411 
1412 	/* non-contiguous TCs are currently not supported */
1413 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1414 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1415 
1416 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1417 
1418 	hclge_init_kdump_kernel_config(hdev);
1419 
1420 	/* Set the initial affinity based on the PCI function number */
1421 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1422 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1423 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1424 			&hdev->affinity_mask);
1425 
1426 	return ret;
1427 }
1428 
1429 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1430 			    unsigned int tso_mss_max)
1431 {
1432 	struct hclge_cfg_tso_status_cmd *req;
1433 	struct hclge_desc desc;
1434 	u16 tso_mss;
1435 
1436 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1437 
1438 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1439 
1440 	tso_mss = 0;
1441 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1442 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1443 	req->tso_mss_min = cpu_to_le16(tso_mss);
1444 
1445 	tso_mss = 0;
1446 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1447 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1448 	req->tso_mss_max = cpu_to_le16(tso_mss);
1449 
1450 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1451 }
1452 
1453 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1454 {
1455 	struct hclge_cfg_gro_status_cmd *req;
1456 	struct hclge_desc desc;
1457 	int ret;
1458 
1459 	if (!hnae3_dev_gro_supported(hdev))
1460 		return 0;
1461 
1462 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1463 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1464 
1465 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1466 
1467 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1468 	if (ret)
1469 		dev_err(&hdev->pdev->dev,
1470 			"GRO hardware config cmd failed, ret = %d\n", ret);
1471 
1472 	return ret;
1473 }
1474 
1475 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1476 {
1477 	struct hclge_tqp *tqp;
1478 	int i;
1479 
1480 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1481 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1482 	if (!hdev->htqp)
1483 		return -ENOMEM;
1484 
1485 	tqp = hdev->htqp;
1486 
1487 	for (i = 0; i < hdev->num_tqps; i++) {
1488 		tqp->dev = &hdev->pdev->dev;
1489 		tqp->index = i;
1490 
1491 		tqp->q.ae_algo = &ae_algo;
1492 		tqp->q.buf_size = hdev->rx_buf_len;
1493 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1494 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1495 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1496 			i * HCLGE_TQP_REG_SIZE;
1497 
1498 		tqp++;
1499 	}
1500 
1501 	return 0;
1502 }
1503 
1504 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1505 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1506 {
1507 	struct hclge_tqp_map_cmd *req;
1508 	struct hclge_desc desc;
1509 	int ret;
1510 
1511 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1512 
1513 	req = (struct hclge_tqp_map_cmd *)desc.data;
1514 	req->tqp_id = cpu_to_le16(tqp_pid);
1515 	req->tqp_vf = func_id;
1516 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1517 	if (!is_pf)
1518 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1519 	req->tqp_vid = cpu_to_le16(tqp_vid);
1520 
1521 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1522 	if (ret)
1523 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1524 
1525 	return ret;
1526 }
1527 
1528 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1529 {
1530 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1531 	struct hclge_dev *hdev = vport->back;
1532 	int i, alloced;
1533 
1534 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1535 	     alloced < num_tqps; i++) {
1536 		if (!hdev->htqp[i].alloced) {
1537 			hdev->htqp[i].q.handle = &vport->nic;
1538 			hdev->htqp[i].q.tqp_index = alloced;
1539 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1540 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1541 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1542 			hdev->htqp[i].alloced = true;
1543 			alloced++;
1544 		}
1545 	}
1546 	vport->alloc_tqps = alloced;
1547 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1548 				vport->alloc_tqps / hdev->tm_info.num_tc);
1549 
1550 	/* ensure a one-to-one mapping between IRQs and queues by default */
1551 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1552 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1553 
1554 	return 0;
1555 }
1556 
1557 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1558 			    u16 num_tx_desc, u16 num_rx_desc)
1560 {
1561 	struct hnae3_handle *nic = &vport->nic;
1562 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1563 	struct hclge_dev *hdev = vport->back;
1564 	int ret;
1565 
1566 	kinfo->num_tx_desc = num_tx_desc;
1567 	kinfo->num_rx_desc = num_rx_desc;
1568 
1569 	kinfo->rx_buf_len = hdev->rx_buf_len;
1570 
1571 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1572 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1573 	if (!kinfo->tqp)
1574 		return -ENOMEM;
1575 
1576 	ret = hclge_assign_tqp(vport, num_tqps);
1577 	if (ret)
1578 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1579 
1580 	return ret;
1581 }
1582 
1583 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1584 				  struct hclge_vport *vport)
1585 {
1586 	struct hnae3_handle *nic = &vport->nic;
1587 	struct hnae3_knic_private_info *kinfo;
1588 	u16 i;
1589 
1590 	kinfo = &nic->kinfo;
1591 	for (i = 0; i < vport->alloc_tqps; i++) {
1592 		struct hclge_tqp *q =
1593 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1594 		bool is_pf;
1595 		int ret;
1596 
1597 		is_pf = !(vport->vport_id);
1598 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1599 					     i, is_pf);
1600 		if (ret)
1601 			return ret;
1602 	}
1603 
1604 	return 0;
1605 }
1606 
1607 static int hclge_map_tqp(struct hclge_dev *hdev)
1608 {
1609 	struct hclge_vport *vport = hdev->vport;
1610 	u16 i, num_vport;
1611 
1612 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1613 	for (i = 0; i < num_vport; i++)	{
1614 		int ret;
1615 
1616 		ret = hclge_map_tqp_to_vport(hdev, vport);
1617 		if (ret)
1618 			return ret;
1619 
1620 		vport++;
1621 	}
1622 
1623 	return 0;
1624 }
1625 
1626 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1627 {
1628 	struct hnae3_handle *nic = &vport->nic;
1629 	struct hclge_dev *hdev = vport->back;
1630 	int ret;
1631 
1632 	nic->pdev = hdev->pdev;
1633 	nic->ae_algo = &ae_algo;
1634 	nic->numa_node_mask = hdev->numa_node_mask;
1635 
1636 	ret = hclge_knic_setup(vport, num_tqps,
1637 			       hdev->num_tx_desc, hdev->num_rx_desc);
1638 	if (ret)
1639 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1640 
1641 	return ret;
1642 }
1643 
1644 static int hclge_alloc_vport(struct hclge_dev *hdev)
1645 {
1646 	struct pci_dev *pdev = hdev->pdev;
1647 	struct hclge_vport *vport;
1648 	u32 tqp_main_vport;
1649 	u32 tqp_per_vport;
1650 	int num_vport, i;
1651 	int ret;
1652 
1653 	/* We need to alloc a vport for the main NIC of the PF */
1654 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1655 
1656 	if (hdev->num_tqps < num_vport) {
1657 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1658 			hdev->num_tqps, num_vport);
1659 		return -EINVAL;
1660 	}
1661 
1662 	/* Alloc the same number of TQPs for every vport */
1663 	tqp_per_vport = hdev->num_tqps / num_vport;
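	/* the main (PF) vport additionally takes any remainder TQPs */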
1664 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1665 
1666 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1667 			     GFP_KERNEL);
1668 	if (!vport)
1669 		return -ENOMEM;
1670 
1671 	hdev->vport = vport;
1672 	hdev->num_alloc_vport = num_vport;
1673 
1674 	if (IS_ENABLED(CONFIG_PCI_IOV))
1675 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1676 
1677 	for (i = 0; i < num_vport; i++) {
1678 		vport->back = hdev;
1679 		vport->vport_id = i;
1680 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1681 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1682 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1683 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1684 		INIT_LIST_HEAD(&vport->vlan_list);
1685 		INIT_LIST_HEAD(&vport->uc_mac_list);
1686 		INIT_LIST_HEAD(&vport->mc_mac_list);
1687 
1688 		if (i == 0)
1689 			ret = hclge_vport_setup(vport, tqp_main_vport);
1690 		else
1691 			ret = hclge_vport_setup(vport, tqp_per_vport);
1692 		if (ret) {
1693 			dev_err(&pdev->dev,
1694 				"vport setup failed for vport %d, %d\n",
1695 				i, ret);
1696 			return ret;
1697 		}
1698 
1699 		vport++;
1700 	}
1701 
1702 	return 0;
1703 }
1704 
1705 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1706 				    struct hclge_pkt_buf_alloc *buf_alloc)
1707 {
1708 /* TX buffer size is in units of 128 bytes */
1709 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1710 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1711 	struct hclge_tx_buff_alloc_cmd *req;
1712 	struct hclge_desc desc;
1713 	int ret;
1714 	u8 i;
1715 
1716 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1717 
1718 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1719 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1720 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1721 
1722 		req->tx_pkt_buff[i] =
1723 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1724 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1725 	}
1726 
1727 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1728 	if (ret)
1729 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1730 			ret);
1731 
1732 	return ret;
1733 }
1734 
1735 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1736 				 struct hclge_pkt_buf_alloc *buf_alloc)
1737 {
1738 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1739 
1740 	if (ret)
1741 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1742 
1743 	return ret;
1744 }
1745 
1746 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1747 {
1748 	unsigned int i;
1749 	u32 cnt = 0;
1750 
1751 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1752 		if (hdev->hw_tc_map & BIT(i))
1753 			cnt++;
1754 	return cnt;
1755 }
1756 
/* Get the number of PFC-enabled TCs that have a private buffer */
1758 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1759 				  struct hclge_pkt_buf_alloc *buf_alloc)
1760 {
1761 	struct hclge_priv_buf *priv;
1762 	unsigned int i;
1763 	int cnt = 0;
1764 
1765 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1766 		priv = &buf_alloc->priv_buf[i];
1767 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1768 		    priv->enable)
1769 			cnt++;
1770 	}
1771 
1772 	return cnt;
1773 }
1774 
/* Get the number of PFC-disabled TCs that have a private buffer */
1776 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1777 				     struct hclge_pkt_buf_alloc *buf_alloc)
1778 {
1779 	struct hclge_priv_buf *priv;
1780 	unsigned int i;
1781 	int cnt = 0;
1782 
1783 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1784 		priv = &buf_alloc->priv_buf[i];
1785 		if (hdev->hw_tc_map & BIT(i) &&
1786 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1787 		    priv->enable)
1788 			cnt++;
1789 	}
1790 
1791 	return cnt;
1792 }
1793 
1794 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1795 {
1796 	struct hclge_priv_buf *priv;
1797 	u32 rx_priv = 0;
1798 	int i;
1799 
1800 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1801 		priv = &buf_alloc->priv_buf[i];
1802 		if (priv->enable)
1803 			rx_priv += priv->buf_size;
1804 	}
1805 	return rx_priv;
1806 }
1807 
1808 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1809 {
1810 	u32 i, total_tx_size = 0;
1811 
1812 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1813 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1814 
1815 	return total_tx_size;
1816 }
1817 
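/* Check whether the rx buffer left over after the per-TC private buffers
 * can still hold the shared buffer; if so, record the shared buffer size
 * and derive its waterlines and per-TC thresholds.
 *
 * As an illustration (numbers are examples only): with an MPS of 1500
 * bytes, aligned_mps rounds up to 1536, so a DCB capable device needs at
 * least 2 * 1536 bytes plus hdev->dv_buf_size for the shared buffer.
 */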
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
1821 {
1822 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1823 	u32 tc_num = hclge_get_tc_num(hdev);
1824 	u32 shared_buf, aligned_mps;
1825 	u32 rx_priv;
1826 	int i;
1827 
1828 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1829 
1830 	if (hnae3_dev_dcb_supported(hdev))
1831 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1832 					hdev->dv_buf_size;
1833 	else
1834 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1835 					+ hdev->dv_buf_size;
1836 
1837 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1838 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1839 			     HCLGE_BUF_SIZE_UNIT);
1840 
1841 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1842 	if (rx_all < rx_priv + shared_std)
1843 		return false;
1844 
1845 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1846 	buf_alloc->s_buf.buf_size = shared_buf;
1847 	if (hnae3_dev_dcb_supported(hdev)) {
1848 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1849 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1850 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1851 				  HCLGE_BUF_SIZE_UNIT);
1852 	} else {
1853 		buf_alloc->s_buf.self.high = aligned_mps +
1854 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1855 		buf_alloc->s_buf.self.low = aligned_mps;
1856 	}
1857 
1858 	if (hnae3_dev_dcb_supported(hdev)) {
1859 		hi_thrd = shared_buf - hdev->dv_buf_size;
1860 
1861 		if (tc_num <= NEED_RESERVE_TC_NUM)
1862 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1863 					/ BUF_MAX_PERCENT;
1864 
1865 		if (tc_num)
1866 			hi_thrd = hi_thrd / tc_num;
1867 
1868 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1869 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1870 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1871 	} else {
1872 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1873 		lo_thrd = aligned_mps;
1874 	}
1875 
1876 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1877 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1878 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1879 	}
1880 
1881 	return true;
1882 }
1883 
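/* Reserve hdev->tx_buf_size bytes of tx buffer for every TC enabled in
 * hw_tc_map; fails with -ENOMEM when the packet buffer runs out.
 */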
1884 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1885 				struct hclge_pkt_buf_alloc *buf_alloc)
1886 {
1887 	u32 i, total_size;
1888 
1889 	total_size = hdev->pkt_buf_size;
1890 
1891 	/* alloc tx buffer for all enabled tc */
1892 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1893 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1894 
1895 		if (hdev->hw_tc_map & BIT(i)) {
1896 			if (total_size < hdev->tx_buf_size)
1897 				return -ENOMEM;
1898 
1899 			priv->tx_buf_size = hdev->tx_buf_size;
1900 		} else {
1901 			priv->tx_buf_size = 0;
1902 		}
1903 
1904 		total_size -= priv->tx_buf_size;
1905 	}
1906 
1907 	return 0;
1908 }
1909 
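/* Work out the rx private buffer and waterlines for every enabled TC,
 * using either the larger (@max == true) or the smaller waterline scheme,
 * then verify that the shared buffer still fits in what is left.
 */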
1910 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1911 				  struct hclge_pkt_buf_alloc *buf_alloc)
1912 {
1913 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1914 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1915 	unsigned int i;
1916 
1917 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1918 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1919 
1920 		priv->enable = 0;
1921 		priv->wl.low = 0;
1922 		priv->wl.high = 0;
1923 		priv->buf_size = 0;
1924 
1925 		if (!(hdev->hw_tc_map & BIT(i)))
1926 			continue;
1927 
1928 		priv->enable = 1;
1929 
1930 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1931 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1932 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1933 						HCLGE_BUF_SIZE_UNIT);
1934 		} else {
1935 			priv->wl.low = 0;
1936 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1937 					aligned_mps;
1938 		}
1939 
1940 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1941 	}
1942 
1943 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1944 }
1945 
1946 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1947 					  struct hclge_pkt_buf_alloc *buf_alloc)
1948 {
1949 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1950 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1951 	int i;
1952 
	/* clear the private buffers starting from the last TC */
1954 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1955 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1956 		unsigned int mask = BIT((unsigned int)i);
1957 
1958 		if (hdev->hw_tc_map & mask &&
1959 		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of this non-PFC TC */
1961 			priv->wl.low = 0;
1962 			priv->wl.high = 0;
1963 			priv->buf_size = 0;
1964 			priv->enable = 0;
1965 			no_pfc_priv_num--;
1966 		}
1967 
1968 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1969 		    no_pfc_priv_num == 0)
1970 			break;
1971 	}
1972 
1973 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1974 }
1975 
1976 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1977 					struct hclge_pkt_buf_alloc *buf_alloc)
1978 {
1979 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1980 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1981 	int i;
1982 
	/* clear the private buffers starting from the last TC */
1984 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1985 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1986 		unsigned int mask = BIT((unsigned int)i);
1987 
1988 		if (hdev->hw_tc_map & mask &&
1989 		    hdev->tm_info.hw_pfc_map & mask) {
			/* Clear the private buffer of this PFC-enabled TC */
1991 			priv->wl.low = 0;
1992 			priv->enable = 0;
1993 			priv->wl.high = 0;
1994 			priv->buf_size = 0;
1995 			pfc_priv_num--;
1996 		}
1997 
1998 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1999 		    pfc_priv_num == 0)
2000 			break;
2001 	}
2002 
2003 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2004 }
2005 
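/* Try to carve the whole remaining rx buffer into per-TC private buffers
 * with no shared buffer at all; this only succeeds when every enabled TC
 * can get at least the minimum private buffer size.
 */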
2006 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2007 				      struct hclge_pkt_buf_alloc *buf_alloc)
2008 {
2009 #define COMPENSATE_BUFFER	0x3C00
2010 #define COMPENSATE_HALF_MPS_NUM	5
2011 #define PRIV_WL_GAP		0x1800
2012 
2013 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2014 	u32 tc_num = hclge_get_tc_num(hdev);
2015 	u32 half_mps = hdev->mps >> 1;
2016 	u32 min_rx_priv;
2017 	unsigned int i;
2018 
2019 	if (tc_num)
2020 		rx_priv = rx_priv / tc_num;
2021 
2022 	if (tc_num <= NEED_RESERVE_TC_NUM)
2023 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2024 
2025 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2026 			COMPENSATE_HALF_MPS_NUM * half_mps;
2027 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2028 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2029 
2030 	if (rx_priv < min_rx_priv)
2031 		return false;
2032 
2033 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2034 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2035 
2036 		priv->enable = 0;
2037 		priv->wl.low = 0;
2038 		priv->wl.high = 0;
2039 		priv->buf_size = 0;
2040 
2041 		if (!(hdev->hw_tc_map & BIT(i)))
2042 			continue;
2043 
2044 		priv->enable = 1;
2045 		priv->buf_size = rx_priv;
2046 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2047 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2048 	}
2049 
2050 	buf_alloc->s_buf.buf_size = 0;
2051 
2052 	return true;
2053 }
2054 
2055 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2056  * @hdev: pointer to struct hclge_dev
2057  * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
2059  */
2060 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2061 				struct hclge_pkt_buf_alloc *buf_alloc)
2062 {
2063 	/* When DCB is not supported, rx private buffer is not allocated. */
2064 	if (!hnae3_dev_dcb_supported(hdev)) {
2065 		u32 rx_all = hdev->pkt_buf_size;
2066 
2067 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2068 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2069 			return -ENOMEM;
2070 
2071 		return 0;
2072 	}
2073 
2074 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2075 		return 0;
2076 
2077 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2078 		return 0;
2079 
2080 	/* try to decrease the buffer size */
2081 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2082 		return 0;
2083 
2084 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2085 		return 0;
2086 
2087 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2088 		return 0;
2089 
2090 	return -ENOMEM;
2091 }
2092 
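/* Program the per-TC private rx buffer sizes and the shared buffer size
 * into hardware (sizes are passed in HCLGE_BUF_UNIT_S units).
 */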
2093 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2094 				   struct hclge_pkt_buf_alloc *buf_alloc)
2095 {
2096 	struct hclge_rx_priv_buff_cmd *req;
2097 	struct hclge_desc desc;
2098 	int ret;
2099 	int i;
2100 
2101 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2102 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2103 
2104 	/* Alloc private buffer TCs */
2105 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2106 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2107 
2108 		req->buf_num[i] =
2109 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2110 		req->buf_num[i] |=
2111 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2112 	}
2113 
2114 	req->shared_buf =
2115 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2116 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2117 
2118 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2119 	if (ret)
2120 		dev_err(&hdev->pdev->dev,
2121 			"rx private buffer alloc cmd failed %d\n", ret);
2122 
2123 	return ret;
2124 }
2125 
2126 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2127 				   struct hclge_pkt_buf_alloc *buf_alloc)
2128 {
2129 	struct hclge_rx_priv_wl_buf *req;
2130 	struct hclge_priv_buf *priv;
2131 	struct hclge_desc desc[2];
2132 	int i, j;
2133 	int ret;
2134 
2135 	for (i = 0; i < 2; i++) {
2136 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2137 					   false);
2138 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2139 
		/* The first descriptor sets the NEXT bit to 1 */
2141 		if (i == 0)
2142 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2143 		else
2144 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2145 
2146 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2147 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2148 
2149 			priv = &buf_alloc->priv_buf[idx];
2150 			req->tc_wl[j].high =
2151 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2152 			req->tc_wl[j].high |=
2153 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154 			req->tc_wl[j].low =
2155 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2156 			req->tc_wl[j].low |=
2157 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2158 		}
2159 	}
2160 
	/* Send 2 descriptors at one time */
2162 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2163 	if (ret)
2164 		dev_err(&hdev->pdev->dev,
2165 			"rx private waterline config cmd failed %d\n",
2166 			ret);
2167 	return ret;
2168 }
2169 
2170 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2171 				    struct hclge_pkt_buf_alloc *buf_alloc)
2172 {
2173 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2174 	struct hclge_rx_com_thrd *req;
2175 	struct hclge_desc desc[2];
2176 	struct hclge_tc_thrd *tc;
2177 	int i, j;
2178 	int ret;
2179 
2180 	for (i = 0; i < 2; i++) {
2181 		hclge_cmd_setup_basic_desc(&desc[i],
2182 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2183 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2184 
		/* The first descriptor sets the NEXT bit to 1 */
2186 		if (i == 0)
2187 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2188 		else
2189 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2190 
2191 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2192 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2193 
2194 			req->com_thrd[j].high =
2195 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2196 			req->com_thrd[j].high |=
2197 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 			req->com_thrd[j].low =
2199 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2200 			req->com_thrd[j].low |=
2201 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2202 		}
2203 	}
2204 
2205 	/* Send 2 descriptors at one time */
2206 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2207 	if (ret)
2208 		dev_err(&hdev->pdev->dev,
2209 			"common threshold config cmd failed %d\n", ret);
2210 	return ret;
2211 }
2212 
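/* Program the high and low waterlines of the shared (common) rx buffer */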
2213 static int hclge_common_wl_config(struct hclge_dev *hdev,
2214 				  struct hclge_pkt_buf_alloc *buf_alloc)
2215 {
2216 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2217 	struct hclge_rx_com_wl *req;
2218 	struct hclge_desc desc;
2219 	int ret;
2220 
2221 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2222 
2223 	req = (struct hclge_rx_com_wl *)desc.data;
2224 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2226 
2227 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2229 
2230 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2231 	if (ret)
2232 		dev_err(&hdev->pdev->dev,
2233 			"common waterline config cmd failed %d\n", ret);
2234 
2235 	return ret;
2236 }
2237 
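/* Calculate and program the whole packet buffer layout: tx buffers first,
 * then the rx private buffers and, on DCB capable devices, the private
 * waterlines and shared-buffer thresholds, and finally the common waterline.
 */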
2238 int hclge_buffer_alloc(struct hclge_dev *hdev)
2239 {
2240 	struct hclge_pkt_buf_alloc *pkt_buf;
2241 	int ret;
2242 
2243 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2244 	if (!pkt_buf)
2245 		return -ENOMEM;
2246 
2247 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2248 	if (ret) {
2249 		dev_err(&hdev->pdev->dev,
2250 			"could not calc tx buffer size for all TCs %d\n", ret);
2251 		goto out;
2252 	}
2253 
2254 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2255 	if (ret) {
2256 		dev_err(&hdev->pdev->dev,
2257 			"could not alloc tx buffers %d\n", ret);
2258 		goto out;
2259 	}
2260 
2261 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2262 	if (ret) {
2263 		dev_err(&hdev->pdev->dev,
2264 			"could not calc rx priv buffer size for all TCs %d\n",
2265 			ret);
2266 		goto out;
2267 	}
2268 
2269 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2270 	if (ret) {
2271 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2272 			ret);
2273 		goto out;
2274 	}
2275 
2276 	if (hnae3_dev_dcb_supported(hdev)) {
2277 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2278 		if (ret) {
2279 			dev_err(&hdev->pdev->dev,
2280 				"could not configure rx private waterline %d\n",
2281 				ret);
2282 			goto out;
2283 		}
2284 
2285 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2286 		if (ret) {
2287 			dev_err(&hdev->pdev->dev,
2288 				"could not configure common threshold %d\n",
2289 				ret);
2290 			goto out;
2291 		}
2292 	}
2293 
2294 	ret = hclge_common_wl_config(hdev, pkt_buf);
2295 	if (ret)
2296 		dev_err(&hdev->pdev->dev,
2297 			"could not configure common waterline %d\n", ret);
2298 
2299 out:
2300 	kfree(pkt_buf);
2301 	return ret;
2302 }
2303 
2304 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2305 {
2306 	struct hnae3_handle *roce = &vport->roce;
2307 	struct hnae3_handle *nic = &vport->nic;
2308 
2309 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2310 
2311 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2312 	    vport->back->num_msi_left == 0)
2313 		return -EINVAL;
2314 
2315 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2316 
2317 	roce->rinfo.netdev = nic->kinfo.netdev;
2318 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2319 
2320 	roce->pdev = nic->pdev;
2321 	roce->ae_algo = nic->ae_algo;
2322 	roce->numa_node_mask = nic->numa_node_mask;
2323 
2324 	return 0;
2325 }
2326 
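/* Allocate MSI/MSI-X vectors together with the vector_status/vector_irq
 * bookkeeping arrays; fewer vectors than requested may be granted.
 */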
2327 static int hclge_init_msi(struct hclge_dev *hdev)
2328 {
2329 	struct pci_dev *pdev = hdev->pdev;
2330 	int vectors;
2331 	int i;
2332 
2333 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2334 					hdev->num_msi,
2335 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2336 	if (vectors < 0) {
2337 		dev_err(&pdev->dev,
2338 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2339 			vectors);
2340 		return vectors;
2341 	}
2342 	if (vectors < hdev->num_msi)
2343 		dev_warn(&hdev->pdev->dev,
2344 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2345 			 hdev->num_msi, vectors);
2346 
2347 	hdev->num_msi = vectors;
2348 	hdev->num_msi_left = vectors;
2349 
2350 	hdev->base_msi_vector = pdev->irq;
2351 	hdev->roce_base_vector = hdev->base_msi_vector +
2352 				hdev->roce_base_msix_offset;
2353 
2354 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2355 					   sizeof(u16), GFP_KERNEL);
2356 	if (!hdev->vector_status) {
2357 		pci_free_irq_vectors(pdev);
2358 		return -ENOMEM;
2359 	}
2360 
2361 	for (i = 0; i < hdev->num_msi; i++)
2362 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2363 
2364 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2365 					sizeof(int), GFP_KERNEL);
2366 	if (!hdev->vector_irq) {
2367 		pci_free_irq_vectors(pdev);
2368 		return -ENOMEM;
2369 	}
2370 
2371 	return 0;
2372 }
2373 
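/* Half duplex is only meaningful for 10M and 100M; any other speed is
 * forced to full duplex.
 */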
2374 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2375 {
2376 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2377 		duplex = HCLGE_MAC_FULL;
2378 
2379 	return duplex;
2380 }
2381 
2382 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2383 				      u8 duplex)
2384 {
2385 	struct hclge_config_mac_speed_dup_cmd *req;
2386 	struct hclge_desc desc;
2387 	int ret;
2388 
2389 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2390 
2391 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2392 
2393 	if (duplex)
2394 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2395 
2396 	switch (speed) {
2397 	case HCLGE_MAC_SPEED_10M:
2398 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 				HCLGE_CFG_SPEED_S, 6);
2400 		break;
2401 	case HCLGE_MAC_SPEED_100M:
2402 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 				HCLGE_CFG_SPEED_S, 7);
2404 		break;
2405 	case HCLGE_MAC_SPEED_1G:
2406 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 				HCLGE_CFG_SPEED_S, 0);
2408 		break;
2409 	case HCLGE_MAC_SPEED_10G:
2410 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 				HCLGE_CFG_SPEED_S, 1);
2412 		break;
2413 	case HCLGE_MAC_SPEED_25G:
2414 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 				HCLGE_CFG_SPEED_S, 2);
2416 		break;
2417 	case HCLGE_MAC_SPEED_40G:
2418 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 				HCLGE_CFG_SPEED_S, 3);
2420 		break;
2421 	case HCLGE_MAC_SPEED_50G:
2422 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 				HCLGE_CFG_SPEED_S, 4);
2424 		break;
2425 	case HCLGE_MAC_SPEED_100G:
2426 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2427 				HCLGE_CFG_SPEED_S, 5);
2428 		break;
2429 	default:
2430 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2431 		return -EINVAL;
2432 	}
2433 
2434 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2435 		      1);
2436 
2437 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2438 	if (ret) {
2439 		dev_err(&hdev->pdev->dev,
2440 			"mac speed/duplex config cmd failed %d.\n", ret);
2441 		return ret;
2442 	}
2443 
2444 	return 0;
2445 }
2446 
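/* Update the MAC speed/duplex in hardware only when it actually changes,
 * then cache the new values in hdev->hw.mac.
 */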
2447 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2448 {
2449 	int ret;
2450 
2451 	duplex = hclge_check_speed_dup(duplex, speed);
2452 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2453 		return 0;
2454 
2455 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2456 	if (ret)
2457 		return ret;
2458 
2459 	hdev->hw.mac.speed = speed;
2460 	hdev->hw.mac.duplex = duplex;
2461 
2462 	return 0;
2463 }
2464 
2465 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2466 				     u8 duplex)
2467 {
2468 	struct hclge_vport *vport = hclge_get_vport(handle);
2469 	struct hclge_dev *hdev = vport->back;
2470 
2471 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2472 }
2473 
2474 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2475 {
2476 	struct hclge_config_auto_neg_cmd *req;
2477 	struct hclge_desc desc;
2478 	u32 flag = 0;
2479 	int ret;
2480 
2481 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2482 
2483 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2484 	if (enable)
2485 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2486 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2487 
2488 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2489 	if (ret)
2490 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2491 			ret);
2492 
2493 	return ret;
2494 }
2495 
2496 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2497 {
2498 	struct hclge_vport *vport = hclge_get_vport(handle);
2499 	struct hclge_dev *hdev = vport->back;
2500 
2501 	if (!hdev->hw.mac.support_autoneg) {
2502 		if (enable) {
2503 			dev_err(&hdev->pdev->dev,
2504 				"autoneg is not supported by current port\n");
2505 			return -EOPNOTSUPP;
2506 		} else {
2507 			return 0;
2508 		}
2509 	}
2510 
2511 	return hclge_set_autoneg_en(hdev, enable);
2512 }
2513 
2514 static int hclge_get_autoneg(struct hnae3_handle *handle)
2515 {
2516 	struct hclge_vport *vport = hclge_get_vport(handle);
2517 	struct hclge_dev *hdev = vport->back;
2518 	struct phy_device *phydev = hdev->hw.mac.phydev;
2519 
2520 	if (phydev)
2521 		return phydev->autoneg;
2522 
2523 	return hdev->hw.mac.autoneg;
2524 }
2525 
2526 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2527 {
2528 	struct hclge_vport *vport = hclge_get_vport(handle);
2529 	struct hclge_dev *hdev = vport->back;
2530 	int ret;
2531 
2532 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2533 
2534 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2535 	if (ret)
2536 		return ret;
2537 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2538 }
2539 
2540 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2541 {
2542 	struct hclge_vport *vport = hclge_get_vport(handle);
2543 	struct hclge_dev *hdev = vport->back;
2544 
2545 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2546 		return hclge_set_autoneg_en(hdev, !halt);
2547 
2548 	return 0;
2549 }
2550 
2551 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2552 {
2553 	struct hclge_config_fec_cmd *req;
2554 	struct hclge_desc desc;
2555 	int ret;
2556 
2557 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2558 
2559 	req = (struct hclge_config_fec_cmd *)desc.data;
2560 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2561 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2562 	if (fec_mode & BIT(HNAE3_FEC_RS))
2563 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2564 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2565 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2566 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2567 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2568 
2569 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2570 	if (ret)
2571 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2572 
2573 	return ret;
2574 }
2575 
2576 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2577 {
2578 	struct hclge_vport *vport = hclge_get_vport(handle);
2579 	struct hclge_dev *hdev = vport->back;
2580 	struct hclge_mac *mac = &hdev->hw.mac;
2581 	int ret;
2582 
2583 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2584 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2585 		return -EINVAL;
2586 	}
2587 
2588 	ret = hclge_set_fec_hw(hdev, fec_mode);
2589 	if (ret)
2590 		return ret;
2591 
2592 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2593 	return 0;
2594 }
2595 
2596 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2597 			  u8 *fec_mode)
2598 {
2599 	struct hclge_vport *vport = hclge_get_vport(handle);
2600 	struct hclge_dev *hdev = vport->back;
2601 	struct hclge_mac *mac = &hdev->hw.mac;
2602 
2603 	if (fec_ability)
2604 		*fec_ability = mac->fec_ability;
2605 	if (fec_mode)
2606 		*fec_mode = mac->fec_mode;
2607 }
2608 
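/* Bring the MAC to its initial state: speed/duplex, autoneg, FEC, MTU,
 * default loopback and the packet buffer allocation.
 */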
2609 static int hclge_mac_init(struct hclge_dev *hdev)
2610 {
2611 	struct hclge_mac *mac = &hdev->hw.mac;
2612 	int ret;
2613 
2614 	hdev->support_sfp_query = true;
2615 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2616 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2617 					 hdev->hw.mac.duplex);
2618 	if (ret)
2619 		return ret;
2620 
2621 	if (hdev->hw.mac.support_autoneg) {
2622 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2623 		if (ret)
2624 			return ret;
2625 	}
2626 
2627 	mac->link = 0;
2628 
2629 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2630 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2631 		if (ret)
2632 			return ret;
2633 	}
2634 
2635 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2636 	if (ret) {
2637 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2638 		return ret;
2639 	}
2640 
2641 	ret = hclge_set_default_loopback(hdev);
2642 	if (ret)
2643 		return ret;
2644 
2645 	ret = hclge_buffer_alloc(hdev);
2646 	if (ret)
2647 		dev_err(&hdev->pdev->dev,
2648 			"allocate buffer fail, ret=%d\n", ret);
2649 
2650 	return ret;
2651 }
2652 
2653 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2654 {
2655 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2656 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2657 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2658 				    hclge_wq, &hdev->service_task, 0);
2659 }
2660 
2661 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2662 {
2663 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2664 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2665 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2666 				    hclge_wq, &hdev->service_task, 0);
2667 }
2668 
2669 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2670 {
2671 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2672 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2673 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2674 				    hclge_wq, &hdev->service_task,
2675 				    delay_time);
2676 }
2677 
2678 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2679 {
2680 	struct hclge_link_status_cmd *req;
2681 	struct hclge_desc desc;
2682 	int link_status;
2683 	int ret;
2684 
2685 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2686 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2687 	if (ret) {
2688 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2689 			ret);
2690 		return ret;
2691 	}
2692 
2693 	req = (struct hclge_link_status_cmd *)desc.data;
2694 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2695 
2696 	return !!link_status;
2697 }
2698 
2699 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2700 {
2701 	unsigned int mac_state;
2702 	int link_stat;
2703 
2704 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2705 		return 0;
2706 
2707 	mac_state = hclge_get_mac_link_status(hdev);
2708 
2709 	if (hdev->hw.mac.phydev) {
2710 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2711 			link_stat = mac_state &
2712 				hdev->hw.mac.phydev->link;
2713 		else
2714 			link_stat = 0;
2715 
2716 	} else {
2717 		link_stat = mac_state;
2718 	}
2719 
2720 	return !!link_stat;
2721 }
2722 
2723 static void hclge_update_link_status(struct hclge_dev *hdev)
2724 {
2725 	struct hnae3_client *rclient = hdev->roce_client;
2726 	struct hnae3_client *client = hdev->nic_client;
2727 	struct hnae3_handle *rhandle;
2728 	struct hnae3_handle *handle;
2729 	int state;
2730 	int i;
2731 
2732 	if (!client)
2733 		return;
2734 
2735 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2736 		return;
2737 
2738 	state = hclge_get_mac_phy_link(hdev);
2739 	if (state != hdev->hw.mac.link) {
2740 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2741 			handle = &hdev->vport[i].nic;
2742 			client->ops->link_status_change(handle, state);
2743 			hclge_config_mac_tnl_int(hdev, state);
2744 			rhandle = &hdev->vport[i].roce;
2745 			if (rclient && rclient->ops->link_status_change)
2746 				rclient->ops->link_status_change(rhandle,
2747 								 state);
2748 		}
2749 		hdev->hw.mac.link = state;
2750 	}
2751 
2752 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2753 }
2754 
2755 static void hclge_update_port_capability(struct hclge_mac *mac)
2756 {
2757 	/* update fec ability by speed */
2758 	hclge_convert_setting_fec(mac);
2759 
	/* firmware cannot identify the backplane type, so the media type
	 * read from the configuration is used to determine it
	 */
2763 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2764 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2765 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2766 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2767 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2768 
2769 	if (mac->support_autoneg) {
2770 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2771 		linkmode_copy(mac->advertising, mac->supported);
2772 	} else {
2773 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2774 				   mac->supported);
2775 		linkmode_zero(mac->advertising);
2776 	}
2777 }
2778 
2779 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2780 {
2781 	struct hclge_sfp_info_cmd *resp;
2782 	struct hclge_desc desc;
2783 	int ret;
2784 
2785 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2786 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2787 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2788 	if (ret == -EOPNOTSUPP) {
2789 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
2791 		return ret;
2792 	} else if (ret) {
2793 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2794 		return ret;
2795 	}
2796 
2797 	*speed = le32_to_cpu(resp->speed);
2798 
2799 	return 0;
2800 }
2801 
2802 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2803 {
2804 	struct hclge_sfp_info_cmd *resp;
2805 	struct hclge_desc desc;
2806 	int ret;
2807 
2808 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2809 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2810 
2811 	resp->query_type = QUERY_ACTIVE_SPEED;
2812 
2813 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2814 	if (ret == -EOPNOTSUPP) {
2815 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
2817 		return ret;
2818 	} else if (ret) {
2819 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2820 		return ret;
2821 	}
2822 
	/* In some cases the MAC speed reported by the IMP may be 0; such a
	 * value must not be written to mac->speed.
	 */
2826 	if (!le32_to_cpu(resp->speed))
2827 		return 0;
2828 
2829 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, the firmware is an old version that
	 * does not report these params, so do not update them
	 */
2833 	if (resp->speed_ability) {
2834 		mac->module_type = le32_to_cpu(resp->module_type);
2835 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2836 		mac->autoneg = resp->autoneg;
2837 		mac->support_autoneg = resp->autoneg_ability;
2838 		mac->speed_type = QUERY_ACTIVE_SPEED;
2839 		if (!resp->active_fec)
2840 			mac->fec_mode = 0;
2841 		else
2842 			mac->fec_mode = BIT(resp->active_fec);
2843 	} else {
2844 		mac->speed_type = QUERY_SFP_SPEED;
2845 	}
2846 
2847 	return 0;
2848 }
2849 
2850 static int hclge_update_port_info(struct hclge_dev *hdev)
2851 {
2852 	struct hclge_mac *mac = &hdev->hw.mac;
2853 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2854 	int ret;
2855 
2856 	/* get the port info from SFP cmd if not copper port */
2857 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2858 		return 0;
2859 
	/* if IMP does not support getting SFP/qSFP info, return directly */
2861 	if (!hdev->support_sfp_query)
2862 		return 0;
2863 
2864 	if (hdev->pdev->revision >= 0x21)
2865 		ret = hclge_get_sfp_info(hdev, mac);
2866 	else
2867 		ret = hclge_get_sfp_speed(hdev, &speed);
2868 
2869 	if (ret == -EOPNOTSUPP) {
2870 		hdev->support_sfp_query = false;
2871 		return ret;
2872 	} else if (ret) {
2873 		return ret;
2874 	}
2875 
2876 	if (hdev->pdev->revision >= 0x21) {
2877 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2878 			hclge_update_port_capability(mac);
2879 			return 0;
2880 		}
2881 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2882 					       HCLGE_MAC_FULL);
2883 	} else {
2884 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2885 			return 0; /* do nothing if no SFP */
2886 
2887 		/* must config full duplex for SFP */
2888 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2889 	}
2890 }
2891 
2892 static int hclge_get_status(struct hnae3_handle *handle)
2893 {
2894 	struct hclge_vport *vport = hclge_get_vport(handle);
2895 	struct hclge_dev *hdev = vport->back;
2896 
2897 	hclge_update_link_status(hdev);
2898 
2899 	return hdev->hw.mac.link;
2900 }
2901 
2902 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2903 {
2904 	if (!pci_num_vf(hdev->pdev)) {
2905 		dev_err(&hdev->pdev->dev,
2906 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
2907 		return NULL;
2908 	}
2909 
2910 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2911 		dev_err(&hdev->pdev->dev,
2912 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
2913 			vf, pci_num_vf(hdev->pdev));
2914 		return NULL;
2915 	}
2916 
	/* VF vport indices start from 1 */
2918 	vf += HCLGE_VF_VPORT_START_NUM;
2919 	return &hdev->vport[vf];
2920 }
2921 
2922 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2923 			       struct ifla_vf_info *ivf)
2924 {
2925 	struct hclge_vport *vport = hclge_get_vport(handle);
2926 	struct hclge_dev *hdev = vport->back;
2927 
2928 	vport = hclge_get_vf_vport(hdev, vf);
2929 	if (!vport)
2930 		return -EINVAL;
2931 
2932 	ivf->vf = vf;
2933 	ivf->linkstate = vport->vf_info.link_state;
2934 	ivf->spoofchk = vport->vf_info.spoofchk;
2935 	ivf->trusted = vport->vf_info.trusted;
2936 	ivf->min_tx_rate = 0;
2937 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2938 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2939 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2940 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2941 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
2942 
2943 	return 0;
2944 }
2945 
2946 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2947 				   int link_state)
2948 {
2949 	struct hclge_vport *vport = hclge_get_vport(handle);
2950 	struct hclge_dev *hdev = vport->back;
2951 
2952 	vport = hclge_get_vf_vport(hdev, vf);
2953 	if (!vport)
2954 		return -EINVAL;
2955 
2956 	vport->vf_info.link_state = link_state;
2957 
2958 	return 0;
2959 }
2960 
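/* Decode the vector0 interrupt source. Reset events take priority over
 * MSI-X (hardware error) events, which in turn take priority over mailbox
 * (CMDQ RX) events; @clearval returns the bits that need to be cleared.
 */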
2961 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2962 {
2963 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2964 
2965 	/* fetch the events from their corresponding regs */
2966 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2967 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2968 	msix_src_reg = hclge_read_dev(&hdev->hw,
2969 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2970 
	/* Assumption: if reset and mailbox events happen to be reported
	 * together, only the reset event is processed in this pass and the
	 * mailbox events are deferred. Since the RX CMDQ event has not been
	 * cleared this time, the hardware will raise another interrupt just
	 * for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
2979 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2980 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2981 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2982 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2983 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2984 		hdev->rst_stats.imp_rst_cnt++;
2985 		return HCLGE_VECTOR0_EVENT_RST;
2986 	}
2987 
2988 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2989 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2990 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2991 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2992 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2993 		hdev->rst_stats.global_rst_cnt++;
2994 		return HCLGE_VECTOR0_EVENT_RST;
2995 	}
2996 
2997 	/* check for vector0 msix event source */
2998 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2999 		*clearval = msix_src_reg;
3000 		return HCLGE_VECTOR0_EVENT_ERR;
3001 	}
3002 
3003 	/* check for vector0 mailbox(=CMDQ RX) event source */
3004 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3005 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3006 		*clearval = cmdq_src_reg;
3007 		return HCLGE_VECTOR0_EVENT_MBX;
3008 	}
3009 
3010 	/* print other vector0 event source */
3011 	dev_info(&hdev->pdev->dev,
3012 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3013 		 cmdq_src_reg, msix_src_reg);
3014 	*clearval = msix_src_reg;
3015 
3016 	return HCLGE_VECTOR0_EVENT_OTHER;
3017 }
3018 
3019 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3020 				    u32 regclr)
3021 {
3022 	switch (event_type) {
3023 	case HCLGE_VECTOR0_EVENT_RST:
3024 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3025 		break;
3026 	case HCLGE_VECTOR0_EVENT_MBX:
3027 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3028 		break;
3029 	default:
3030 		break;
3031 	}
3032 }
3033 
3034 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3035 {
3036 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3037 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3038 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3039 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3040 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3041 }
3042 
3043 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3044 {
3045 	writel(enable ? 1 : 0, vector->addr);
3046 }
3047 
3048 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3049 {
3050 	struct hclge_dev *hdev = data;
3051 	u32 clearval = 0;
3052 	u32 event_cause;
3053 
3054 	hclge_enable_vector(&hdev->misc_vector, false);
3055 	event_cause = hclge_check_event_cause(hdev, &clearval);
3056 
	/* vector 0 interrupt is shared with reset and mailbox source events */
3058 	switch (event_cause) {
3059 	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
3070 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3071 		/* fall through */
3072 	case HCLGE_VECTOR0_EVENT_RST:
3073 		hclge_reset_task_schedule(hdev);
3074 		break;
3075 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. we are not handling any mbx task and none is scheduled,
		 *    or
		 * 2. we are handling a mbx task but nothing more is scheduled.
		 * In both cases, schedule the mbx task, as this interrupt
		 * indicates there are more mbx messages to handle.
		 */
3085 		hclge_mbx_task_schedule(hdev);
3086 		break;
3087 	default:
3088 		dev_warn(&hdev->pdev->dev,
3089 			 "received unknown or unhandled event of vector0\n");
3090 		break;
3091 	}
3092 
3093 	hclge_clear_event_cause(hdev, event_cause, clearval);
3094 
	/* Enable the interrupt if it was not caused by a reset. When
	 * clearval equals 0, the interrupt status may have been cleared by
	 * hardware before the driver read the status register; in that case
	 * the vector0 interrupt should also be re-enabled.
	 */
	if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX)
		hclge_enable_vector(&hdev->misc_vector, true);
3104 
3105 	return IRQ_HANDLED;
3106 }
3107 
3108 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3109 {
3110 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3111 		dev_warn(&hdev->pdev->dev,
3112 			 "vector(vector_id %d) has been freed.\n", vector_id);
3113 		return;
3114 	}
3115 
3116 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3117 	hdev->num_msi_left += 1;
3118 	hdev->num_msi_used -= 1;
3119 }
3120 
3121 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3122 {
3123 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3124 
3125 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3126 
3127 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3128 	hdev->vector_status[0] = 0;
3129 
3130 	hdev->num_msi_left -= 1;
3131 	hdev->num_msi_used += 1;
3132 }
3133 
3134 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3135 				      const cpumask_t *mask)
3136 {
3137 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3138 					      affinity_notify);
3139 
3140 	cpumask_copy(&hdev->affinity_mask, mask);
3141 }
3142 
3143 static void hclge_irq_affinity_release(struct kref *ref)
3144 {
3145 }
3146 
3147 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3148 {
3149 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3150 			      &hdev->affinity_mask);
3151 
3152 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3153 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3154 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3155 				  &hdev->affinity_notify);
3156 }
3157 
3158 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3159 {
3160 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3161 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3162 }
3163 
3164 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3165 {
3166 	int ret;
3167 
3168 	hclge_get_misc_vector(hdev);
3169 
	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3171 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3172 		 HCLGE_NAME, pci_name(hdev->pdev));
3173 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3174 			  0, hdev->misc_vector.name, hdev);
3175 	if (ret) {
3176 		hclge_free_vector(hdev, 0);
3177 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3178 			hdev->misc_vector.vector_irq);
3179 	}
3180 
3181 	return ret;
3182 }
3183 
3184 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3185 {
3186 	free_irq(hdev->misc_vector.vector_irq, hdev);
3187 	hclge_free_vector(hdev, 0);
3188 }
3189 
3190 int hclge_notify_client(struct hclge_dev *hdev,
3191 			enum hnae3_reset_notify_type type)
3192 {
3193 	struct hnae3_client *client = hdev->nic_client;
3194 	u16 i;
3195 
3196 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3197 		return 0;
3198 
3199 	if (!client->ops->reset_notify)
3200 		return -EOPNOTSUPP;
3201 
3202 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3203 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3204 		int ret;
3205 
3206 		ret = client->ops->reset_notify(handle, type);
3207 		if (ret) {
3208 			dev_err(&hdev->pdev->dev,
3209 				"notify nic client failed %d(%d)\n", type, ret);
3210 			return ret;
3211 		}
3212 	}
3213 
3214 	return 0;
3215 }
3216 
3217 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3218 				    enum hnae3_reset_notify_type type)
3219 {
3220 	struct hnae3_client *client = hdev->roce_client;
3221 	int ret = 0;
3222 	u16 i;
3223 
3224 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3225 		return 0;
3226 
3227 	if (!client->ops->reset_notify)
3228 		return -EOPNOTSUPP;
3229 
3230 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3231 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3232 
3233 		ret = client->ops->reset_notify(handle, type);
3234 		if (ret) {
3235 			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)\n",
3237 				type, ret);
3238 			return ret;
3239 		}
3240 	}
3241 
3242 	return ret;
3243 }
3244 
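/* Poll the reset status register matching the current reset type until
 * the hardware clears the reset bit, or give up after HCLGE_RESET_WAIT_CNT
 * polls.
 */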
3245 static int hclge_reset_wait(struct hclge_dev *hdev)
3246 {
#define HCLGE_RESET_WAIT_MS	100
3248 #define HCLGE_RESET_WAIT_CNT	350
3249 
3250 	u32 val, reg, reg_bit;
3251 	u32 cnt = 0;
3252 
3253 	switch (hdev->reset_type) {
3254 	case HNAE3_IMP_RESET:
3255 		reg = HCLGE_GLOBAL_RESET_REG;
3256 		reg_bit = HCLGE_IMP_RESET_BIT;
3257 		break;
3258 	case HNAE3_GLOBAL_RESET:
3259 		reg = HCLGE_GLOBAL_RESET_REG;
3260 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3261 		break;
3262 	case HNAE3_FUNC_RESET:
3263 		reg = HCLGE_FUN_RST_ING;
3264 		reg_bit = HCLGE_FUN_RST_ING_B;
3265 		break;
3266 	default:
3267 		dev_err(&hdev->pdev->dev,
3268 			"Wait for unsupported reset type: %d\n",
3269 			hdev->reset_type);
3270 		return -EINVAL;
3271 	}
3272 
3273 	val = hclge_read_dev(&hdev->hw, reg);
3274 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3276 		val = hclge_read_dev(&hdev->hw, reg);
3277 		cnt++;
3278 	}
3279 
3280 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3281 		dev_warn(&hdev->pdev->dev,
3282 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3283 		return -EBUSY;
3284 	}
3285 
3286 	return 0;
3287 }
3288 
3289 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3290 {
3291 	struct hclge_vf_rst_cmd *req;
3292 	struct hclge_desc desc;
3293 
3294 	req = (struct hclge_vf_rst_cmd *)desc.data;
3295 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3296 	req->dest_vfid = func_id;
3297 
3298 	if (reset)
3299 		req->vf_rst = 0x1;
3300 
3301 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3302 }
3303 
3304 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3305 {
3306 	int i;
3307 
3308 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3309 		struct hclge_vport *vport = &hdev->vport[i];
3310 		int ret;
3311 
3312 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3313 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3314 		if (ret) {
3315 			dev_err(&hdev->pdev->dev,
3316 				"set vf(%u) rst failed %d!\n",
3317 				vport->vport_id, ret);
3318 			return ret;
3319 		}
3320 
3321 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3322 			continue;
3323 
3324 		/* Inform VF to process the reset.
3325 		 * hclge_inform_reset_assert_to_vf may fail if VF
3326 		 * driver is not loaded.
3327 		 */
3328 		ret = hclge_inform_reset_assert_to_vf(vport);
3329 		if (ret)
3330 			dev_warn(&hdev->pdev->dev,
3331 				 "inform reset to vf(%u) failed %d!\n",
3332 				 vport->vport_id, ret);
3333 	}
3334 
3335 	return 0;
3336 }
3337 
3338 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3339 {
3340 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3341 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3342 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3343 		return;
3344 
3345 	hclge_mbx_handler(hdev);
3346 
3347 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3348 }
3349 
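/* Wait until every VF reports it is ready for the PF/FLR reset, while still
 * servicing mailbox requests so the VFs can bring their netdevs down.
 */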
3350 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3351 {
3352 	struct hclge_pf_rst_sync_cmd *req;
3353 	struct hclge_desc desc;
3354 	int cnt = 0;
3355 	int ret;
3356 
3357 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3358 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3359 
3360 	do {
		/* VFs bring their netdev down via mbx during PF or FLR reset */
3362 		hclge_mailbox_service_task(hdev);
3363 
3364 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for the VFs to stop IO
		 */
3368 		if (ret == -EOPNOTSUPP) {
3369 			msleep(HCLGE_RESET_SYNC_TIME);
3370 			return;
3371 		} else if (ret) {
3372 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3373 				 ret);
3374 			return;
3375 		} else if (req->all_vf_ready) {
3376 			return;
3377 		}
3378 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3379 		hclge_cmd_reuse_desc(&desc, true);
3380 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3381 
3382 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3383 }
3384 
3385 void hclge_report_hw_error(struct hclge_dev *hdev,
3386 			   enum hnae3_hw_error_type type)
3387 {
3388 	struct hnae3_client *client = hdev->nic_client;
3389 	u16 i;
3390 
3391 	if (!client || !client->ops->process_hw_error ||
3392 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3393 		return;
3394 
3395 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3396 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3397 }
3398 
3399 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3400 {
3401 	u32 reg_val;
3402 
3403 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3404 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3405 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3406 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3407 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3408 	}
3409 
3410 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3411 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3412 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3413 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3414 	}
3415 }
3416 
3417 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3418 {
3419 	struct hclge_desc desc;
3420 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3421 	int ret;
3422 
3423 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3424 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3425 	req->fun_reset_vfid = func_id;
3426 
3427 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3428 	if (ret)
3429 		dev_err(&hdev->pdev->dev,
3430 			"send function reset cmd fail, status =%d\n", ret);
3431 
3432 	return ret;
3433 }
3434 
3435 static void hclge_do_reset(struct hclge_dev *hdev)
3436 {
3437 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3438 	struct pci_dev *pdev = hdev->pdev;
3439 	u32 val;
3440 
3441 	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "Hardware reset not finished\n");
3443 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3444 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3445 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3446 		return;
3447 	}
3448 
3449 	switch (hdev->reset_type) {
3450 	case HNAE3_GLOBAL_RESET:
3451 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3452 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3453 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3454 		dev_info(&pdev->dev, "Global Reset requested\n");
3455 		break;
3456 	case HNAE3_FUNC_RESET:
3457 		dev_info(&pdev->dev, "PF Reset requested\n");
3458 		/* schedule again to check later */
3459 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3460 		hclge_reset_task_schedule(hdev);
3461 		break;
3462 	default:
3463 		dev_warn(&pdev->dev,
3464 			 "Unsupported reset type: %d\n", hdev->reset_type);
3465 		break;
3466 	}
3467 }
3468 
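/* Resolve a pending UNKNOWN reset into a concrete type, then return the
 * highest-priority reset level requested in @addr, clearing the lower
 * priority requests it supersedes, unless a higher-level reset is already
 * in progress.
 */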
3469 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3470 						   unsigned long *addr)
3471 {
3472 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3473 	struct hclge_dev *hdev = ae_dev->priv;
3474 
3475 	/* first, resolve any unknown reset type to the known type(s) */
3476 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3477 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3478 					HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3479 		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
3481 		 */
3482 		if (hclge_handle_hw_msix_error(hdev, addr))
3483 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3484 				 msix_sts_reg);
3485 
3486 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused the
		 * interrupt since it was not possible to do that in interrupt
		 * context (and this is the reason we introduced the new
		 * UNKNOWN reset type). Now that the errors have been handled
		 * and cleared in hardware, we can safely re-enable interrupts.
		 * This is an exception to the norm.
		 */
3494 		hclge_enable_vector(&hdev->misc_vector, true);
3495 	}
3496 
3497 	/* return the highest priority reset level amongst all */
3498 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3499 		rst_level = HNAE3_IMP_RESET;
3500 		clear_bit(HNAE3_IMP_RESET, addr);
3501 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3502 		clear_bit(HNAE3_FUNC_RESET, addr);
3503 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3504 		rst_level = HNAE3_GLOBAL_RESET;
3505 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3506 		clear_bit(HNAE3_FUNC_RESET, addr);
3507 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3508 		rst_level = HNAE3_FUNC_RESET;
3509 		clear_bit(HNAE3_FUNC_RESET, addr);
3510 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3511 		rst_level = HNAE3_FLR_RESET;
3512 		clear_bit(HNAE3_FLR_RESET, addr);
3513 	}
3514 
3515 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3516 	    rst_level < hdev->reset_type)
3517 		return HNAE3_NONE_RESET;
3518 
3519 	return rst_level;
3520 }
3521 
3522 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3523 {
3524 	u32 clearval = 0;
3525 
3526 	switch (hdev->reset_type) {
3527 	case HNAE3_IMP_RESET:
3528 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3529 		break;
3530 	case HNAE3_GLOBAL_RESET:
3531 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3532 		break;
3533 	default:
3534 		break;
3535 	}
3536 
3537 	if (!clearval)
3538 		return;
3539 
3540 	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
3542 	 */
3543 	if (hdev->pdev->revision == 0x20)
3544 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3545 				clearval);
3546 
3547 	hclge_enable_vector(&hdev->misc_vector, true);
3548 }
3549 
3550 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3551 {
3552 	u32 reg_val;
3553 
3554 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3555 	if (enable)
3556 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3557 	else
3558 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3559 
3560 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3561 }
3562 
3563 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3564 {
3565 	int ret;
3566 
3567 	ret = hclge_set_all_vf_rst(hdev, true);
3568 	if (ret)
3569 		return ret;
3570 
3571 	hclge_func_reset_sync_vf(hdev);
3572 
3573 	return 0;
3574 }
3575 
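/* hclge_reset_prepare_wait - per reset type preparation before waiting for
 * hardware to finish the reset
 *
 * For a function reset the VFs are notified and synced, the reset command is
 * asserted and the command queue is marked disabled, since no command is
 * valid again until hclge_cmd_init is called. An FLR only needs the VF
 * notification, while an IMP reset handles the IMP error and sets the IMP
 * reset bit in the PF other-interrupt register. Finally the handshake ready
 * bit is set to tell hardware that the preparatory work is done.
 */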
3576 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3577 {
3578 	u32 reg_val;
3579 	int ret = 0;
3580 
3581 	switch (hdev->reset_type) {
3582 	case HNAE3_FUNC_RESET:
3583 		ret = hclge_func_reset_notify_vf(hdev);
3584 		if (ret)
3585 			return ret;
3586 
3587 		ret = hclge_func_reset_cmd(hdev, 0);
3588 		if (ret) {
3589 			dev_err(&hdev->pdev->dev,
3590 				"asserting function reset fail %d!\n", ret);
3591 			return ret;
3592 		}
3593 
		/* After performing the PF reset, it is not necessary to do
		 * any mailbox handling or send any command to firmware,
		 * because any mailbox handling or command to firmware is only
		 * valid after hclge_cmd_init is called.
		 */
3599 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3600 		hdev->rst_stats.pf_rst_cnt++;
3601 		break;
3602 	case HNAE3_FLR_RESET:
3603 		ret = hclge_func_reset_notify_vf(hdev);
3604 		if (ret)
3605 			return ret;
3606 		break;
3607 	case HNAE3_IMP_RESET:
3608 		hclge_handle_imp_error(hdev);
3609 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3610 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3611 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3612 		break;
3613 	default:
3614 		break;
3615 	}
3616 
3617 	/* inform hardware that preparatory work is done */
3618 	msleep(HCLGE_RESET_SYNC_TIME);
3619 	hclge_reset_handshake(hdev, true);
3620 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3621 
3622 	return ret;
3623 }
3624 
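/* hclge_reset_err_handle - decide whether a failed reset should be retried
 *
 * Returns true when the reset task should be rescheduled: either another
 * reset is still pending, or the failure count is below the retry limit, in
 * which case the current reset type is re-queued. If a new reset interrupt
 * has already been raised, the current cause is cleared and no retry is
 * scheduled here. Once the limit is reached, the handshake bit is restored,
 * the reset state is dumped and HCLGE_STATE_RST_FAIL is set.
 */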
3625 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3626 {
3627 #define MAX_RESET_FAIL_CNT 5
3628 
3629 	if (hdev->reset_pending) {
3630 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3631 			 hdev->reset_pending);
3632 		return true;
3633 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3634 		   HCLGE_RESET_INT_M) {
3635 		dev_info(&hdev->pdev->dev,
3636 			 "reset failed because new reset interrupt\n");
3637 		hclge_clear_reset_cause(hdev);
3638 		return false;
3639 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3640 		hdev->rst_stats.reset_fail_cnt++;
3641 		set_bit(hdev->reset_type, &hdev->reset_pending);
3642 		dev_info(&hdev->pdev->dev,
3643 			 "re-schedule reset task(%u)\n",
3644 			 hdev->rst_stats.reset_fail_cnt);
3645 		return true;
3646 	}
3647 
3648 	hclge_clear_reset_cause(hdev);
3649 
	/* recover the handshake status when the reset fails */
3651 	hclge_reset_handshake(hdev, true);
3652 
3653 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3654 
3655 	hclge_dbg_dump_rst_info(hdev);
3656 
3657 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3658 
3659 	return false;
3660 }
3661 
3662 static int hclge_set_rst_done(struct hclge_dev *hdev)
3663 {
3664 	struct hclge_pf_rst_done_cmd *req;
3665 	struct hclge_desc desc;
3666 	int ret;
3667 
3668 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3669 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3670 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3671 
3672 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3673 	/* To be compatible with the old firmware, which does not support
3674 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3675 	 * return success
3676 	 */
3677 	if (ret == -EOPNOTSUPP) {
3678 		dev_warn(&hdev->pdev->dev,
3679 			 "current firmware does not support command(0x%x)!\n",
3680 			 HCLGE_OPC_PF_RST_DONE);
3681 		return 0;
3682 	} else if (ret) {
3683 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3684 			ret);
3685 	}
3686 
3687 	return ret;
3688 }
3689 
3690 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3691 {
3692 	int ret = 0;
3693 
3694 	switch (hdev->reset_type) {
3695 	case HNAE3_FUNC_RESET:
3696 		/* fall through */
3697 	case HNAE3_FLR_RESET:
3698 		ret = hclge_set_all_vf_rst(hdev, false);
3699 		break;
3700 	case HNAE3_GLOBAL_RESET:
3701 		/* fall through */
3702 	case HNAE3_IMP_RESET:
3703 		ret = hclge_set_rst_done(hdev);
3704 		break;
3705 	default:
3706 		break;
3707 	}
3708 
3709 	/* clear up the handshake status after re-initialize done */
3710 	hclge_reset_handshake(hdev, false);
3711 
3712 	return ret;
3713 }
3714 
3715 static int hclge_reset_stack(struct hclge_dev *hdev)
3716 {
3717 	int ret;
3718 
3719 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3720 	if (ret)
3721 		return ret;
3722 
3723 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3724 	if (ret)
3725 		return ret;
3726 
3727 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3728 	if (ret)
3729 		return ret;
3730 
3731 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3732 }
3733 
3734 static int hclge_reset_prepare(struct hclge_dev *hdev)
3735 {
3736 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3737 	int ret;
3738 
3739 	/* Initialize ae_dev reset status as well, in case enet layer wants to
3740 	 * know if device is undergoing reset
3741 	 */
3742 	ae_dev->reset_type = hdev->reset_type;
3743 	hdev->rst_stats.reset_cnt++;
3744 	/* perform reset of the stack & ae device for a client */
3745 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3746 	if (ret)
3747 		return ret;
3748 
3749 	rtnl_lock();
3750 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3751 	rtnl_unlock();
3752 	if (ret)
3753 		return ret;
3754 
3755 	return hclge_reset_prepare_wait(hdev);
3756 }
3757 
3758 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3759 {
3760 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3761 	enum hnae3_reset_type reset_level;
3762 	int ret;
3763 
3764 	hdev->rst_stats.hw_reset_done_cnt++;
3765 
3766 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3767 	if (ret)
3768 		return ret;
3769 
3770 	rtnl_lock();
3771 	ret = hclge_reset_stack(hdev);
3772 	rtnl_unlock();
3773 	if (ret)
3774 		return ret;
3775 
3776 	hclge_clear_reset_cause(hdev);
3777 
3778 	ret = hclge_reset_prepare_up(hdev);
3779 	if (ret)
		return ret;

3783 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE client init error only when the reset has already
	 * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times, so the final retry can
	 * still bring the NIC back up
	 */
3787 	if (ret &&
3788 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3789 		return ret;
3790 
3791 	rtnl_lock();
3792 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3793 	rtnl_unlock();
3794 	if (ret)
3795 		return ret;
3796 
3797 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3798 	if (ret)
3799 		return ret;
3800 
3801 	hdev->last_reset_time = jiffies;
3802 	hdev->rst_stats.reset_fail_cnt = 0;
3803 	hdev->rst_stats.reset_done_cnt++;
3804 	ae_dev->reset_type = HNAE3_NONE_RESET;
3805 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3806 
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
3811 	reset_level = hclge_get_reset_level(ae_dev,
3812 					    &hdev->default_reset_request);
3813 	if (reset_level != HNAE3_NONE_RESET)
3814 		set_bit(reset_level, &hdev->reset_request);
3815 
3816 	return 0;
3817 }
3818 
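/* hclge_reset - run one full reset cycle
 *
 * The cycle has three phases: prepare (notify the clients, assert the reset
 * and handshake with hardware), wait for the hardware reset to complete, and
 * rebuild (re-initialize the ae device, clients and reset bookkeeping). Any
 * failure falls through to the error handler, which decides whether the
 * reset task should be rescheduled for another attempt.
 */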
3819 static void hclge_reset(struct hclge_dev *hdev)
3820 {
3821 	if (hclge_reset_prepare(hdev))
3822 		goto err_reset;
3823 
3824 	if (hclge_reset_wait(hdev))
3825 		goto err_reset;
3826 
3827 	if (hclge_reset_rebuild(hdev))
3828 		goto err_reset;
3829 
3830 	return;
3831 
3832 err_reset:
3833 	if (hclge_reset_err_handle(hdev))
3834 		hclge_reset_task_schedule(hdev);
3835 }
3836 
3837 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3838 {
3839 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3840 	struct hclge_dev *hdev = ae_dev->priv;
3841 
	/* We might end up getting called for two reasons:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to restore normalcy is to reset.
	 * 2. A new reset request from the stack due to a timeout.
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check whether this is a new reset request and not just
	 * the watchdog hitting us again because the last attempt did not
	 * succeed. We know it is new if the last reset request did not occur
	 * very recently (watchdog timer = 5 * HZ, so check after a
	 * sufficiently large time, say 4 * 5 * HZ). For a new request, reset
	 * the "reset level" to PF reset. If it is a repeat of the most recent
	 * request, throttle it: do not allow another one before
	 * HCLGE_RESET_INTERVAL has elapsed.
	 */
3857 	if (!handle)
3858 		handle = &hdev->vport[0].nic;
3859 
3860 	if (time_before(jiffies, (hdev->last_reset_time +
3861 				  HCLGE_RESET_INTERVAL))) {
3862 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3863 		return;
3864 	} else if (hdev->default_reset_request) {
3865 		hdev->reset_level =
3866 			hclge_get_reset_level(ae_dev,
3867 					      &hdev->default_reset_request);
3868 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3869 		hdev->reset_level = HNAE3_FUNC_RESET;
3870 	}
3871 
3872 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3873 		 hdev->reset_level);
3874 
3875 	/* request reset & schedule reset task */
3876 	set_bit(hdev->reset_level, &hdev->reset_request);
3877 	hclge_reset_task_schedule(hdev);
3878 
3879 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3880 		hdev->reset_level++;
3881 }
3882 
3883 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3884 					enum hnae3_reset_type rst_type)
3885 {
3886 	struct hclge_dev *hdev = ae_dev->priv;
3887 
3888 	set_bit(rst_type, &hdev->default_reset_request);
3889 }
3890 
3891 static void hclge_reset_timer(struct timer_list *t)
3892 {
3893 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3894 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
3898 	if (!hdev->default_reset_request)
3899 		return;
3900 
3901 	dev_info(&hdev->pdev->dev,
3902 		 "triggering reset in reset timer\n");
3903 	hclge_reset_event(hdev->pdev, NULL);
3904 }
3905 
3906 static void hclge_reset_subtask(struct hclge_dev *hdev)
3907 {
3908 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3909 
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, we need to wait for the
	 * hardware to complete the reset.
	 *    a. If we are able to figure out in a reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. else, we can come back later to check this status, so
	 *       re-schedule now.
	 */
3919 	hdev->last_reset_time = jiffies;
3920 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3921 	if (hdev->reset_type != HNAE3_NONE_RESET)
3922 		hclge_reset(hdev);
3923 
3924 	/* check if we got any *new* reset requests to be honored */
3925 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3926 	if (hdev->reset_type != HNAE3_NONE_RESET)
3927 		hclge_do_reset(hdev);
3928 
3929 	hdev->reset_type = HNAE3_NONE_RESET;
3930 }
3931 
3932 static void hclge_reset_service_task(struct hclge_dev *hdev)
3933 {
3934 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3935 		return;
3936 
3937 	down(&hdev->reset_sem);
3938 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3939 
3940 	hclge_reset_subtask(hdev);
3941 
3942 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3943 	up(&hdev->reset_sem);
3944 }
3945 
3946 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3947 {
3948 	int i;
3949 
	/* start from vport 1 because the PF (vport 0) is always alive */
3951 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3952 		struct hclge_vport *vport = &hdev->vport[i];
3953 
3954 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3955 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3956 
3957 		/* If vf is not alive, set to default value */
3958 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3959 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3960 	}
3961 }
3962 
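/* hclge_periodic_service_task - periodic housekeeping, roughly once a second
 *
 * The link status is refreshed on every invocation, but if the last full run
 * finished less than a second ago the task is simply rescheduled with the
 * remaining time, so the heavier work (vport alive check, statistics, port
 * info, VLAN and aRFS sync) keeps an approximately one second period even
 * when the service task is kicked more often, e.g. by mailbox events.
 */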
3963 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3964 {
3965 	unsigned long delta = round_jiffies_relative(HZ);
3966 
	/* Always handle the link updating to make sure the link state is
	 * updated when it is triggered by a mailbox event.
	 */
3970 	hclge_update_link_status(hdev);
3971 
3972 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3973 		delta = jiffies - hdev->last_serv_processed;
3974 
3975 		if (delta < round_jiffies_relative(HZ)) {
3976 			delta = round_jiffies_relative(HZ) - delta;
3977 			goto out;
3978 		}
3979 	}
3980 
3981 	hdev->serv_processed_cnt++;
3982 	hclge_update_vport_alive(hdev);
3983 
3984 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3985 		hdev->last_serv_processed = jiffies;
3986 		goto out;
3987 	}
3988 
3989 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3990 		hclge_update_stats_for_all(hdev);
3991 
3992 	hclge_update_port_info(hdev);
3993 	hclge_sync_vlan_filter(hdev);
3994 
3995 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3996 		hclge_rfs_filter_expire(hdev);
3997 
3998 	hdev->last_serv_processed = jiffies;
3999 
4000 out:
4001 	hclge_task_schedule(hdev, delta);
4002 }
4003 
4004 static void hclge_service_task(struct work_struct *work)
4005 {
4006 	struct hclge_dev *hdev =
4007 		container_of(work, struct hclge_dev, service_task.work);
4008 
4009 	hclge_reset_service_task(hdev);
4010 	hclge_mailbox_service_task(hdev);
4011 	hclge_periodic_service_task(hdev);
4012 
4013 	/* Handle reset and mbx again in case periodical task delays the
4014 	 * handling by calling hclge_task_schedule() in
4015 	 * hclge_periodic_service_task().
4016 	 */
4017 	hclge_reset_service_task(hdev);
4018 	hclge_mailbox_service_task(hdev);
4019 }
4020 
4021 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4022 {
4023 	/* VF handle has no client */
4024 	if (!handle->client)
4025 		return container_of(handle, struct hclge_vport, nic);
4026 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4027 		return container_of(handle, struct hclge_vport, roce);
4028 	else
4029 		return container_of(handle, struct hclge_vport, nic);
4030 }
4031 
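/* hclge_get_vector - allocate MSI-X vectors for a vport
 *
 * Vector 0 is used for the misc interrupt, so the search starts at index 1.
 * The request is clamped to the NIC vectors and to what is still free; for
 * each free slot the irq number and the per vector register address inside
 * io_base are filled in and the slot is marked as owned by this vport.
 * Returns the number of vectors actually allocated, which may be fewer than
 * requested.
 */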
4032 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4033 			    struct hnae3_vector_info *vector_info)
4034 {
4035 	struct hclge_vport *vport = hclge_get_vport(handle);
4036 	struct hnae3_vector_info *vector = vector_info;
4037 	struct hclge_dev *hdev = vport->back;
4038 	int alloc = 0;
4039 	int i, j;
4040 
4041 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4042 	vector_num = min(hdev->num_msi_left, vector_num);
4043 
4044 	for (j = 0; j < vector_num; j++) {
4045 		for (i = 1; i < hdev->num_msi; i++) {
4046 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4047 				vector->vector = pci_irq_vector(hdev->pdev, i);
4048 				vector->io_addr = hdev->hw.io_base +
4049 					HCLGE_VECTOR_REG_BASE +
4050 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4051 					vport->vport_id *
4052 					HCLGE_VECTOR_VF_OFFSET;
4053 				hdev->vector_status[i] = vport->vport_id;
4054 				hdev->vector_irq[i] = vector->vector;
4055 
4056 				vector++;
4057 				alloc++;
4058 
4059 				break;
4060 			}
4061 		}
4062 	}
4063 	hdev->num_msi_left -= alloc;
4064 	hdev->num_msi_used += alloc;
4065 
4066 	return alloc;
4067 }
4068 
4069 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4070 {
4071 	int i;
4072 
4073 	for (i = 0; i < hdev->num_msi; i++)
4074 		if (vector == hdev->vector_irq[i])
4075 			return i;
4076 
4077 	return -EINVAL;
4078 }
4079 
4080 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4081 {
4082 	struct hclge_vport *vport = hclge_get_vport(handle);
4083 	struct hclge_dev *hdev = vport->back;
4084 	int vector_id;
4085 
4086 	vector_id = hclge_get_vector_index(hdev, vector);
4087 	if (vector_id < 0) {
4088 		dev_err(&hdev->pdev->dev,
4089 			"Get vector index fail. vector = %d\n", vector);
4090 		return vector_id;
4091 	}
4092 
4093 	hclge_free_vector(hdev, vector_id);
4094 
4095 	return 0;
4096 }
4097 
4098 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4099 {
4100 	return HCLGE_RSS_KEY_SIZE;
4101 }
4102 
4103 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4104 {
4105 	return HCLGE_RSS_IND_TBL_SIZE;
4106 }
4107 
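/* hclge_set_rss_algo_key - write the RSS hash algorithm and hash key
 *
 * The key does not fit into a single command descriptor, so it is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes; each descriptor repeats the hash
 * algorithm and carries the chunk offset in hash_config.
 */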
4108 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4109 				  const u8 hfunc, const u8 *key)
4110 {
4111 	struct hclge_rss_config_cmd *req;
4112 	unsigned int key_offset = 0;
4113 	struct hclge_desc desc;
4114 	int key_counts;
4115 	int key_size;
4116 	int ret;
4117 
4118 	key_counts = HCLGE_RSS_KEY_SIZE;
4119 	req = (struct hclge_rss_config_cmd *)desc.data;
4120 
4121 	while (key_counts) {
4122 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4123 					   false);
4124 
4125 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4126 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4127 
4128 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4129 		memcpy(req->hash_key,
4130 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4131 
4132 		key_counts -= key_size;
4133 		key_offset++;
4134 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4135 		if (ret) {
4136 			dev_err(&hdev->pdev->dev,
4137 				"Configure RSS config fail, status = %d\n",
4138 				ret);
4139 			return ret;
4140 		}
4141 	}
4142 	return 0;
4143 }
4144 
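/* hclge_set_rss_indir_table - write the RSS indirection table
 *
 * The table is programmed HCLGE_RSS_CFG_TBL_SIZE entries at a time, each
 * descriptor carrying the start index of the slice it configures.
 */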
4145 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4146 {
4147 	struct hclge_rss_indirection_table_cmd *req;
4148 	struct hclge_desc desc;
4149 	int i, j;
4150 	int ret;
4151 
4152 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4153 
4154 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4155 		hclge_cmd_setup_basic_desc
4156 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4157 
4158 		req->start_table_index =
4159 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4160 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4161 
4162 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4163 			req->rss_result[j] =
4164 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4165 
4166 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4167 		if (ret) {
4168 			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
4170 				ret);
4171 			return ret;
4172 		}
4173 	}
4174 	return 0;
4175 }
4176 
4177 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4178 				 u16 *tc_size, u16 *tc_offset)
4179 {
4180 	struct hclge_rss_tc_mode_cmd *req;
4181 	struct hclge_desc desc;
4182 	int ret;
4183 	int i;
4184 
4185 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4186 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4187 
4188 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4189 		u16 mode = 0;
4190 
4191 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4192 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4193 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4194 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4195 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4196 
4197 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4198 	}
4199 
4200 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4201 	if (ret)
4202 		dev_err(&hdev->pdev->dev,
4203 			"Configure rss tc mode fail, status = %d\n", ret);
4204 
4205 	return ret;
4206 }
4207 
4208 static void hclge_get_rss_type(struct hclge_vport *vport)
4209 {
4210 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4211 	    vport->rss_tuple_sets.ipv4_udp_en ||
4212 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4213 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4214 	    vport->rss_tuple_sets.ipv6_udp_en ||
4215 	    vport->rss_tuple_sets.ipv6_sctp_en)
4216 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4217 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4218 		 vport->rss_tuple_sets.ipv6_fragment_en)
4219 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4220 	else
4221 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4222 }
4223 
4224 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4225 {
4226 	struct hclge_rss_input_tuple_cmd *req;
4227 	struct hclge_desc desc;
4228 	int ret;
4229 
4230 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4231 
4232 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4233 
	/* Get the tuple config from the PF (vport 0) */
4235 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4236 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4237 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4238 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4239 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4240 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4241 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4242 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4243 	hclge_get_rss_type(&hdev->vport[0]);
4244 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4245 	if (ret)
4246 		dev_err(&hdev->pdev->dev,
4247 			"Configure rss input fail, status = %d\n", ret);
4248 	return ret;
4249 }
4250 
4251 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4252 			 u8 *key, u8 *hfunc)
4253 {
4254 	struct hclge_vport *vport = hclge_get_vport(handle);
4255 	int i;
4256 
4257 	/* Get hash algorithm */
4258 	if (hfunc) {
4259 		switch (vport->rss_algo) {
4260 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4261 			*hfunc = ETH_RSS_HASH_TOP;
4262 			break;
4263 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4264 			*hfunc = ETH_RSS_HASH_XOR;
4265 			break;
4266 		default:
4267 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4268 			break;
4269 		}
4270 	}
4271 
4272 	/* Get the RSS Key required by the user */
4273 	if (key)
4274 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4275 
4276 	/* Get indirect table */
4277 	if (indir)
4278 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
			indir[i] = vport->rss_indirection_tbl[i];
4280 
4281 	return 0;
4282 }
4283 
4284 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4285 			 const  u8 *key, const  u8 hfunc)
4286 {
4287 	struct hclge_vport *vport = hclge_get_vport(handle);
4288 	struct hclge_dev *hdev = vport->back;
4289 	u8 hash_algo;
4290 	int ret, i;
4291 
	/* Set the RSS Hash Key if specified by the user */
4293 	if (key) {
4294 		switch (hfunc) {
4295 		case ETH_RSS_HASH_TOP:
4296 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4297 			break;
4298 		case ETH_RSS_HASH_XOR:
4299 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4300 			break;
4301 		case ETH_RSS_HASH_NO_CHANGE:
4302 			hash_algo = vport->rss_algo;
4303 			break;
4304 		default:
4305 			return -EINVAL;
4306 		}
4307 
4308 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4309 		if (ret)
4310 			return ret;
4311 
		/* Update the shadow RSS key with the key specified by user */
4313 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4314 		vport->rss_algo = hash_algo;
4315 	}
4316 
4317 	/* Update the shadow RSS table with user specified qids */
4318 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4319 		vport->rss_indirection_tbl[i] = indir[i];
4320 
4321 	/* Update the hardware */
4322 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4323 }
4324 
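/* Translate the RXH_* flags of an ethtool rxnfc request into the driver's
 * tuple enable bits; SCTP flows additionally set HCLGE_V_TAG_BIT.
 */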
4325 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4326 {
4327 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4328 
4329 	if (nfc->data & RXH_L4_B_2_3)
4330 		hash_sets |= HCLGE_D_PORT_BIT;
4331 	else
4332 		hash_sets &= ~HCLGE_D_PORT_BIT;
4333 
4334 	if (nfc->data & RXH_IP_SRC)
4335 		hash_sets |= HCLGE_S_IP_BIT;
4336 	else
4337 		hash_sets &= ~HCLGE_S_IP_BIT;
4338 
4339 	if (nfc->data & RXH_IP_DST)
4340 		hash_sets |= HCLGE_D_IP_BIT;
4341 	else
4342 		hash_sets &= ~HCLGE_D_IP_BIT;
4343 
4344 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4345 		hash_sets |= HCLGE_V_TAG_BIT;
4346 
4347 	return hash_sets;
4348 }
4349 
4350 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4351 			       struct ethtool_rxnfc *nfc)
4352 {
4353 	struct hclge_vport *vport = hclge_get_vport(handle);
4354 	struct hclge_dev *hdev = vport->back;
4355 	struct hclge_rss_input_tuple_cmd *req;
4356 	struct hclge_desc desc;
4357 	u8 tuple_sets;
4358 	int ret;
4359 
4360 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4361 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4362 		return -EINVAL;
4363 
4364 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4365 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4366 
4367 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4368 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4369 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4370 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4371 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4372 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4373 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4374 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4375 
4376 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4377 	switch (nfc->flow_type) {
4378 	case TCP_V4_FLOW:
4379 		req->ipv4_tcp_en = tuple_sets;
4380 		break;
4381 	case TCP_V6_FLOW:
4382 		req->ipv6_tcp_en = tuple_sets;
4383 		break;
4384 	case UDP_V4_FLOW:
4385 		req->ipv4_udp_en = tuple_sets;
4386 		break;
4387 	case UDP_V6_FLOW:
4388 		req->ipv6_udp_en = tuple_sets;
4389 		break;
4390 	case SCTP_V4_FLOW:
4391 		req->ipv4_sctp_en = tuple_sets;
4392 		break;
4393 	case SCTP_V6_FLOW:
4394 		if ((nfc->data & RXH_L4_B_0_1) ||
4395 		    (nfc->data & RXH_L4_B_2_3))
4396 			return -EINVAL;
4397 
4398 		req->ipv6_sctp_en = tuple_sets;
4399 		break;
4400 	case IPV4_FLOW:
4401 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4402 		break;
4403 	case IPV6_FLOW:
4404 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4405 		break;
4406 	default:
4407 		return -EINVAL;
4408 	}
4409 
4410 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4411 	if (ret) {
4412 		dev_err(&hdev->pdev->dev,
4413 			"Set rss tuple fail, status = %d\n", ret);
4414 		return ret;
4415 	}
4416 
4417 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4418 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4419 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4420 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4421 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4422 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4423 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4424 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4425 	hclge_get_rss_type(vport);
4426 	return 0;
4427 }
4428 
4429 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4430 			       struct ethtool_rxnfc *nfc)
4431 {
4432 	struct hclge_vport *vport = hclge_get_vport(handle);
4433 	u8 tuple_sets;
4434 
4435 	nfc->data = 0;
4436 
4437 	switch (nfc->flow_type) {
4438 	case TCP_V4_FLOW:
4439 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4440 		break;
4441 	case UDP_V4_FLOW:
4442 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4443 		break;
4444 	case TCP_V6_FLOW:
4445 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4446 		break;
4447 	case UDP_V6_FLOW:
4448 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4449 		break;
4450 	case SCTP_V4_FLOW:
4451 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4452 		break;
4453 	case SCTP_V6_FLOW:
4454 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4455 		break;
4456 	case IPV4_FLOW:
4457 	case IPV6_FLOW:
4458 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4459 		break;
4460 	default:
4461 		return -EINVAL;
4462 	}
4463 
4464 	if (!tuple_sets)
4465 		return 0;
4466 
4467 	if (tuple_sets & HCLGE_D_PORT_BIT)
4468 		nfc->data |= RXH_L4_B_2_3;
4469 	if (tuple_sets & HCLGE_S_PORT_BIT)
4470 		nfc->data |= RXH_L4_B_0_1;
4471 	if (tuple_sets & HCLGE_D_IP_BIT)
4472 		nfc->data |= RXH_IP_DST;
4473 	if (tuple_sets & HCLGE_S_IP_BIT)
4474 		nfc->data |= RXH_IP_SRC;
4475 
4476 	return 0;
4477 }
4478 
4479 static int hclge_get_tc_size(struct hnae3_handle *handle)
4480 {
4481 	struct hclge_vport *vport = hclge_get_vport(handle);
4482 	struct hclge_dev *hdev = vport->back;
4483 
4484 	return hdev->rss_size_max;
4485 }
4486 
4487 int hclge_rss_init_hw(struct hclge_dev *hdev)
4488 {
4489 	struct hclge_vport *vport = hdev->vport;
4490 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4491 	u16 rss_size = vport[0].alloc_rss_size;
4492 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4493 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4494 	u8 *key = vport[0].rss_hash_key;
4495 	u8 hfunc = vport[0].rss_algo;
4496 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4497 	u16 roundup_size;
4498 	unsigned int i;
4499 	int ret;
4500 
4501 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4502 	if (ret)
4503 		return ret;
4504 
4505 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4506 	if (ret)
4507 		return ret;
4508 
4509 	ret = hclge_set_rss_input_tuple(hdev);
4510 	if (ret)
4511 		return ret;
4512 
	/* Each TC has the same queue size, and the tc_size written to
	 * hardware is the log2 of rss_size rounded up to a power of two
	 * (e.g. an rss_size of 24 rounds up to 32, so tc_size is 5); the
	 * actual queue size is limited by the indirection table.
	 */
4517 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4518 		dev_err(&hdev->pdev->dev,
4519 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4520 			rss_size);
4521 		return -EINVAL;
4522 	}
4523 
4524 	roundup_size = roundup_pow_of_two(rss_size);
4525 	roundup_size = ilog2(roundup_size);
4526 
4527 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4528 		tc_valid[i] = 0;
4529 
4530 		if (!(hdev->hw_tc_map & BIT(i)))
4531 			continue;
4532 
4533 		tc_valid[i] = 1;
4534 		tc_size[i] = roundup_size;
4535 		tc_offset[i] = rss_size * i;
4536 	}
4537 
4538 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4539 }
4540 
4541 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4542 {
4543 	struct hclge_vport *vport = hdev->vport;
4544 	int i, j;
4545 
4546 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4547 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4548 			vport[j].rss_indirection_tbl[i] =
4549 				i % vport[j].alloc_rss_size;
4550 	}
4551 }
4552 
4553 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4554 {
4555 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4556 	struct hclge_vport *vport = hdev->vport;
4557 
4558 	if (hdev->pdev->revision >= 0x21)
4559 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4560 
4561 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4562 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4563 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4564 		vport[i].rss_tuple_sets.ipv4_udp_en =
4565 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4566 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4567 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4568 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4569 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4570 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4571 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4572 		vport[i].rss_tuple_sets.ipv6_udp_en =
4573 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4574 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4575 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4576 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4577 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4578 
4579 		vport[i].rss_algo = rss_algo;
4580 
4581 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4582 		       HCLGE_RSS_KEY_SIZE);
4583 	}
4584 
4585 	hclge_rss_indir_init_cfg(hdev);
4586 }
4587 
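/* hclge_bind_ring_with_vector - map or unmap a chain of rings to a vector
 *
 * Each command descriptor holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD ring
 * entries, so the ring chain is walked and flushed to hardware in batches,
 * with a final command for any remainder. @en selects between the add and
 * the delete opcode.
 */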
4588 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4589 				int vector_id, bool en,
4590 				struct hnae3_ring_chain_node *ring_chain)
4591 {
4592 	struct hclge_dev *hdev = vport->back;
4593 	struct hnae3_ring_chain_node *node;
4594 	struct hclge_desc desc;
4595 	struct hclge_ctrl_vector_chain_cmd *req =
4596 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4597 	enum hclge_cmd_status status;
4598 	enum hclge_opcode_type op;
4599 	u16 tqp_type_and_id;
4600 	int i;
4601 
4602 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4603 	hclge_cmd_setup_basic_desc(&desc, op, false);
4604 	req->int_vector_id = vector_id;
4605 
4606 	i = 0;
4607 	for (node = ring_chain; node; node = node->next) {
4608 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4610 				HCLGE_INT_TYPE_S,
4611 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4612 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4613 				HCLGE_TQP_ID_S, node->tqp_index);
4614 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4615 				HCLGE_INT_GL_IDX_S,
4616 				hnae3_get_field(node->int_gl_idx,
4617 						HNAE3_RING_GL_IDX_M,
4618 						HNAE3_RING_GL_IDX_S));
4619 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4620 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4621 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4622 			req->vfid = vport->vport_id;
4623 
4624 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4625 			if (status) {
4626 				dev_err(&hdev->pdev->dev,
4627 					"Map TQP fail, status is %d.\n",
4628 					status);
4629 				return -EIO;
4630 			}
4631 			i = 0;
4632 
			hclge_cmd_setup_basic_desc(&desc, op, false);
4636 			req->int_vector_id = vector_id;
4637 		}
4638 	}
4639 
4640 	if (i > 0) {
4641 		req->int_cause_num = i;
4642 		req->vfid = vport->vport_id;
4643 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4644 		if (status) {
4645 			dev_err(&hdev->pdev->dev,
4646 				"Map TQP fail, status is %d.\n", status);
4647 			return -EIO;
4648 		}
4649 	}
4650 
4651 	return 0;
4652 }
4653 
4654 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4655 				    struct hnae3_ring_chain_node *ring_chain)
4656 {
4657 	struct hclge_vport *vport = hclge_get_vport(handle);
4658 	struct hclge_dev *hdev = vport->back;
4659 	int vector_id;
4660 
4661 	vector_id = hclge_get_vector_index(hdev, vector);
4662 	if (vector_id < 0) {
4663 		dev_err(&hdev->pdev->dev,
4664 			"failed to get vector index. vector=%d\n", vector);
4665 		return vector_id;
4666 	}
4667 
4668 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4669 }
4670 
4671 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4672 				       struct hnae3_ring_chain_node *ring_chain)
4673 {
4674 	struct hclge_vport *vport = hclge_get_vport(handle);
4675 	struct hclge_dev *hdev = vport->back;
4676 	int vector_id, ret;
4677 
4678 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4679 		return 0;
4680 
4681 	vector_id = hclge_get_vector_index(hdev, vector);
4682 	if (vector_id < 0) {
4683 		dev_err(&handle->pdev->dev,
4684 			"Get vector index fail. ret =%d\n", vector_id);
4685 		return vector_id;
4686 	}
4687 
4688 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4689 	if (ret)
4690 		dev_err(&handle->pdev->dev,
4691 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4692 			vector_id, ret);
4693 
4694 	return ret;
4695 }
4696 
4697 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4698 				      struct hclge_promisc_param *param)
4699 {
4700 	struct hclge_promisc_cfg_cmd *req;
4701 	struct hclge_desc desc;
4702 	int ret;
4703 
4704 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4705 
4706 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4707 	req->vf_id = param->vf_id;
4708 
	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported
	 * on pdev revision 0x20; newer revisions support them. Setting these
	 * two fields does not cause an error when the driver sends the
	 * command to the firmware on revision 0x20.
	 */
4714 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4715 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4716 
4717 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4718 	if (ret)
4719 		dev_err(&hdev->pdev->dev,
4720 			"Set promisc mode fail, status is %d.\n", ret);
4721 
4722 	return ret;
4723 }
4724 
4725 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4726 				     bool en_uc, bool en_mc, bool en_bc,
4727 				     int vport_id)
4728 {
4729 	if (!param)
4730 		return;
4731 
4732 	memset(param, 0, sizeof(struct hclge_promisc_param));
4733 	if (en_uc)
4734 		param->enable = HCLGE_PROMISC_EN_UC;
4735 	if (en_mc)
4736 		param->enable |= HCLGE_PROMISC_EN_MC;
4737 	if (en_bc)
4738 		param->enable |= HCLGE_PROMISC_EN_BC;
4739 	param->vf_id = vport_id;
4740 }
4741 
4742 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4743 				 bool en_mc_pmc, bool en_bc_pmc)
4744 {
4745 	struct hclge_dev *hdev = vport->back;
4746 	struct hclge_promisc_param param;
4747 
4748 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4749 				 vport->vport_id);
4750 	return hclge_cmd_set_promisc_mode(hdev, &param);
4751 }
4752 
4753 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4754 				  bool en_mc_pmc)
4755 {
4756 	struct hclge_vport *vport = hclge_get_vport(handle);
4757 	bool en_bc_pmc = true;
4758 
	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode.
	 */
4763 	if (handle->pdev->revision == 0x20)
4764 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4765 
4766 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4767 					    en_bc_pmc);
4768 }
4769 
4770 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4771 {
4772 	struct hclge_get_fd_mode_cmd *req;
4773 	struct hclge_desc desc;
4774 	int ret;
4775 
4776 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4777 
4778 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4779 
4780 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4781 	if (ret) {
4782 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4783 		return ret;
4784 	}
4785 
4786 	*fd_mode = req->mode;
4787 
4788 	return ret;
4789 }
4790 
4791 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4792 				   u32 *stage1_entry_num,
4793 				   u32 *stage2_entry_num,
4794 				   u16 *stage1_counter_num,
4795 				   u16 *stage2_counter_num)
4796 {
4797 	struct hclge_get_fd_allocation_cmd *req;
4798 	struct hclge_desc desc;
4799 	int ret;
4800 
4801 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4802 
4803 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4804 
4805 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4806 	if (ret) {
4807 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4808 			ret);
4809 		return ret;
4810 	}
4811 
4812 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4813 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4814 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4815 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4816 
4817 	return ret;
4818 }
4819 
4820 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4821 {
4822 	struct hclge_set_fd_key_config_cmd *req;
4823 	struct hclge_fd_key_cfg *stage;
4824 	struct hclge_desc desc;
4825 	int ret;
4826 
4827 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4828 
4829 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4830 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4831 	req->stage = stage_num;
4832 	req->key_select = stage->key_sel;
4833 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4834 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4835 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4836 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4837 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4838 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4839 
4840 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4841 	if (ret)
4842 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4843 
4844 	return ret;
4845 }
4846 
4847 static int hclge_init_fd_config(struct hclge_dev *hdev)
4848 {
4849 #define LOW_2_WORDS		0x03
4850 	struct hclge_fd_key_cfg *key_cfg;
4851 	int ret;
4852 
4853 	if (!hnae3_dev_fd_supported(hdev))
4854 		return 0;
4855 
4856 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4857 	if (ret)
4858 		return ret;
4859 
4860 	switch (hdev->fd_cfg.fd_mode) {
4861 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4862 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4863 		break;
4864 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4865 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4866 		break;
4867 	default:
4868 		dev_err(&hdev->pdev->dev,
4869 			"Unsupported flow director mode %u\n",
4870 			hdev->fd_cfg.fd_mode);
4871 		return -EOPNOTSUPP;
4872 	}
4873 
4874 	hdev->fd_cfg.proto_support =
4875 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4876 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4877 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4879 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4880 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4881 	key_cfg->outer_sipv6_word_en = 0;
4882 	key_cfg->outer_dipv6_word_en = 0;
4883 
4884 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4885 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4886 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4887 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4888 
	/* With the max 400-bit key, ether type tuples are also supported */
4890 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4891 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4892 		key_cfg->tuple_active |=
4893 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4894 	}
4895 
	/* roce_type is used to filter RoCE frames;
	 * dst_vport is used to restrict the rule to a specific vport
	 */
4899 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4900 
4901 	ret = hclge_get_fd_allocation(hdev,
4902 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4903 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4904 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4905 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4906 	if (ret)
4907 		return ret;
4908 
4909 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4910 }
4911 
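/* hclge_fd_tcam_config - write one flow director TCAM entry
 *
 * The key is larger than one descriptor, so it is split across three chained
 * descriptors. @sel_x selects whether the X or the Y part of the TCAM key is
 * written, and the entry valid bit is only set together with the X part when
 * a rule is added.
 */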
4912 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4913 				int loc, u8 *key, bool is_add)
4914 {
4915 	struct hclge_fd_tcam_config_1_cmd *req1;
4916 	struct hclge_fd_tcam_config_2_cmd *req2;
4917 	struct hclge_fd_tcam_config_3_cmd *req3;
4918 	struct hclge_desc desc[3];
4919 	int ret;
4920 
4921 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4922 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4923 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4924 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4925 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4926 
4927 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4928 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4929 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4930 
4931 	req1->stage = stage;
4932 	req1->xy_sel = sel_x ? 1 : 0;
4933 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4934 	req1->index = cpu_to_le32(loc);
4935 	req1->entry_vld = sel_x ? is_add : 0;
4936 
4937 	if (key) {
4938 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4939 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4940 		       sizeof(req2->tcam_data));
4941 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4942 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4943 	}
4944 
4945 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4946 	if (ret)
4947 		dev_err(&hdev->pdev->dev,
4948 			"config tcam key fail, ret=%d\n",
4949 			ret);
4950 
4951 	return ret;
4952 }
4953 
4954 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4955 			      struct hclge_fd_ad_data *action)
4956 {
4957 	struct hclge_fd_ad_config_cmd *req;
4958 	struct hclge_desc desc;
4959 	u64 ad_data = 0;
4960 	int ret;
4961 
4962 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4963 
4964 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4965 	req->index = cpu_to_le32(loc);
4966 	req->stage = stage;
4967 
4968 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4969 		      action->write_rule_id_to_bd);
4970 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4971 			action->rule_id);
4972 	ad_data <<= 32;
4973 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4974 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4975 		      action->forward_to_direct_queue);
4976 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4977 			action->queue_id);
4978 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4979 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4980 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4981 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4982 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4983 			action->counter_id);
4984 
4985 	req->ad_data = cpu_to_le64(ad_data);
4986 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4987 	if (ret)
4988 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4989 
4990 	return ret;
4991 }
4992 
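/* hclge_fd_convert_tuple - convert one rule tuple into TCAM key bytes
 *
 * Returns true when the tuple occupies space in the key, so that the caller
 * advances the key cursor; this includes tuples the rule marks as unused,
 * whose key bytes stay zero. Returns false for tuples the key layout does
 * not contain. The value/mask pair of the tuple is turned into the X/Y TCAM
 * encoding by the calc_x()/calc_y() helpers.
 */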
4993 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4994 				   struct hclge_fd_rule *rule)
4995 {
4996 	u16 tmp_x_s, tmp_y_s;
4997 	u32 tmp_x_l, tmp_y_l;
4998 	int i;
4999 
5000 	if (rule->unused_tuple & tuple_bit)
5001 		return true;
5002 
5003 	switch (tuple_bit) {
5004 	case 0:
5005 		return false;
5006 	case BIT(INNER_DST_MAC):
5007 		for (i = 0; i < ETH_ALEN; i++) {
5008 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5009 			       rule->tuples_mask.dst_mac[i]);
5010 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5011 			       rule->tuples_mask.dst_mac[i]);
5012 		}
5013 
5014 		return true;
5015 	case BIT(INNER_SRC_MAC):
5016 		for (i = 0; i < ETH_ALEN; i++) {
5017 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5018 			       rule->tuples.src_mac[i]);
5019 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5020 			       rule->tuples.src_mac[i]);
5021 		}
5022 
5023 		return true;
5024 	case BIT(INNER_VLAN_TAG_FST):
5025 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5026 		       rule->tuples_mask.vlan_tag1);
5027 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5028 		       rule->tuples_mask.vlan_tag1);
5029 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5030 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5031 
5032 		return true;
5033 	case BIT(INNER_ETH_TYPE):
5034 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5035 		       rule->tuples_mask.ether_proto);
5036 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5037 		       rule->tuples_mask.ether_proto);
5038 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5039 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5040 
5041 		return true;
5042 	case BIT(INNER_IP_TOS):
5043 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5044 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5045 
5046 		return true;
5047 	case BIT(INNER_IP_PROTO):
5048 		calc_x(*key_x, rule->tuples.ip_proto,
5049 		       rule->tuples_mask.ip_proto);
5050 		calc_y(*key_y, rule->tuples.ip_proto,
5051 		       rule->tuples_mask.ip_proto);
5052 
5053 		return true;
5054 	case BIT(INNER_SRC_IP):
5055 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5056 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5057 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5058 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5059 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5060 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5061 
5062 		return true;
5063 	case BIT(INNER_DST_IP):
5064 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5065 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5066 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5067 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5068 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5069 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5070 
5071 		return true;
5072 	case BIT(INNER_SRC_PORT):
5073 		calc_x(tmp_x_s, rule->tuples.src_port,
5074 		       rule->tuples_mask.src_port);
5075 		calc_y(tmp_y_s, rule->tuples.src_port,
5076 		       rule->tuples_mask.src_port);
5077 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5078 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5079 
5080 		return true;
5081 	case BIT(INNER_DST_PORT):
5082 		calc_x(tmp_x_s, rule->tuples.dst_port,
5083 		       rule->tuples_mask.dst_port);
5084 		calc_y(tmp_y_s, rule->tuples.dst_port,
5085 		       rule->tuples_mask.dst_port);
5086 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5087 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5088 
5089 		return true;
5090 	default:
5091 		return false;
5092 	}
5093 }
5094 
5095 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5096 				 u8 vf_id, u8 network_port_id)
5097 {
5098 	u32 port_number = 0;
5099 
5100 	if (port_type == HOST_PORT) {
5101 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5102 				pf_id);
5103 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5104 				vf_id);
5105 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5106 	} else {
5107 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5108 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5109 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5110 	}
5111 
5112 	return port_number;
5113 }
5114 
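/* hclge_fd_convert_meta_data - build the meta data portion of the TCAM key
 *
 * Only the meta data fields enabled in the key config are packed: ROCE_TYPE
 * is filled with NIC_PACKET and DST_VPORT with the host port number derived
 * from the rule's vf_id. The packed value is converted to the X/Y encoding
 * with a full mask and shifted so that it occupies the most significant bits
 * of the meta data region.
 */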
5115 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5116 				       __le32 *key_x, __le32 *key_y,
5117 				       struct hclge_fd_rule *rule)
5118 {
5119 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5120 	u8 cur_pos = 0, tuple_size, shift_bits;
5121 	unsigned int i;
5122 
5123 	for (i = 0; i < MAX_META_DATA; i++) {
5124 		tuple_size = meta_data_key_info[i].key_length;
5125 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5126 
5127 		switch (tuple_bit) {
5128 		case BIT(ROCE_TYPE):
5129 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5130 			cur_pos += tuple_size;
5131 			break;
5132 		case BIT(DST_VPORT):
5133 			port_number = hclge_get_port_number(HOST_PORT, 0,
5134 							    rule->vf_id, 0);
5135 			hnae3_set_field(meta_data,
5136 					GENMASK(cur_pos + tuple_size, cur_pos),
5137 					cur_pos, port_number);
5138 			cur_pos += tuple_size;
5139 			break;
5140 		default:
5141 			break;
5142 		}
5143 	}
5144 
5145 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5146 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5147 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5148 
5149 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5150 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5151 }
5152 
/* A complete key is a combination of the meta data key and the tuple key.
 * The meta data key is stored in the MSB region, the tuple key in the LSB
 * region, and unused bits are filled with 0.
 */
5157 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5158 			    struct hclge_fd_rule *rule)
5159 {
5160 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5161 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5162 	u8 *cur_key_x, *cur_key_y;
5163 	unsigned int i;
5164 	int ret, tuple_size;
5165 	u8 meta_data_region;
5166 
5167 	memset(key_x, 0, sizeof(key_x));
5168 	memset(key_y, 0, sizeof(key_y));
5169 	cur_key_x = key_x;
5170 	cur_key_y = key_y;
5171 
	for (i = 0; i < MAX_TUPLE; i++) {
5173 		bool tuple_valid;
5174 		u32 check_tuple;
5175 
5176 		tuple_size = tuple_key_info[i].key_length / 8;
5177 		check_tuple = key_cfg->tuple_active & BIT(i);
5178 
5179 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5180 						     cur_key_y, rule);
5181 		if (tuple_valid) {
5182 			cur_key_x += tuple_size;
5183 			cur_key_y += tuple_size;
5184 		}
5185 	}
5186 
5187 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5188 			MAX_META_DATA_LENGTH / 8;
5189 
5190 	hclge_fd_convert_meta_data(key_cfg,
5191 				   (__le32 *)(key_x + meta_data_region),
5192 				   (__le32 *)(key_y + meta_data_region),
5193 				   rule);
5194 
5195 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5196 				   true);
5197 	if (ret) {
5198 		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5201 		return ret;
5202 	}
5203 
5204 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5205 				   true);
5206 	if (ret)
5207 		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5210 	return ret;
5211 }
5212 
5213 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5214 			       struct hclge_fd_rule *rule)
5215 {
5216 	struct hclge_fd_ad_data ad_data;
5217 
5218 	ad_data.ad_id = rule->location;
5219 
5220 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5221 		ad_data.drop_packet = true;
5222 		ad_data.forward_to_direct_queue = false;
5223 		ad_data.queue_id = 0;
5224 	} else {
5225 		ad_data.drop_packet = false;
5226 		ad_data.forward_to_direct_queue = true;
5227 		ad_data.queue_id = rule->queue_id;
5228 	}
5229 
5230 	ad_data.use_counter = false;
5231 	ad_data.counter_id = 0;
5232 
5233 	ad_data.use_next_stage = false;
5234 	ad_data.next_input_key = 0;
5235 
5236 	ad_data.write_rule_id_to_bd = true;
5237 	ad_data.rule_id = rule->location;
5238 
5239 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5240 }
5241 
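/* hclge_fd_check_spec - validate an ethtool flow spec for the flow director
 *
 * Rejects locations beyond the stage 1 rule capacity, flow types the key
 * configuration does not support and user-defined bytes, then records every
 * field the user left as zero in the @unused tuple bitmap so that it can be
 * masked out when the rule key is built.
 */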
5242 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5243 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5244 {
5245 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5246 	struct ethtool_usrip4_spec *usr_ip4_spec;
5247 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5248 	struct ethtool_usrip6_spec *usr_ip6_spec;
5249 	struct ethhdr *ether_spec;
5250 
5251 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5252 		return -EINVAL;
5253 
5254 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5255 		return -EOPNOTSUPP;
5256 
5257 	if ((fs->flow_type & FLOW_EXT) &&
5258 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5259 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5260 		return -EOPNOTSUPP;
5261 	}
5262 
5263 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5264 	case SCTP_V4_FLOW:
5265 	case TCP_V4_FLOW:
5266 	case UDP_V4_FLOW:
5267 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5268 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5269 
5270 		if (!tcp_ip4_spec->ip4src)
5271 			*unused |= BIT(INNER_SRC_IP);
5272 
5273 		if (!tcp_ip4_spec->ip4dst)
5274 			*unused |= BIT(INNER_DST_IP);
5275 
5276 		if (!tcp_ip4_spec->psrc)
5277 			*unused |= BIT(INNER_SRC_PORT);
5278 
5279 		if (!tcp_ip4_spec->pdst)
5280 			*unused |= BIT(INNER_DST_PORT);
5281 
5282 		if (!tcp_ip4_spec->tos)
5283 			*unused |= BIT(INNER_IP_TOS);
5284 
5285 		break;
5286 	case IP_USER_FLOW:
5287 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5288 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5289 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5290 
5291 		if (!usr_ip4_spec->ip4src)
5292 			*unused |= BIT(INNER_SRC_IP);
5293 
5294 		if (!usr_ip4_spec->ip4dst)
5295 			*unused |= BIT(INNER_DST_IP);
5296 
5297 		if (!usr_ip4_spec->tos)
5298 			*unused |= BIT(INNER_IP_TOS);
5299 
5300 		if (!usr_ip4_spec->proto)
5301 			*unused |= BIT(INNER_IP_PROTO);
5302 
5303 		if (usr_ip4_spec->l4_4_bytes)
5304 			return -EOPNOTSUPP;
5305 
5306 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5307 			return -EOPNOTSUPP;
5308 
5309 		break;
5310 	case SCTP_V6_FLOW:
5311 	case TCP_V6_FLOW:
5312 	case UDP_V6_FLOW:
5313 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5314 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5315 			BIT(INNER_IP_TOS);
5316 
		/* check whether the src/dst ip address is used */
5318 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5319 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5320 			*unused |= BIT(INNER_SRC_IP);
5321 
5322 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5323 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5324 			*unused |= BIT(INNER_DST_IP);
5325 
5326 		if (!tcp_ip6_spec->psrc)
5327 			*unused |= BIT(INNER_SRC_PORT);
5328 
5329 		if (!tcp_ip6_spec->pdst)
5330 			*unused |= BIT(INNER_DST_PORT);
5331 
5332 		if (tcp_ip6_spec->tclass)
5333 			return -EOPNOTSUPP;
5334 
5335 		break;
5336 	case IPV6_USER_FLOW:
5337 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5338 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5339 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5340 			BIT(INNER_DST_PORT);
5341 
5342 		/* check whether src/dst ip address used */
5343 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5344 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5345 			*unused |= BIT(INNER_SRC_IP);
5346 
5347 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5348 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5349 			*unused |= BIT(INNER_DST_IP);
5350 
5351 		if (!usr_ip6_spec->l4_proto)
5352 			*unused |= BIT(INNER_IP_PROTO);
5353 
5354 		if (usr_ip6_spec->tclass)
5355 			return -EOPNOTSUPP;
5356 
5357 		if (usr_ip6_spec->l4_4_bytes)
5358 			return -EOPNOTSUPP;
5359 
5360 		break;
5361 	case ETHER_FLOW:
5362 		ether_spec = &fs->h_u.ether_spec;
5363 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5364 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5365 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5366 
5367 		if (is_zero_ether_addr(ether_spec->h_source))
5368 			*unused |= BIT(INNER_SRC_MAC);
5369 
5370 		if (is_zero_ether_addr(ether_spec->h_dest))
5371 			*unused |= BIT(INNER_DST_MAC);
5372 
5373 		if (!ether_spec->h_proto)
5374 			*unused |= BIT(INNER_ETH_TYPE);
5375 
5376 		break;
5377 	default:
5378 		return -EOPNOTSUPP;
5379 	}
5380 
5381 	if (fs->flow_type & FLOW_EXT) {
5382 		if (fs->h_ext.vlan_etype)
5383 			return -EOPNOTSUPP;
5384 		if (!fs->h_ext.vlan_tci)
5385 			*unused |= BIT(INNER_VLAN_TAG_FST);
5386 
5387 		if (fs->m_ext.vlan_tci) {
5388 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5389 				return -EINVAL;
5390 		}
5391 	} else {
5392 		*unused |= BIT(INNER_VLAN_TAG_FST);
5393 	}
5394 
5395 	if (fs->flow_type & FLOW_MAC_EXT) {
5396 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5397 			return -EOPNOTSUPP;
5398 
5399 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5400 			*unused |= BIT(INNER_DST_MAC);
5401 		else
5402 			*unused &= ~(BIT(INNER_DST_MAC));
5403 	}
5404 
5405 	return 0;
5406 }
5407 
5408 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5409 {
5410 	struct hclge_fd_rule *rule = NULL;
5411 	struct hlist_node *node2;
5412 
5413 	spin_lock_bh(&hdev->fd_rule_lock);
5414 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5415 		if (rule->location >= location)
5416 			break;
5417 	}
5418 
5419 	spin_unlock_bh(&hdev->fd_rule_lock);
5420 
5421 	return rule && rule->location == location;
5422 }
5423 
5424 /* the caller must hold fd_rule_lock */
5425 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5426 				     struct hclge_fd_rule *new_rule,
5427 				     u16 location,
5428 				     bool is_add)
5429 {
5430 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5431 	struct hlist_node *node2;
5432 
5433 	if (is_add && !new_rule)
5434 		return -EINVAL;
5435 
5436 	hlist_for_each_entry_safe(rule, node2,
5437 				  &hdev->fd_rule_list, rule_node) {
5438 		if (rule->location >= location)
5439 			break;
5440 		parent = rule;
5441 	}
5442 
5443 	if (rule && rule->location == location) {
5444 		hlist_del(&rule->rule_node);
5445 		kfree(rule);
5446 		hdev->hclge_fd_rule_num--;
5447 
5448 		if (!is_add) {
5449 			if (!hdev->hclge_fd_rule_num)
5450 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5451 			clear_bit(location, hdev->fd_bmap);
5452 
5453 			return 0;
5454 		}
5455 	} else if (!is_add) {
5456 		dev_err(&hdev->pdev->dev,
5457 			"delete fail, rule %u does not exist\n",
5458 			location);
5459 		return -EINVAL;
5460 	}
5461 
5462 	INIT_HLIST_NODE(&new_rule->rule_node);
5463 
5464 	if (parent)
5465 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5466 	else
5467 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5468 
5469 	set_bit(location, hdev->fd_bmap);
5470 	hdev->hclge_fd_rule_num++;
5471 	hdev->fd_active_type = new_rule->rule_type;
5472 
5473 	return 0;
5474 }
5475 
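/* Convert an ethtool flow spec into the driver's rule tuples and tuple
 * masks, including the implicit ether_proto/ip_proto for the L3/L4 flow
 * types and the optional VLAN and destination MAC extensions.
 */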
5476 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5477 			      struct ethtool_rx_flow_spec *fs,
5478 			      struct hclge_fd_rule *rule)
5479 {
5480 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5481 
5482 	switch (flow_type) {
5483 	case SCTP_V4_FLOW:
5484 	case TCP_V4_FLOW:
5485 	case UDP_V4_FLOW:
5486 		rule->tuples.src_ip[IPV4_INDEX] =
5487 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5488 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5489 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5490 
5491 		rule->tuples.dst_ip[IPV4_INDEX] =
5492 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5493 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5494 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5495 
5496 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5497 		rule->tuples_mask.src_port =
5498 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5499 
5500 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5501 		rule->tuples_mask.dst_port =
5502 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5503 
5504 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5505 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5506 
5507 		rule->tuples.ether_proto = ETH_P_IP;
5508 		rule->tuples_mask.ether_proto = 0xFFFF;
5509 
5510 		break;
5511 	case IP_USER_FLOW:
5512 		rule->tuples.src_ip[IPV4_INDEX] =
5513 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5514 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5515 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5516 
5517 		rule->tuples.dst_ip[IPV4_INDEX] =
5518 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5519 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5520 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5521 
5522 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5523 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5524 
5525 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5526 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5527 
5528 		rule->tuples.ether_proto = ETH_P_IP;
5529 		rule->tuples_mask.ether_proto = 0xFFFF;
5530 
5531 		break;
5532 	case SCTP_V6_FLOW:
5533 	case TCP_V6_FLOW:
5534 	case UDP_V6_FLOW:
5535 		be32_to_cpu_array(rule->tuples.src_ip,
5536 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5537 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5538 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5539 
5540 		be32_to_cpu_array(rule->tuples.dst_ip,
5541 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5542 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5543 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5544 
5545 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5546 		rule->tuples_mask.src_port =
5547 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5548 
5549 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5550 		rule->tuples_mask.dst_port =
5551 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5552 
5553 		rule->tuples.ether_proto = ETH_P_IPV6;
5554 		rule->tuples_mask.ether_proto = 0xFFFF;
5555 
5556 		break;
5557 	case IPV6_USER_FLOW:
5558 		be32_to_cpu_array(rule->tuples.src_ip,
5559 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5560 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5561 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5562 
5563 		be32_to_cpu_array(rule->tuples.dst_ip,
5564 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5565 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5566 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5567 
5568 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5569 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5570 
5571 		rule->tuples.ether_proto = ETH_P_IPV6;
5572 		rule->tuples_mask.ether_proto = 0xFFFF;
5573 
5574 		break;
5575 	case ETHER_FLOW:
5576 		ether_addr_copy(rule->tuples.src_mac,
5577 				fs->h_u.ether_spec.h_source);
5578 		ether_addr_copy(rule->tuples_mask.src_mac,
5579 				fs->m_u.ether_spec.h_source);
5580 
5581 		ether_addr_copy(rule->tuples.dst_mac,
5582 				fs->h_u.ether_spec.h_dest);
5583 		ether_addr_copy(rule->tuples_mask.dst_mac,
5584 				fs->m_u.ether_spec.h_dest);
5585 
5586 		rule->tuples.ether_proto =
5587 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5588 		rule->tuples_mask.ether_proto =
5589 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5590 
5591 		break;
5592 	default:
5593 		return -EOPNOTSUPP;
5594 	}
5595 
5596 	switch (flow_type) {
5597 	case SCTP_V4_FLOW:
5598 	case SCTP_V6_FLOW:
5599 		rule->tuples.ip_proto = IPPROTO_SCTP;
5600 		rule->tuples_mask.ip_proto = 0xFF;
5601 		break;
5602 	case TCP_V4_FLOW:
5603 	case TCP_V6_FLOW:
5604 		rule->tuples.ip_proto = IPPROTO_TCP;
5605 		rule->tuples_mask.ip_proto = 0xFF;
5606 		break;
5607 	case UDP_V4_FLOW:
5608 	case UDP_V6_FLOW:
5609 		rule->tuples.ip_proto = IPPROTO_UDP;
5610 		rule->tuples_mask.ip_proto = 0xFF;
5611 		break;
5612 	default:
5613 		break;
5614 	}
5615 
5616 	if (fs->flow_type & FLOW_EXT) {
5617 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5618 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5619 	}
5620 
5621 	if (fs->flow_type & FLOW_MAC_EXT) {
5622 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5623 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5624 	}
5625 
5626 	return 0;
5627 }
5628 
5629 /* the caller must hold fd_rule_lock */
5630 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5631 				struct hclge_fd_rule *rule)
5632 {
5633 	int ret;
5634 
5635 	if (!rule) {
5636 		dev_err(&hdev->pdev->dev,
5637 			"The flow director rule is NULL\n");
5638 		return -EINVAL;
5639 	}
5640 
5641 	/* adding to the rule list never fails here, no need to check return value */
5642 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5643 
5644 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5645 	if (ret)
5646 		goto clear_rule;
5647 
5648 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5649 	if (ret)
5650 		goto clear_rule;
5651 
5652 	return 0;
5653 
5654 clear_rule:
5655 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5656 	return ret;
5657 }
5658 
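/* Add a flow director rule requested through the ethtool set_rxnfc path,
 * e.g. roughly something like:
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.1 dst-port 80 action 3 loc 1
 * (the command above is only an illustration). User-configured rules and
 * aRFS rules are mutually exclusive, so existing aRFS rules are cleared
 * before the new rule is programmed.
 */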
5659 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5660 			      struct ethtool_rxnfc *cmd)
5661 {
5662 	struct hclge_vport *vport = hclge_get_vport(handle);
5663 	struct hclge_dev *hdev = vport->back;
5664 	u16 dst_vport_id = 0, q_index = 0;
5665 	struct ethtool_rx_flow_spec *fs;
5666 	struct hclge_fd_rule *rule;
5667 	u32 unused = 0;
5668 	u8 action;
5669 	int ret;
5670 
5671 	if (!hnae3_dev_fd_supported(hdev))
5672 		return -EOPNOTSUPP;
5673 
5674 	if (!hdev->fd_en) {
5675 		dev_warn(&hdev->pdev->dev,
5676 			 "Please enable flow director first\n");
5677 		return -EOPNOTSUPP;
5678 	}
5679 
5680 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5681 
5682 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5683 	if (ret) {
5684 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5685 		return ret;
5686 	}
5687 
5688 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5689 		action = HCLGE_FD_ACTION_DROP_PACKET;
5690 	} else {
5691 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5692 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5693 		u16 tqps;
5694 
5695 		if (vf > hdev->num_req_vfs) {
5696 			dev_err(&hdev->pdev->dev,
5697 				"Error: vf id (%u) > max vf num (%u)\n",
5698 				vf, hdev->num_req_vfs);
5699 			return -EINVAL;
5700 		}
5701 
5702 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5703 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5704 
5705 		if (ring >= tqps) {
5706 			dev_err(&hdev->pdev->dev,
5707 				"Error: queue id (%u) > max queue id (%u)\n",
5708 				ring, tqps - 1);
5709 			return -EINVAL;
5710 		}
5711 
5712 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5713 		q_index = ring;
5714 	}
5715 
5716 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5717 	if (!rule)
5718 		return -ENOMEM;
5719 
5720 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5721 	if (ret) {
5722 		kfree(rule);
5723 		return ret;
5724 	}
5725 
5726 	rule->flow_type = fs->flow_type;
5727 
5728 	rule->location = fs->location;
5729 	rule->unused_tuple = unused;
5730 	rule->vf_id = dst_vport_id;
5731 	rule->queue_id = q_index;
5732 	rule->action = action;
5733 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5734 
5735 	/* to avoid rule conflicts, clear all existing arfs rules when the
5736 	 * user configures a rule via ethtool
5737 	 */
5738 	hclge_clear_arfs_rules(handle);
5739 
5740 	spin_lock_bh(&hdev->fd_rule_lock);
5741 	ret = hclge_fd_config_rule(hdev, rule);
5742 
5743 	spin_unlock_bh(&hdev->fd_rule_lock);
5744 
5745 	return ret;
5746 }
5747 
5748 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5749 			      struct ethtool_rxnfc *cmd)
5750 {
5751 	struct hclge_vport *vport = hclge_get_vport(handle);
5752 	struct hclge_dev *hdev = vport->back;
5753 	struct ethtool_rx_flow_spec *fs;
5754 	int ret;
5755 
5756 	if (!hnae3_dev_fd_supported(hdev))
5757 		return -EOPNOTSUPP;
5758 
5759 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5760 
5761 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5762 		return -EINVAL;
5763 
5764 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5765 		dev_err(&hdev->pdev->dev,
5766 			"Delete fail, rule %u does not exist\n", fs->location);
5767 		return -ENOENT;
5768 	}
5769 
5770 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5771 				   NULL, false);
5772 	if (ret)
5773 		return ret;
5774 
5775 	spin_lock_bh(&hdev->fd_rule_lock);
5776 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5777 
5778 	spin_unlock_bh(&hdev->fd_rule_lock);
5779 
5780 	return ret;
5781 }
5782 
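/* Disable every rule currently programmed in stage 1 of the TCAM; when
 * @clear_list is true, also free the software rule list and reset the
 * rule bitmap and counters.
 */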
5783 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5784 				     bool clear_list)
5785 {
5786 	struct hclge_vport *vport = hclge_get_vport(handle);
5787 	struct hclge_dev *hdev = vport->back;
5788 	struct hclge_fd_rule *rule;
5789 	struct hlist_node *node;
5790 	u16 location;
5791 
5792 	if (!hnae3_dev_fd_supported(hdev))
5793 		return;
5794 
5795 	spin_lock_bh(&hdev->fd_rule_lock);
5796 	for_each_set_bit(location, hdev->fd_bmap,
5797 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5798 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5799 				     NULL, false);
5800 
5801 	if (clear_list) {
5802 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5803 					  rule_node) {
5804 			hlist_del(&rule->rule_node);
5805 			kfree(rule);
5806 		}
5807 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5808 		hdev->hclge_fd_rule_num = 0;
5809 		bitmap_zero(hdev->fd_bmap,
5810 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5811 	}
5812 
5813 	spin_unlock_bh(&hdev->fd_rule_lock);
5814 }
5815 
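/* Re-program the software rule list into the hardware after a reset.
 * Rules that cannot be restored are dropped from the list instead of
 * failing the reset.
 */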
5816 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5817 {
5818 	struct hclge_vport *vport = hclge_get_vport(handle);
5819 	struct hclge_dev *hdev = vport->back;
5820 	struct hclge_fd_rule *rule;
5821 	struct hlist_node *node;
5822 	int ret;
5823 
5824 	/* Return ok here, because reset error handling will check this
5825 	 * return value. If error is returned here, the reset process will
5826 	 * fail.
5827 	 */
5828 	if (!hnae3_dev_fd_supported(hdev))
5829 		return 0;
5830 
5831 	/* if fd is disabled, the rules should not be restored during reset */
5832 	if (!hdev->fd_en)
5833 		return 0;
5834 
5835 	spin_lock_bh(&hdev->fd_rule_lock);
5836 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5837 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5838 		if (!ret)
5839 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5840 
5841 		if (ret) {
5842 			dev_warn(&hdev->pdev->dev,
5843 				 "Restore rule %u failed, remove it\n",
5844 				 rule->location);
5845 			clear_bit(rule->location, hdev->fd_bmap);
5846 			hlist_del(&rule->rule_node);
5847 			kfree(rule);
5848 			hdev->hclge_fd_rule_num--;
5849 		}
5850 	}
5851 
5852 	if (hdev->hclge_fd_rule_num)
5853 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5854 
5855 	spin_unlock_bh(&hdev->fd_rule_lock);
5856 
5857 	return 0;
5858 }
5859 
5860 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5861 				 struct ethtool_rxnfc *cmd)
5862 {
5863 	struct hclge_vport *vport = hclge_get_vport(handle);
5864 	struct hclge_dev *hdev = vport->back;
5865 
5866 	if (!hnae3_dev_fd_supported(hdev))
5867 		return -EOPNOTSUPP;
5868 
5869 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5870 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5871 
5872 	return 0;
5873 }
5874 
5875 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5876 				  struct ethtool_rxnfc *cmd)
5877 {
5878 	struct hclge_vport *vport = hclge_get_vport(handle);
5879 	struct hclge_fd_rule *rule = NULL;
5880 	struct hclge_dev *hdev = vport->back;
5881 	struct ethtool_rx_flow_spec *fs;
5882 	struct hlist_node *node2;
5883 
5884 	if (!hnae3_dev_fd_supported(hdev))
5885 		return -EOPNOTSUPP;
5886 
5887 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5888 
5889 	spin_lock_bh(&hdev->fd_rule_lock);
5890 
5891 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5892 		if (rule->location >= fs->location)
5893 			break;
5894 	}
5895 
5896 	if (!rule || fs->location != rule->location) {
5897 		spin_unlock_bh(&hdev->fd_rule_lock);
5898 
5899 		return -ENOENT;
5900 	}
5901 
5902 	fs->flow_type = rule->flow_type;
5903 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5904 	case SCTP_V4_FLOW:
5905 	case TCP_V4_FLOW:
5906 	case UDP_V4_FLOW:
5907 		fs->h_u.tcp_ip4_spec.ip4src =
5908 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5909 		fs->m_u.tcp_ip4_spec.ip4src =
5910 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5911 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5912 
5913 		fs->h_u.tcp_ip4_spec.ip4dst =
5914 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5915 		fs->m_u.tcp_ip4_spec.ip4dst =
5916 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5917 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5918 
5919 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5920 		fs->m_u.tcp_ip4_spec.psrc =
5921 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5922 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5923 
5924 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5925 		fs->m_u.tcp_ip4_spec.pdst =
5926 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5927 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5928 
5929 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5930 		fs->m_u.tcp_ip4_spec.tos =
5931 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5932 				0 : rule->tuples_mask.ip_tos;
5933 
5934 		break;
5935 	case IP_USER_FLOW:
5936 		fs->h_u.usr_ip4_spec.ip4src =
5937 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5938 		fs->m_u.usr_ip4_spec.ip4src =
5939 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5940 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5941 
5942 		fs->h_u.usr_ip4_spec.ip4dst =
5943 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5944 		fs->m_u.usr_ip4_spec.ip4dst =
5945 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5946 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5947 
5948 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5949 		fs->m_u.usr_ip4_spec.tos =
5950 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5951 				0 : rule->tuples_mask.ip_tos;
5952 
5953 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5954 		fs->m_u.usr_ip4_spec.proto =
5955 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5956 				0 : rule->tuples_mask.ip_proto;
5957 
5958 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5959 
5960 		break;
5961 	case SCTP_V6_FLOW:
5962 	case TCP_V6_FLOW:
5963 	case UDP_V6_FLOW:
5964 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5965 				  rule->tuples.src_ip, IPV6_SIZE);
5966 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5967 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5968 			       sizeof(int) * IPV6_SIZE);
5969 		else
5970 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5971 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5972 
5973 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5974 				  rule->tuples.dst_ip, IPV6_SIZE);
5975 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5976 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5977 			       sizeof(int) * IPV6_SIZE);
5978 		else
5979 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5980 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5981 
5982 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5983 		fs->m_u.tcp_ip6_spec.psrc =
5984 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5985 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5986 
5987 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5988 		fs->m_u.tcp_ip6_spec.pdst =
5989 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5990 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5991 
5992 		break;
5993 	case IPV6_USER_FLOW:
5994 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5995 				  rule->tuples.src_ip, IPV6_SIZE);
5996 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5997 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5998 			       sizeof(int) * IPV6_SIZE);
5999 		else
6000 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6001 					  rule->tuples_mask.src_ip, IPV6_SIZE);
6002 
6003 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6004 				  rule->tuples.dst_ip, IPV6_SIZE);
6005 		if (rule->unused_tuple & BIT(INNER_DST_IP))
6006 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6007 			       sizeof(int) * IPV6_SIZE);
6008 		else
6009 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6010 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
6011 
6012 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6013 		fs->m_u.usr_ip6_spec.l4_proto =
6014 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6015 				0 : rule->tuples_mask.ip_proto;
6016 
6017 		break;
6018 	case ETHER_FLOW:
6019 		ether_addr_copy(fs->h_u.ether_spec.h_source,
6020 				rule->tuples.src_mac);
6021 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6022 			eth_zero_addr(fs->m_u.ether_spec.h_source);
6023 		else
6024 			ether_addr_copy(fs->m_u.ether_spec.h_source,
6025 					rule->tuples_mask.src_mac);
6026 
6027 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
6028 				rule->tuples.dst_mac);
6029 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6030 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6031 		else
6032 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6033 					rule->tuples_mask.dst_mac);
6034 
6035 		fs->h_u.ether_spec.h_proto =
6036 				cpu_to_be16(rule->tuples.ether_proto);
6037 		fs->m_u.ether_spec.h_proto =
6038 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6039 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6040 
6041 		break;
6042 	default:
6043 		spin_unlock_bh(&hdev->fd_rule_lock);
6044 		return -EOPNOTSUPP;
6045 	}
6046 
6047 	if (fs->flow_type & FLOW_EXT) {
6048 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6049 		fs->m_ext.vlan_tci =
6050 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6051 				cpu_to_be16(VLAN_VID_MASK) :
6052 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6053 	}
6054 
6055 	if (fs->flow_type & FLOW_MAC_EXT) {
6056 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6057 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6058 			eth_zero_addr(fs->m_ext.h_dest);
6059 		else
6060 			ether_addr_copy(fs->m_ext.h_dest,
6061 					rule->tuples_mask.dst_mac);
6062 	}
6063 
6064 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6065 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6066 	} else {
6067 		u64 vf_id;
6068 
6069 		fs->ring_cookie = rule->queue_id;
6070 		vf_id = rule->vf_id;
6071 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6072 		fs->ring_cookie |= vf_id;
6073 	}
6074 
6075 	spin_unlock_bh(&hdev->fd_rule_lock);
6076 
6077 	return 0;
6078 }
6079 
6080 static int hclge_get_all_rules(struct hnae3_handle *handle,
6081 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6082 {
6083 	struct hclge_vport *vport = hclge_get_vport(handle);
6084 	struct hclge_dev *hdev = vport->back;
6085 	struct hclge_fd_rule *rule;
6086 	struct hlist_node *node2;
6087 	int cnt = 0;
6088 
6089 	if (!hnae3_dev_fd_supported(hdev))
6090 		return -EOPNOTSUPP;
6091 
6092 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6093 
6094 	spin_lock_bh(&hdev->fd_rule_lock);
6095 	hlist_for_each_entry_safe(rule, node2,
6096 				  &hdev->fd_rule_list, rule_node) {
6097 		if (cnt == cmd->rule_cnt) {
6098 			spin_unlock_bh(&hdev->fd_rule_lock);
6099 			return -EMSGSIZE;
6100 		}
6101 
6102 		rule_locs[cnt] = rule->location;
6103 		cnt++;
6104 	}
6105 
6106 	spin_unlock_bh(&hdev->fd_rule_lock);
6107 
6108 	cmd->rule_cnt = cnt;
6109 
6110 	return 0;
6111 }
6112 
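/* Extract the tuples used by aRFS rules (ether/ip protocol, src/dst
 * addresses and destination port) from the dissected flow keys.
 */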
6113 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6114 				     struct hclge_fd_rule_tuples *tuples)
6115 {
6116 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6117 	tuples->ip_proto = fkeys->basic.ip_proto;
6118 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6119 
6120 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6121 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6122 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6123 	} else {
6124 		memcpy(tuples->src_ip,
6125 		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6126 		       sizeof(tuples->src_ip));
6127 		memcpy(tuples->dst_ip,
6128 		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6129 		       sizeof(tuples->dst_ip));
6130 	}
6131 }
6132 
6133 /* traverse all rules and check whether an existing rule has the same tuples */
6134 static struct hclge_fd_rule *
6135 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6136 			  const struct hclge_fd_rule_tuples *tuples)
6137 {
6138 	struct hclge_fd_rule *rule = NULL;
6139 	struct hlist_node *node;
6140 
6141 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6142 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6143 			return rule;
6144 	}
6145 
6146 	return NULL;
6147 }
6148 
6149 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6150 				     struct hclge_fd_rule *rule)
6151 {
6152 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6153 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6154 			     BIT(INNER_SRC_PORT);
6155 	rule->action = 0;
6156 	rule->vf_id = 0;
6157 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6158 	if (tuples->ether_proto == ETH_P_IP) {
6159 		if (tuples->ip_proto == IPPROTO_TCP)
6160 			rule->flow_type = TCP_V4_FLOW;
6161 		else
6162 			rule->flow_type = UDP_V4_FLOW;
6163 	} else {
6164 		if (tuples->ip_proto == IPPROTO_TCP)
6165 			rule->flow_type = TCP_V6_FLOW;
6166 		else
6167 			rule->flow_type = UDP_V6_FLOW;
6168 	}
6169 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6170 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6171 }
6172 
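/* aRFS callback: steer an individual flow to @queue_id. If no rule with
 * the same tuples exists, allocate a free location and program a new
 * rule; if a rule exists but points to another queue, update its action;
 * otherwise leave it unchanged. Returns the rule location on success.
 */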
6173 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6174 				      u16 flow_id, struct flow_keys *fkeys)
6175 {
6176 	struct hclge_vport *vport = hclge_get_vport(handle);
6177 	struct hclge_fd_rule_tuples new_tuples;
6178 	struct hclge_dev *hdev = vport->back;
6179 	struct hclge_fd_rule *rule;
6180 	u16 tmp_queue_id;
6181 	u16 bit_id;
6182 	int ret;
6183 
6184 	if (!hnae3_dev_fd_supported(hdev))
6185 		return -EOPNOTSUPP;
6186 
6187 	memset(&new_tuples, 0, sizeof(new_tuples));
6188 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6189 
6190 	spin_lock_bh(&hdev->fd_rule_lock);
6191 
6192 	/* when an fd rule added by the user already exists,
6193 	 * arfs should not take effect
6194 	 */
6195 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6196 		spin_unlock_bh(&hdev->fd_rule_lock);
6197 
6198 		return -EOPNOTSUPP;
6199 	}
6200 
6201 	/* check whether a flow director filter already exists for this flow:
6202 	 * if not, create a new filter for it;
6203 	 * if a filter exists with a different queue id, modify the filter;
6204 	 * if a filter exists with the same queue id, do nothing
6205 	 */
6206 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6207 	if (!rule) {
6208 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6209 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6210 			spin_unlock_bh(&hdev->fd_rule_lock);
6211 
6212 			return -ENOSPC;
6213 		}
6214 
6215 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6216 		if (!rule) {
6217 			spin_unlock_bh(&hdev->fd_rule_lock);
6218 
6219 			return -ENOMEM;
6220 		}
6221 
6222 		set_bit(bit_id, hdev->fd_bmap);
6223 		rule->location = bit_id;
6224 		rule->flow_id = flow_id;
6225 		rule->queue_id = queue_id;
6226 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6227 		ret = hclge_fd_config_rule(hdev, rule);
6228 
6229 		spin_unlock_bh(&hdev->fd_rule_lock);
6230 
6231 		if (ret)
6232 			return ret;
6233 
6234 		return rule->location;
6235 	}
6236 
6237 	spin_unlock_bh(&hdev->fd_rule_lock);
6238 
6239 	if (rule->queue_id == queue_id)
6240 		return rule->location;
6241 
6242 	tmp_queue_id = rule->queue_id;
6243 	rule->queue_id = queue_id;
6244 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6245 	if (ret) {
6246 		rule->queue_id = tmp_queue_id;
6247 		return ret;
6248 	}
6249 
6250 	return rule->location;
6251 }
6252 
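/* Periodically age out aRFS rules that the RFS core reports as expired:
 * unlink them from the rule list under the lock first, then disable the
 * corresponding TCAM entries outside the lock.
 */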
6253 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6254 {
6255 #ifdef CONFIG_RFS_ACCEL
6256 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6257 	struct hclge_fd_rule *rule;
6258 	struct hlist_node *node;
6259 	HLIST_HEAD(del_list);
6260 
6261 	spin_lock_bh(&hdev->fd_rule_lock);
6262 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6263 		spin_unlock_bh(&hdev->fd_rule_lock);
6264 		return;
6265 	}
6266 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6267 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6268 					rule->flow_id, rule->location)) {
6269 			hlist_del_init(&rule->rule_node);
6270 			hlist_add_head(&rule->rule_node, &del_list);
6271 			hdev->hclge_fd_rule_num--;
6272 			clear_bit(rule->location, hdev->fd_bmap);
6273 		}
6274 	}
6275 	spin_unlock_bh(&hdev->fd_rule_lock);
6276 
6277 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6278 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6279 				     rule->location, NULL, false);
6280 		kfree(rule);
6281 	}
6282 #endif
6283 }
6284 
6285 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6286 {
6287 #ifdef CONFIG_RFS_ACCEL
6288 	struct hclge_vport *vport = hclge_get_vport(handle);
6289 	struct hclge_dev *hdev = vport->back;
6290 
6291 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6292 		hclge_del_all_fd_entries(handle, true);
6293 #endif
6294 }
6295 
6296 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6297 {
6298 	struct hclge_vport *vport = hclge_get_vport(handle);
6299 	struct hclge_dev *hdev = vport->back;
6300 
6301 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6302 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6303 }
6304 
6305 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6306 {
6307 	struct hclge_vport *vport = hclge_get_vport(handle);
6308 	struct hclge_dev *hdev = vport->back;
6309 
6310 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6311 }
6312 
6313 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6314 {
6315 	struct hclge_vport *vport = hclge_get_vport(handle);
6316 	struct hclge_dev *hdev = vport->back;
6317 
6318 	return hdev->rst_stats.hw_reset_done_cnt;
6319 }
6320 
6321 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6322 {
6323 	struct hclge_vport *vport = hclge_get_vport(handle);
6324 	struct hclge_dev *hdev = vport->back;
6325 	bool clear;
6326 
6327 	hdev->fd_en = enable;
6328 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6329 	if (!enable)
6330 		hclge_del_all_fd_entries(handle, clear);
6331 	else
6332 		hclge_restore_fd_entries(handle);
6333 }
6334 
6335 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6336 {
6337 	struct hclge_desc desc;
6338 	struct hclge_config_mac_mode_cmd *req =
6339 		(struct hclge_config_mac_mode_cmd *)desc.data;
6340 	u32 loop_en = 0;
6341 	int ret;
6342 
6343 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6344 
6345 	if (enable) {
6346 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6347 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6348 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6349 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6350 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6351 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6352 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6353 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6354 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6355 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6356 	}
6357 
6358 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6359 
6360 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6361 	if (ret)
6362 		dev_err(&hdev->pdev->dev,
6363 			"mac enable fail, ret = %d.\n", ret);
6364 }
6365 
6366 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6367 				     u8 switch_param, u8 param_mask)
6368 {
6369 	struct hclge_mac_vlan_switch_cmd *req;
6370 	struct hclge_desc desc;
6371 	u32 func_id;
6372 	int ret;
6373 
6374 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6375 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6376 
6377 	/* read current config parameter */
6378 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6379 				   true);
6380 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6381 	req->func_id = cpu_to_le32(func_id);
6382 
6383 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6384 	if (ret) {
6385 		dev_err(&hdev->pdev->dev,
6386 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6387 		return ret;
6388 	}
6389 
6390 	/* modify and write new config parameter */
6391 	hclge_cmd_reuse_desc(&desc, false);
6392 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6393 	req->param_mask = param_mask;
6394 
6395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6396 	if (ret)
6397 		dev_err(&hdev->pdev->dev,
6398 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6399 	return ret;
6400 }
6401 
6402 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6403 				       int link_ret)
6404 {
6405 #define HCLGE_PHY_LINK_STATUS_NUM  200
6406 
6407 	struct phy_device *phydev = hdev->hw.mac.phydev;
6408 	int i = 0;
6409 	int ret;
6410 
6411 	do {
6412 		ret = phy_read_status(phydev);
6413 		if (ret) {
6414 			dev_err(&hdev->pdev->dev,
6415 				"phy update link status fail, ret = %d\n", ret);
6416 			return;
6417 		}
6418 
6419 		if (phydev->link == link_ret)
6420 			break;
6421 
6422 		msleep(HCLGE_LINK_STATUS_MS);
6423 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6424 }
6425 
6426 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6427 {
6428 #define HCLGE_MAC_LINK_STATUS_NUM  100
6429 
6430 	int i = 0;
6431 	int ret;
6432 
6433 	do {
6434 		ret = hclge_get_mac_link_status(hdev);
6435 		if (ret < 0)
6436 			return ret;
6437 		else if (ret == link_ret)
6438 			return 0;
6439 
6440 		msleep(HCLGE_LINK_STATUS_MS);
6441 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6442 	return -EBUSY;
6443 }
6444 
6445 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6446 					  bool is_phy)
6447 {
6448 #define HCLGE_LINK_STATUS_DOWN 0
6449 #define HCLGE_LINK_STATUS_UP   1
6450 
6451 	int link_ret;
6452 
6453 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6454 
6455 	if (is_phy)
6456 		hclge_phy_link_status_wait(hdev, link_ret);
6457 
6458 	return hclge_mac_link_status_wait(hdev, link_ret);
6459 }
6460 
6461 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6462 {
6463 	struct hclge_config_mac_mode_cmd *req;
6464 	struct hclge_desc desc;
6465 	u32 loop_en;
6466 	int ret;
6467 
6468 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6469 	/* 1 Read out the MAC mode config at first */
6470 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6471 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6472 	if (ret) {
6473 		dev_err(&hdev->pdev->dev,
6474 			"mac loopback get fail, ret = %d.\n", ret);
6475 		return ret;
6476 	}
6477 
6478 	/* 2 Then setup the loopback flag */
6479 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6480 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6481 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6482 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6483 
6484 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6485 
6486 	/* 3 Config mac work mode with loopback flag
6487 	 * and its original configuration parameters
6488 	 */
6489 	hclge_cmd_reuse_desc(&desc, false);
6490 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6491 	if (ret)
6492 		dev_err(&hdev->pdev->dev,
6493 			"mac loopback set fail, ret = %d.\n", ret);
6494 	return ret;
6495 }
6496 
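/* Enable or disable serdes (serial or parallel) loopback through the
 * firmware command, then poll the command result until the firmware
 * reports completion or the retry limit is reached.
 */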
6497 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6498 				     enum hnae3_loop loop_mode)
6499 {
6500 #define HCLGE_SERDES_RETRY_MS	10
6501 #define HCLGE_SERDES_RETRY_NUM	100
6502 
6503 	struct hclge_serdes_lb_cmd *req;
6504 	struct hclge_desc desc;
6505 	int ret, i = 0;
6506 	u8 loop_mode_b;
6507 
6508 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6509 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6510 
6511 	switch (loop_mode) {
6512 	case HNAE3_LOOP_SERIAL_SERDES:
6513 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6514 		break;
6515 	case HNAE3_LOOP_PARALLEL_SERDES:
6516 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6517 		break;
6518 	default:
6519 		dev_err(&hdev->pdev->dev,
6520 			"unsupported serdes loopback mode %d\n", loop_mode);
6521 		return -ENOTSUPP;
6522 	}
6523 
6524 	if (en) {
6525 		req->enable = loop_mode_b;
6526 		req->mask = loop_mode_b;
6527 	} else {
6528 		req->mask = loop_mode_b;
6529 	}
6530 
6531 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6532 	if (ret) {
6533 		dev_err(&hdev->pdev->dev,
6534 			"serdes loopback set fail, ret = %d\n", ret);
6535 		return ret;
6536 	}
6537 
6538 	do {
6539 		msleep(HCLGE_SERDES_RETRY_MS);
6540 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6541 					   true);
6542 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6543 		if (ret) {
6544 			dev_err(&hdev->pdev->dev,
6545 				"serdes loopback get fail, ret = %d\n", ret);
6546 			return ret;
6547 		}
6548 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6549 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6550 
6551 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6552 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6553 		return -EBUSY;
6554 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6555 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6556 		return -EIO;
6557 	}
6558 	return ret;
6559 }
6560 
6561 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6562 				     enum hnae3_loop loop_mode)
6563 {
6564 	int ret;
6565 
6566 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6567 	if (ret)
6568 		return ret;
6569 
6570 	hclge_cfg_mac_mode(hdev, en);
6571 
6572 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6573 	if (ret)
6574 		dev_err(&hdev->pdev->dev,
6575 			"serdes loopback config mac mode timeout\n");
6576 
6577 	return ret;
6578 }
6579 
6580 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6581 				     struct phy_device *phydev)
6582 {
6583 	int ret;
6584 
6585 	if (!phydev->suspended) {
6586 		ret = phy_suspend(phydev);
6587 		if (ret)
6588 			return ret;
6589 	}
6590 
6591 	ret = phy_resume(phydev);
6592 	if (ret)
6593 		return ret;
6594 
6595 	return phy_loopback(phydev, true);
6596 }
6597 
6598 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6599 				      struct phy_device *phydev)
6600 {
6601 	int ret;
6602 
6603 	ret = phy_loopback(phydev, false);
6604 	if (ret)
6605 		return ret;
6606 
6607 	return phy_suspend(phydev);
6608 }
6609 
6610 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6611 {
6612 	struct phy_device *phydev = hdev->hw.mac.phydev;
6613 	int ret;
6614 
6615 	if (!phydev)
6616 		return -ENOTSUPP;
6617 
6618 	if (en)
6619 		ret = hclge_enable_phy_loopback(hdev, phydev);
6620 	else
6621 		ret = hclge_disable_phy_loopback(hdev, phydev);
6622 	if (ret) {
6623 		dev_err(&hdev->pdev->dev,
6624 			"set phy loopback fail, ret = %d\n", ret);
6625 		return ret;
6626 	}
6627 
6628 	hclge_cfg_mac_mode(hdev, en);
6629 
6630 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6631 	if (ret)
6632 		dev_err(&hdev->pdev->dev,
6633 			"phy loopback config mac mode timeout\n");
6634 
6635 	return ret;
6636 }
6637 
6638 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6639 			    int stream_id, bool enable)
6640 {
6641 	struct hclge_desc desc;
6642 	struct hclge_cfg_com_tqp_queue_cmd *req =
6643 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6644 	int ret;
6645 
6646 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6647 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6648 	req->stream_id = cpu_to_le16(stream_id);
6649 	if (enable)
6650 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6651 
6652 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6653 	if (ret)
6654 		dev_err(&hdev->pdev->dev,
6655 			"Tqp enable fail, status = %d.\n", ret);
6656 	return ret;
6657 }
6658 
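/* Entry point for the loopback tests: on revision 0x21 and later, first
 * adjust the SSU switch parameter so that looped packets can reach the
 * MAC, then configure the requested loopback mode (app/MAC, serdes or
 * PHY), and finally enable or disable all TQPs of the vport.
 */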
6659 static int hclge_set_loopback(struct hnae3_handle *handle,
6660 			      enum hnae3_loop loop_mode, bool en)
6661 {
6662 	struct hclge_vport *vport = hclge_get_vport(handle);
6663 	struct hnae3_knic_private_info *kinfo;
6664 	struct hclge_dev *hdev = vport->back;
6665 	int i, ret;
6666 
6667 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6668 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6669 	 * the same, the packets are looped back in the SSU. If SSU loopback
6670 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6671 	 */
6672 	if (hdev->pdev->revision >= 0x21) {
6673 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6674 
6675 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6676 						HCLGE_SWITCH_ALW_LPBK_MASK);
6677 		if (ret)
6678 			return ret;
6679 	}
6680 
6681 	switch (loop_mode) {
6682 	case HNAE3_LOOP_APP:
6683 		ret = hclge_set_app_loopback(hdev, en);
6684 		break;
6685 	case HNAE3_LOOP_SERIAL_SERDES:
6686 	case HNAE3_LOOP_PARALLEL_SERDES:
6687 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6688 		break;
6689 	case HNAE3_LOOP_PHY:
6690 		ret = hclge_set_phy_loopback(hdev, en);
6691 		break;
6692 	default:
6693 		ret = -ENOTSUPP;
6694 		dev_err(&hdev->pdev->dev,
6695 			"loop_mode %d is not supported\n", loop_mode);
6696 		break;
6697 	}
6698 
6699 	if (ret)
6700 		return ret;
6701 
6702 	kinfo = &vport->nic.kinfo;
6703 	for (i = 0; i < kinfo->num_tqps; i++) {
6704 		ret = hclge_tqp_enable(hdev, i, 0, en);
6705 		if (ret)
6706 			return ret;
6707 	}
6708 
6709 	return 0;
6710 }
6711 
6712 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6713 {
6714 	int ret;
6715 
6716 	ret = hclge_set_app_loopback(hdev, false);
6717 	if (ret)
6718 		return ret;
6719 
6720 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6721 	if (ret)
6722 		return ret;
6723 
6724 	return hclge_cfg_serdes_loopback(hdev, false,
6725 					 HNAE3_LOOP_PARALLEL_SERDES);
6726 }
6727 
6728 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6729 {
6730 	struct hclge_vport *vport = hclge_get_vport(handle);
6731 	struct hnae3_knic_private_info *kinfo;
6732 	struct hnae3_queue *queue;
6733 	struct hclge_tqp *tqp;
6734 	int i;
6735 
6736 	kinfo = &vport->nic.kinfo;
6737 	for (i = 0; i < kinfo->num_tqps; i++) {
6738 		queue = handle->kinfo.tqp[i];
6739 		tqp = container_of(queue, struct hclge_tqp, q);
6740 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6741 	}
6742 }
6743 
6744 static void hclge_flush_link_update(struct hclge_dev *hdev)
6745 {
6746 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6747 
6748 	unsigned long last = hdev->serv_processed_cnt;
6749 	int i = 0;
6750 
6751 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6752 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6753 	       last == hdev->serv_processed_cnt)
6754 		usleep_range(1, 1);
6755 }
6756 
6757 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6758 {
6759 	struct hclge_vport *vport = hclge_get_vport(handle);
6760 	struct hclge_dev *hdev = vport->back;
6761 
6762 	if (enable) {
6763 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6764 	} else {
6765 		/* Set the DOWN flag here to disable link updating */
6766 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6767 
6768 		/* flush memory to make sure DOWN is seen by service task */
6769 		smp_mb__before_atomic();
6770 		hclge_flush_link_update(hdev);
6771 	}
6772 }
6773 
6774 static int hclge_ae_start(struct hnae3_handle *handle)
6775 {
6776 	struct hclge_vport *vport = hclge_get_vport(handle);
6777 	struct hclge_dev *hdev = vport->back;
6778 
6779 	/* mac enable */
6780 	hclge_cfg_mac_mode(hdev, true);
6781 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6782 	hdev->hw.mac.link = 0;
6783 
6784 	/* reset tqp stats */
6785 	hclge_reset_tqp_stats(handle);
6786 
6787 	hclge_mac_start_phy(hdev);
6788 
6789 	return 0;
6790 }
6791 
6792 static void hclge_ae_stop(struct hnae3_handle *handle)
6793 {
6794 	struct hclge_vport *vport = hclge_get_vport(handle);
6795 	struct hclge_dev *hdev = vport->back;
6796 	int i;
6797 
6798 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6799 
6800 	hclge_clear_arfs_rules(handle);
6801 
6802 	/* If it is not PF reset, the firmware will disable the MAC,
6803 	 * so we only need to stop the PHY here.
6804 	 */
6805 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6806 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6807 		hclge_mac_stop_phy(hdev);
6808 		hclge_update_link_status(hdev);
6809 		return;
6810 	}
6811 
6812 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6813 		hclge_reset_tqp(handle, i);
6814 
6815 	hclge_config_mac_tnl_int(hdev, false);
6816 
6817 	/* Mac disable */
6818 	hclge_cfg_mac_mode(hdev, false);
6819 
6820 	hclge_mac_stop_phy(hdev);
6821 
6822 	/* reset tqp stats */
6823 	hclge_reset_tqp_stats(handle);
6824 	hclge_update_link_status(hdev);
6825 }
6826 
6827 int hclge_vport_start(struct hclge_vport *vport)
6828 {
6829 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6830 	vport->last_active_jiffies = jiffies;
6831 	return 0;
6832 }
6833 
6834 void hclge_vport_stop(struct hclge_vport *vport)
6835 {
6836 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6837 }
6838 
6839 static int hclge_client_start(struct hnae3_handle *handle)
6840 {
6841 	struct hclge_vport *vport = hclge_get_vport(handle);
6842 
6843 	return hclge_vport_start(vport);
6844 }
6845 
6846 static void hclge_client_stop(struct hnae3_handle *handle)
6847 {
6848 	struct hclge_vport *vport = hclge_get_vport(handle);
6849 
6850 	hclge_vport_stop(vport);
6851 }
6852 
6853 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6854 					 u16 cmdq_resp, u8  resp_code,
6855 					 enum hclge_mac_vlan_tbl_opcode op)
6856 {
6857 	struct hclge_dev *hdev = vport->back;
6858 
6859 	if (cmdq_resp) {
6860 		dev_err(&hdev->pdev->dev,
6861 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6862 			cmdq_resp);
6863 		return -EIO;
6864 	}
6865 
6866 	if (op == HCLGE_MAC_VLAN_ADD) {
6867 		if (!resp_code || resp_code == 1) {
6868 			return 0;
6869 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6870 			dev_err(&hdev->pdev->dev,
6871 				"add mac addr failed for uc_overflow.\n");
6872 			return -ENOSPC;
6873 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6874 			dev_err(&hdev->pdev->dev,
6875 				"add mac addr failed for mc_overflow.\n");
6876 			return -ENOSPC;
6877 		}
6878 
6879 		dev_err(&hdev->pdev->dev,
6880 			"add mac addr failed for undefined, code=%u.\n",
6881 			resp_code);
6882 		return -EIO;
6883 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6884 		if (!resp_code) {
6885 			return 0;
6886 		} else if (resp_code == 1) {
6887 			dev_dbg(&hdev->pdev->dev,
6888 				"remove mac addr failed for miss.\n");
6889 			return -ENOENT;
6890 		}
6891 
6892 		dev_err(&hdev->pdev->dev,
6893 			"remove mac addr failed for undefined, code=%u.\n",
6894 			resp_code);
6895 		return -EIO;
6896 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6897 		if (!resp_code) {
6898 			return 0;
6899 		} else if (resp_code == 1) {
6900 			dev_dbg(&hdev->pdev->dev,
6901 				"lookup mac addr failed for miss.\n");
6902 			return -ENOENT;
6903 		}
6904 
6905 		dev_err(&hdev->pdev->dev,
6906 			"lookup mac addr failed for undefined, code=%u.\n",
6907 			resp_code);
6908 		return -EIO;
6909 	}
6910 
6911 	dev_err(&hdev->pdev->dev,
6912 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6913 
6914 	return -EINVAL;
6915 }
6916 
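/* Set or clear the bit of @vfid in the VF bitmap carried by a multi-BD
 * MAC/VLAN command: the first 192 function ids live in desc[1], the rest
 * in desc[2], 32 ids per data word.
 */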
6917 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6918 {
6919 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6920 
6921 	unsigned int word_num;
6922 	unsigned int bit_num;
6923 
6924 	if (vfid > 255 || vfid < 0)
6925 		return -EIO;
6926 
6927 	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6928 		word_num = vfid / 32;
6929 		bit_num  = vfid % 32;
6930 		if (clr)
6931 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6932 		else
6933 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6934 	} else {
6935 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6936 		bit_num  = vfid % 32;
6937 		if (clr)
6938 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6939 		else
6940 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6941 	}
6942 
6943 	return 0;
6944 }
6945 
6946 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6947 {
6948 #define HCLGE_DESC_NUMBER 3
6949 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6950 	int i, j;
6951 
6952 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6953 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6954 			if (desc[i].data[j])
6955 				return false;
6956 
6957 	return true;
6958 }
6959 
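/* Pack a MAC address into the MAC/VLAN table entry layout: bytes 0-3 go
 * into mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, and the entry is
 * additionally flagged as multicast when @is_mc is set.
 */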
6960 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6961 				   const u8 *addr, bool is_mc)
6962 {
6963 	const unsigned char *mac_addr = addr;
6964 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6965 		       (mac_addr[0]) | (mac_addr[1] << 8);
6966 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6967 
6968 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6969 	if (is_mc) {
6970 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6971 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6972 	}
6973 
6974 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6975 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6976 }
6977 
6978 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6979 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6980 {
6981 	struct hclge_dev *hdev = vport->back;
6982 	struct hclge_desc desc;
6983 	u8 resp_code;
6984 	u16 retval;
6985 	int ret;
6986 
6987 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6988 
6989 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6990 
6991 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6992 	if (ret) {
6993 		dev_err(&hdev->pdev->dev,
6994 			"del mac addr failed for cmd_send, ret = %d.\n",
6995 			ret);
6996 		return ret;
6997 	}
6998 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6999 	retval = le16_to_cpu(desc.retval);
7000 
7001 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7002 					     HCLGE_MAC_VLAN_REMOVE);
7003 }
7004 
7005 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7006 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7007 				     struct hclge_desc *desc,
7008 				     bool is_mc)
7009 {
7010 	struct hclge_dev *hdev = vport->back;
7011 	u8 resp_code;
7012 	u16 retval;
7013 	int ret;
7014 
7015 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7016 	if (is_mc) {
7017 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7018 		memcpy(desc[0].data,
7019 		       req,
7020 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7021 		hclge_cmd_setup_basic_desc(&desc[1],
7022 					   HCLGE_OPC_MAC_VLAN_ADD,
7023 					   true);
7024 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7025 		hclge_cmd_setup_basic_desc(&desc[2],
7026 					   HCLGE_OPC_MAC_VLAN_ADD,
7027 					   true);
7028 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7029 	} else {
7030 		memcpy(desc[0].data,
7031 		       req,
7032 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7033 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7034 	}
7035 	if (ret) {
7036 		dev_err(&hdev->pdev->dev,
7037 			"lookup mac addr failed for cmd_send, ret = %d.\n",
7038 			ret);
7039 		return ret;
7040 	}
7041 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7042 	retval = le16_to_cpu(desc[0].retval);
7043 
7044 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7045 					     HCLGE_MAC_VLAN_LKUP);
7046 }
7047 
7048 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7049 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7050 				  struct hclge_desc *mc_desc)
7051 {
7052 	struct hclge_dev *hdev = vport->back;
7053 	int cfg_status;
7054 	u8 resp_code;
7055 	u16 retval;
7056 	int ret;
7057 
7058 	if (!mc_desc) {
7059 		struct hclge_desc desc;
7060 
7061 		hclge_cmd_setup_basic_desc(&desc,
7062 					   HCLGE_OPC_MAC_VLAN_ADD,
7063 					   false);
7064 		memcpy(desc.data, req,
7065 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7066 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7067 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7068 		retval = le16_to_cpu(desc.retval);
7069 
7070 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7071 							   resp_code,
7072 							   HCLGE_MAC_VLAN_ADD);
7073 	} else {
7074 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7075 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7076 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7077 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7078 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7079 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7080 		memcpy(mc_desc[0].data, req,
7081 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7082 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7083 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7084 		retval = le16_to_cpu(mc_desc[0].retval);
7085 
7086 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7087 							   resp_code,
7088 							   HCLGE_MAC_VLAN_ADD);
7089 	}
7090 
7091 	if (ret) {
7092 		dev_err(&hdev->pdev->dev,
7093 			"add mac addr failed for cmd_send, ret = %d.\n",
7094 			ret);
7095 		return ret;
7096 	}
7097 
7098 	return cfg_status;
7099 }
7100 
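/* Allocate the unicast MAC VLAN (UMV) table space from firmware, then
 * divide it into a private quota per function (PF and each VF) and a
 * shared pool (one extra quota plus the remainder) used once a function
 * exhausts its private quota; accounting is protected by umv_mutex.
 */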
7101 static int hclge_init_umv_space(struct hclge_dev *hdev)
7102 {
7103 	u16 allocated_size = 0;
7104 	int ret;
7105 
7106 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7107 				  true);
7108 	if (ret)
7109 		return ret;
7110 
7111 	if (allocated_size < hdev->wanted_umv_size)
7112 		dev_warn(&hdev->pdev->dev,
7113 			 "Alloc umv space failed, want %u, get %u\n",
7114 			 hdev->wanted_umv_size, allocated_size);
7115 
7116 	mutex_init(&hdev->umv_mutex);
7117 	hdev->max_umv_size = allocated_size;
7118 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7119 	 * preserve some unicast mac vlan table entries shared by pf
7120 	 * and its vfs.
7121 	 */
7122 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7123 	hdev->share_umv_size = hdev->priv_umv_size +
7124 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7125 
7126 	return 0;
7127 }
7128 
7129 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7130 {
7131 	int ret;
7132 
7133 	if (hdev->max_umv_size > 0) {
7134 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7135 					  false);
7136 		if (ret)
7137 			return ret;
7138 		hdev->max_umv_size = 0;
7139 	}
7140 	mutex_destroy(&hdev->umv_mutex);
7141 
7142 	return 0;
7143 }
7144 
7145 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7146 			       u16 *allocated_size, bool is_alloc)
7147 {
7148 	struct hclge_umv_spc_alc_cmd *req;
7149 	struct hclge_desc desc;
7150 	int ret;
7151 
7152 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7153 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7154 	if (!is_alloc)
7155 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7156 
7157 	req->space_size = cpu_to_le32(space_size);
7158 
7159 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7160 	if (ret) {
7161 		dev_err(&hdev->pdev->dev,
7162 			"%s umv space failed for cmd_send, ret =%d\n",
7163 			is_alloc ? "allocate" : "free", ret);
7164 		return ret;
7165 	}
7166 
7167 	if (is_alloc && allocated_size)
7168 		*allocated_size = le32_to_cpu(desc.data[1]);
7169 
7170 	return 0;
7171 }
7172 
7173 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7174 {
7175 	struct hclge_vport *vport;
7176 	int i;
7177 
7178 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7179 		vport = &hdev->vport[i];
7180 		vport->used_umv_num = 0;
7181 	}
7182 
7183 	mutex_lock(&hdev->umv_mutex);
7184 	hdev->share_umv_size = hdev->priv_umv_size +
7185 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7186 	mutex_unlock(&hdev->umv_mutex);
7187 }
7188 
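/* The UMV space is full for a vport when its private quota is used up
 * and the shared pool is empty.
 */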
7189 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7190 {
7191 	struct hclge_dev *hdev = vport->back;
7192 	bool is_full;
7193 
7194 	mutex_lock(&hdev->umv_mutex);
7195 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7196 		   hdev->share_umv_size == 0);
7197 	mutex_unlock(&hdev->umv_mutex);
7198 
7199 	return is_full;
7200 }
7201 
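/* Account for a unicast entry being added or freed: entries beyond the
 * vport's private quota are charged against the shared pool, and the
 * accounting is reversed when such an entry is freed.
 */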
7202 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7203 {
7204 	struct hclge_dev *hdev = vport->back;
7205 
7206 	mutex_lock(&hdev->umv_mutex);
7207 	if (is_free) {
7208 		if (vport->used_umv_num > hdev->priv_umv_size)
7209 			hdev->share_umv_size++;
7210 
7211 		if (vport->used_umv_num > 0)
7212 			vport->used_umv_num--;
7213 	} else {
7214 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7215 		    hdev->share_umv_size > 0)
7216 			hdev->share_umv_size--;
7217 		vport->used_umv_num++;
7218 	}
7219 	mutex_unlock(&hdev->umv_mutex);
7220 }
7221 
7222 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7223 			     const unsigned char *addr)
7224 {
7225 	struct hclge_vport *vport = hclge_get_vport(handle);
7226 
7227 	return hclge_add_uc_addr_common(vport, addr);
7228 }
7229 
7230 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7231 			     const unsigned char *addr)
7232 {
7233 	struct hclge_dev *hdev = vport->back;
7234 	struct hclge_mac_vlan_tbl_entry_cmd req;
7235 	struct hclge_desc desc;
7236 	u16 egress_port = 0;
7237 	int ret;
7238 
7239 	/* mac addr check */
7240 	if (is_zero_ether_addr(addr) ||
7241 	    is_broadcast_ether_addr(addr) ||
7242 	    is_multicast_ether_addr(addr)) {
7243 		dev_err(&hdev->pdev->dev,
7244 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7245 			 addr, is_zero_ether_addr(addr),
7246 			 is_broadcast_ether_addr(addr),
7247 			 is_multicast_ether_addr(addr));
7248 		return -EINVAL;
7249 	}
7250 
7251 	memset(&req, 0, sizeof(req));
7252 
7253 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7254 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7255 
7256 	req.egress_port = cpu_to_le16(egress_port);
7257 
7258 	hclge_prepare_mac_addr(&req, addr, false);
7259 
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
	 */
7264 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7265 	if (ret == -ENOENT) {
7266 		if (!hclge_is_umv_space_full(vport)) {
7267 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7268 			if (!ret)
7269 				hclge_update_umv_space(vport, false);
7270 			return ret;
7271 		}
7272 
7273 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7274 			hdev->priv_umv_size);
7275 
7276 		return -ENOSPC;
7277 	}
7278 
	/* check if we just hit a duplicate entry */
7280 	if (!ret) {
7281 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7282 			 vport->vport_id, addr);
7283 		return 0;
7284 	}
7285 
7286 	dev_err(&hdev->pdev->dev,
7287 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7288 		addr);
7289 
7290 	return ret;
7291 }
7292 
7293 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7294 			    const unsigned char *addr)
7295 {
7296 	struct hclge_vport *vport = hclge_get_vport(handle);
7297 
7298 	return hclge_rm_uc_addr_common(vport, addr);
7299 }
7300 
7301 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7302 			    const unsigned char *addr)
7303 {
7304 	struct hclge_dev *hdev = vport->back;
7305 	struct hclge_mac_vlan_tbl_entry_cmd req;
7306 	int ret;
7307 
7308 	/* mac addr check */
7309 	if (is_zero_ether_addr(addr) ||
7310 	    is_broadcast_ether_addr(addr) ||
7311 	    is_multicast_ether_addr(addr)) {
7312 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7313 			addr);
7314 		return -EINVAL;
7315 	}
7316 
7317 	memset(&req, 0, sizeof(req));
7318 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7319 	hclge_prepare_mac_addr(&req, addr, false);
7320 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7321 	if (!ret)
7322 		hclge_update_umv_space(vport, true);
7323 
7324 	return ret;
7325 }
7326 
7327 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7328 			     const unsigned char *addr)
7329 {
7330 	struct hclge_vport *vport = hclge_get_vport(handle);
7331 
7332 	return hclge_add_mc_addr_common(vport, addr);
7333 }
7334 
7335 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7336 			     const unsigned char *addr)
7337 {
7338 	struct hclge_dev *hdev = vport->back;
7339 	struct hclge_mac_vlan_tbl_entry_cmd req;
7340 	struct hclge_desc desc[3];
7341 	int status;
7342 
7343 	/* mac addr check */
7344 	if (!is_multicast_ether_addr(addr)) {
7345 		dev_err(&hdev->pdev->dev,
7346 			"Add mc mac err! invalid mac:%pM.\n",
7347 			 addr);
7348 		return -EINVAL;
7349 	}
7350 	memset(&req, 0, sizeof(req));
7351 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7352 	hclge_prepare_mac_addr(&req, addr, true);
7353 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7354 	if (status) {
		/* This mac addr does not exist, add a new entry for it */
7356 		memset(desc[0].data, 0, sizeof(desc[0].data));
7357 		memset(desc[1].data, 0, sizeof(desc[0].data));
7358 		memset(desc[2].data, 0, sizeof(desc[0].data));
7359 	}
7360 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7361 	if (status)
7362 		return status;
7363 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7364 
7365 	if (status == -ENOSPC)
7366 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7367 
7368 	return status;
7369 }
7370 
7371 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7372 			    const unsigned char *addr)
7373 {
7374 	struct hclge_vport *vport = hclge_get_vport(handle);
7375 
7376 	return hclge_rm_mc_addr_common(vport, addr);
7377 }
7378 
7379 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7380 			    const unsigned char *addr)
7381 {
7382 	struct hclge_dev *hdev = vport->back;
7383 	struct hclge_mac_vlan_tbl_entry_cmd req;
7384 	enum hclge_cmd_status status;
7385 	struct hclge_desc desc[3];
7386 
7387 	/* mac addr check */
7388 	if (!is_multicast_ether_addr(addr)) {
7389 		dev_dbg(&hdev->pdev->dev,
7390 			"Remove mc mac err! invalid mac:%pM.\n",
7391 			 addr);
7392 		return -EINVAL;
7393 	}
7394 
7395 	memset(&req, 0, sizeof(req));
7396 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7397 	hclge_prepare_mac_addr(&req, addr, true);
7398 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7399 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
7401 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7402 		if (status)
7403 			return status;
7404 
7405 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
7407 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7408 		else
			/* Not all the vfids are zero, so update the entry */
7410 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7411 
7412 	} else {
		/* Maybe this mac address is in the mta table, but it cannot
		 * be deleted here because an mta entry represents an address
		 * range rather than a specific address. The deletion of all
		 * such entries takes effect in update_mta_status, called by
		 * hns3_nic_set_rx_mode.
		 */
7419 		status = 0;
7420 	}
7421 
7422 	return status;
7423 }
7424 
7425 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7426 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7427 {
7428 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7429 	struct list_head *list;
7430 
7431 	if (!vport->vport_id)
7432 		return;
7433 
7434 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7435 	if (!mac_cfg)
7436 		return;
7437 
7438 	mac_cfg->hd_tbl_status = true;
7439 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7440 
7441 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7442 	       &vport->uc_mac_list : &vport->mc_mac_list;
7443 
7444 	list_add_tail(&mac_cfg->node, list);
7445 }
7446 
7447 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7448 			      bool is_write_tbl,
7449 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7450 {
7451 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7452 	struct list_head *list;
7453 	bool uc_flag, mc_flag;
7454 
7455 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7456 	       &vport->uc_mac_list : &vport->mc_mac_list;
7457 
7458 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7459 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7460 
7461 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7462 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7463 			if (uc_flag && mac_cfg->hd_tbl_status)
7464 				hclge_rm_uc_addr_common(vport, mac_addr);
7465 
7466 			if (mc_flag && mac_cfg->hd_tbl_status)
7467 				hclge_rm_mc_addr_common(vport, mac_addr);
7468 
7469 			list_del(&mac_cfg->node);
7470 			kfree(mac_cfg);
7471 			break;
7472 		}
7473 	}
7474 }
7475 
7476 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7477 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7478 {
7479 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7480 	struct list_head *list;
7481 
7482 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7483 	       &vport->uc_mac_list : &vport->mc_mac_list;
7484 
7485 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7486 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7487 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7488 
7489 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7490 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7491 
7492 		mac_cfg->hd_tbl_status = false;
7493 		if (is_del_list) {
7494 			list_del(&mac_cfg->node);
7495 			kfree(mac_cfg);
7496 		}
7497 	}
7498 }
7499 
7500 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7501 {
7502 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7503 	struct hclge_vport *vport;
7504 	int i;
7505 
7506 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7507 		vport = &hdev->vport[i];
7508 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7509 			list_del(&mac->node);
7510 			kfree(mac);
7511 		}
7512 
7513 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7514 			list_del(&mac->node);
7515 			kfree(mac);
7516 		}
7517 	}
7518 }
7519 
7520 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7521 					      u16 cmdq_resp, u8 resp_code)
7522 {
7523 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7524 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7525 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7526 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7527 
7528 	int return_status;
7529 
7530 	if (cmdq_resp) {
7531 		dev_err(&hdev->pdev->dev,
7532 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7533 			cmdq_resp);
7534 		return -EIO;
7535 	}
7536 
7537 	switch (resp_code) {
7538 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7539 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7540 		return_status = 0;
7541 		break;
7542 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7543 		dev_err(&hdev->pdev->dev,
7544 			"add mac ethertype failed for manager table overflow.\n");
7545 		return_status = -EIO;
7546 		break;
7547 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7548 		dev_err(&hdev->pdev->dev,
7549 			"add mac ethertype failed for key conflict.\n");
7550 		return_status = -EIO;
7551 		break;
7552 	default:
7553 		dev_err(&hdev->pdev->dev,
7554 			"add mac ethertype failed for undefined, code=%u.\n",
7555 			resp_code);
7556 		return_status = -EIO;
7557 	}
7558 
7559 	return return_status;
7560 }
7561 
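/* A VF MAC is considered a duplicate if it already exists in the hardware
 * MAC-VLAN table, or if it is configured for another VF of the same PF.
 */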
7562 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7563 				     u8 *mac_addr)
7564 {
7565 	struct hclge_mac_vlan_tbl_entry_cmd req;
7566 	struct hclge_dev *hdev = vport->back;
7567 	struct hclge_desc desc;
7568 	u16 egress_port = 0;
7569 	int i;
7570 
7571 	if (is_zero_ether_addr(mac_addr))
7572 		return false;
7573 
7574 	memset(&req, 0, sizeof(req));
7575 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7576 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7577 	req.egress_port = cpu_to_le16(egress_port);
7578 	hclge_prepare_mac_addr(&req, mac_addr, false);
7579 
7580 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7581 		return true;
7582 
7583 	vf_idx += HCLGE_VF_VPORT_START_NUM;
7584 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7585 		if (i != vf_idx &&
7586 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7587 			return true;
7588 
7589 	return false;
7590 }
7591 
7592 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7593 			    u8 *mac_addr)
7594 {
7595 	struct hclge_vport *vport = hclge_get_vport(handle);
7596 	struct hclge_dev *hdev = vport->back;
7597 
7598 	vport = hclge_get_vf_vport(hdev, vf);
7599 	if (!vport)
7600 		return -EINVAL;
7601 
7602 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7603 		dev_info(&hdev->pdev->dev,
7604 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
7605 			 mac_addr);
7606 		return 0;
7607 	}
7608 
7609 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7610 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7611 			mac_addr);
7612 		return -EEXIST;
7613 	}
7614 
7615 	ether_addr_copy(vport->vf_info.mac, mac_addr);
7616 	dev_info(&hdev->pdev->dev,
7617 		 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7618 		 vf, mac_addr);
7619 
7620 	return hclge_inform_reset_assert_to_vf(vport);
7621 }
7622 
7623 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7624 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7625 {
7626 	struct hclge_desc desc;
7627 	u8 resp_code;
7628 	u16 retval;
7629 	int ret;
7630 
7631 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7632 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7633 
7634 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7635 	if (ret) {
7636 		dev_err(&hdev->pdev->dev,
7637 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7638 			ret);
7639 		return ret;
7640 	}
7641 
7642 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7643 	retval = le16_to_cpu(desc.retval);
7644 
7645 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7646 }
7647 
7648 static int init_mgr_tbl(struct hclge_dev *hdev)
7649 {
7650 	int ret;
7651 	int i;
7652 
7653 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7654 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7655 		if (ret) {
7656 			dev_err(&hdev->pdev->dev,
7657 				"add mac ethertype failed, ret =%d.\n",
7658 				ret);
7659 			return ret;
7660 		}
7661 	}
7662 
7663 	return 0;
7664 }
7665 
7666 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7667 {
7668 	struct hclge_vport *vport = hclge_get_vport(handle);
7669 	struct hclge_dev *hdev = vport->back;
7670 
7671 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7672 }
7673 
7674 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7675 			      bool is_first)
7676 {
7677 	const unsigned char *new_addr = (const unsigned char *)p;
7678 	struct hclge_vport *vport = hclge_get_vport(handle);
7679 	struct hclge_dev *hdev = vport->back;
7680 	int ret;
7681 
7682 	/* mac addr check */
7683 	if (is_zero_ether_addr(new_addr) ||
7684 	    is_broadcast_ether_addr(new_addr) ||
7685 	    is_multicast_ether_addr(new_addr)) {
7686 		dev_err(&hdev->pdev->dev,
7687 			"Change uc mac err! invalid mac:%pM.\n",
7688 			 new_addr);
7689 		return -EINVAL;
7690 	}
7691 
7692 	if ((!is_first || is_kdump_kernel()) &&
7693 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7694 		dev_warn(&hdev->pdev->dev,
7695 			 "remove old uc mac address fail.\n");
7696 
7697 	ret = hclge_add_uc_addr(handle, new_addr);
7698 	if (ret) {
7699 		dev_err(&hdev->pdev->dev,
7700 			"add uc mac address fail, ret =%d.\n",
7701 			ret);
7702 
7703 		if (!is_first &&
7704 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7705 			dev_err(&hdev->pdev->dev,
7706 				"restore uc mac address fail.\n");
7707 
7708 		return -EIO;
7709 	}
7710 
7711 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7712 	if (ret) {
7713 		dev_err(&hdev->pdev->dev,
7714 			"configure mac pause address fail, ret =%d.\n",
7715 			ret);
7716 		return -EIO;
7717 	}
7718 
7719 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7720 
7721 	return 0;
7722 }
7723 
7724 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7725 			  int cmd)
7726 {
7727 	struct hclge_vport *vport = hclge_get_vport(handle);
7728 	struct hclge_dev *hdev = vport->back;
7729 
7730 	if (!hdev->hw.mac.phydev)
7731 		return -EOPNOTSUPP;
7732 
7733 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7734 }
7735 
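/* Enable or disable the hardware VLAN filter for the given filter type
 * (VF or port) and filter engine bits, on behalf of the given function.
 */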
7736 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7737 				      u8 fe_type, bool filter_en, u8 vf_id)
7738 {
7739 	struct hclge_vlan_filter_ctrl_cmd *req;
7740 	struct hclge_desc desc;
7741 	int ret;
7742 
7743 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7744 
7745 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7746 	req->vlan_type = vlan_type;
7747 	req->vlan_fe = filter_en ? fe_type : 0;
7748 	req->vf_id = vf_id;
7749 
7750 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7751 	if (ret)
7752 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7753 			ret);
7754 
7755 	return ret;
7756 }
7757 
7758 #define HCLGE_FILTER_TYPE_VF		0
7759 #define HCLGE_FILTER_TYPE_PORT		1
7760 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7761 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7762 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7763 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7764 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7765 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7766 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7767 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7768 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7769 
7770 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7771 {
7772 	struct hclge_vport *vport = hclge_get_vport(handle);
7773 	struct hclge_dev *hdev = vport->back;
7774 
7775 	if (hdev->pdev->revision >= 0x21) {
7776 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7777 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7778 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7779 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7780 	} else {
7781 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7782 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7783 					   0);
7784 	}
7785 	if (enable)
7786 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7787 	else
7788 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7789 }
7790 
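/* Add or remove (is_kill) a vlan id in the VF vlan filter table for the
 * given function. The command spans two descriptors carrying a VF bitmap,
 * and the firmware response code tells whether the entry was handled,
 * missing, or whether the VF vlan table has overflowed.
 */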
7791 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7792 				    bool is_kill, u16 vlan,
7793 				    __be16 proto)
7794 {
7795 	struct hclge_vport *vport = &hdev->vport[vfid];
7796 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7797 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7798 	struct hclge_desc desc[2];
7799 	u8 vf_byte_val;
7800 	u8 vf_byte_off;
7801 	int ret;
7802 
	/* If the vf vlan table is full, firmware will close the vf vlan
	 * filter, so it is neither possible nor necessary to add a new
	 * vlan id to it. If spoof check is enabled while the vf vlan
	 * table is full, adding a new vlan must be refused, because tx
	 * packets carrying that vlan id would be dropped.
	 */
7808 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7809 		if (vport->vf_info.spoofchk && vlan) {
7810 			dev_err(&hdev->pdev->dev,
7811 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
7812 			return -EPERM;
7813 		}
7814 		return 0;
7815 	}
7816 
7817 	hclge_cmd_setup_basic_desc(&desc[0],
7818 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7819 	hclge_cmd_setup_basic_desc(&desc[1],
7820 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7821 
7822 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7823 
7824 	vf_byte_off = vfid / 8;
7825 	vf_byte_val = 1 << (vfid % 8);
7826 
7827 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7828 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7829 
7830 	req0->vlan_id  = cpu_to_le16(vlan);
7831 	req0->vlan_cfg = is_kill;
7832 
7833 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7834 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7835 	else
7836 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7837 
7838 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7839 	if (ret) {
7840 		dev_err(&hdev->pdev->dev,
7841 			"Send vf vlan command fail, ret =%d.\n",
7842 			ret);
7843 		return ret;
7844 	}
7845 
7846 	if (!is_kill) {
7847 #define HCLGE_VF_VLAN_NO_ENTRY	2
7848 		if (!req0->resp_code || req0->resp_code == 1)
7849 			return 0;
7850 
7851 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7852 			set_bit(vfid, hdev->vf_vlan_full);
7853 			dev_warn(&hdev->pdev->dev,
7854 				 "vf vlan table is full, vf vlan filter is disabled\n");
7855 			return 0;
7856 		}
7857 
7858 		dev_err(&hdev->pdev->dev,
7859 			"Add vf vlan filter fail, ret =%u.\n",
7860 			req0->resp_code);
7861 	} else {
7862 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7863 		if (!req0->resp_code)
7864 			return 0;
7865 
		/* The vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are never added to the table. Just
		 * return 0 without a warning, to avoid flooding the log
		 * with verbose messages at unload time.
		 */
7871 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7872 			return 0;
7873 
7874 		dev_err(&hdev->pdev->dev,
7875 			"Kill vf vlan filter fail, ret =%u.\n",
7876 			req0->resp_code);
7877 	}
7878 
7879 	return -EIO;
7880 }
7881 
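/* Add or remove (is_kill) a vlan id in the port vlan filter. The vlan id
 * is encoded as a group offset (HCLGE_VLAN_ID_OFFSET_STEP ids per group)
 * plus a byte/bit position inside the group bitmap.
 */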
7882 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7883 				      u16 vlan_id, bool is_kill)
7884 {
7885 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7886 	struct hclge_desc desc;
7887 	u8 vlan_offset_byte_val;
7888 	u8 vlan_offset_byte;
7889 	u8 vlan_offset_160;
7890 	int ret;
7891 
7892 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7893 
7894 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7895 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7896 			   HCLGE_VLAN_BYTE_SIZE;
7897 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7898 
7899 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7900 	req->vlan_offset = vlan_offset_160;
7901 	req->vlan_cfg = is_kill;
7902 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7903 
7904 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7905 	if (ret)
7906 		dev_err(&hdev->pdev->dev,
7907 			"port vlan command, send fail, ret =%d.\n", ret);
7908 	return ret;
7909 }
7910 
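/* Update the vlan filter for a vport: the VF vlan filter is always
 * programmed, while the port vlan filter is only touched when the first
 * vport joins or the last vport leaves the vlan, as tracked by
 * hdev->vlan_table.
 */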
7911 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7912 				    u16 vport_id, u16 vlan_id,
7913 				    bool is_kill)
7914 {
7915 	u16 vport_idx, vport_num = 0;
7916 	int ret;
7917 
7918 	if (is_kill && !vlan_id)
7919 		return 0;
7920 
7921 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7922 				       proto);
7923 	if (ret) {
7924 		dev_err(&hdev->pdev->dev,
7925 			"Set %u vport vlan filter config fail, ret =%d.\n",
7926 			vport_id, ret);
7927 		return ret;
7928 	}
7929 
7930 	/* vlan 0 may be added twice when 8021q module is enabled */
7931 	if (!is_kill && !vlan_id &&
7932 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7933 		return 0;
7934 
7935 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7936 		dev_err(&hdev->pdev->dev,
7937 			"Add port vlan failed, vport %u is already in vlan %u\n",
7938 			vport_id, vlan_id);
7939 		return -EINVAL;
7940 	}
7941 
7942 	if (is_kill &&
7943 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7944 		dev_err(&hdev->pdev->dev,
7945 			"Delete port vlan failed, vport %u is not in vlan %u\n",
7946 			vport_id, vlan_id);
7947 		return -EINVAL;
7948 	}
7949 
7950 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7951 		vport_num++;
7952 
7953 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7954 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7955 						 is_kill);
7956 
7957 	return ret;
7958 }
7959 
7960 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7961 {
7962 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7963 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7964 	struct hclge_dev *hdev = vport->back;
7965 	struct hclge_desc desc;
7966 	u16 bmap_index;
7967 	int status;
7968 
7969 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7970 
7971 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7972 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7973 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7974 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7975 		      vcfg->accept_tag1 ? 1 : 0);
7976 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7977 		      vcfg->accept_untag1 ? 1 : 0);
7978 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7979 		      vcfg->accept_tag2 ? 1 : 0);
7980 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7981 		      vcfg->accept_untag2 ? 1 : 0);
7982 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7983 		      vcfg->insert_tag1_en ? 1 : 0);
7984 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7985 		      vcfg->insert_tag2_en ? 1 : 0);
7986 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7987 
7988 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7989 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
7990 			HCLGE_VF_NUM_PER_BYTE;
7991 	req->vf_bitmap[bmap_index] =
7992 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7993 
7994 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
7995 	if (status)
7996 		dev_err(&hdev->pdev->dev,
7997 			"Send port txvlan cfg command fail, ret =%d\n",
7998 			status);
7999 
8000 	return status;
8001 }
8002 
8003 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8004 {
8005 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8006 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8007 	struct hclge_dev *hdev = vport->back;
8008 	struct hclge_desc desc;
8009 	u16 bmap_index;
8010 	int status;
8011 
8012 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8013 
8014 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8015 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8016 		      vcfg->strip_tag1_en ? 1 : 0);
8017 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8018 		      vcfg->strip_tag2_en ? 1 : 0);
8019 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8020 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8021 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8022 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8023 
8024 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8025 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8026 			HCLGE_VF_NUM_PER_BYTE;
8027 	req->vf_bitmap[bmap_index] =
8028 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8029 
8030 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8031 	if (status)
8032 		dev_err(&hdev->pdev->dev,
8033 			"Send port rxvlan cfg command fail, ret =%d\n",
8034 			status);
8035 
8036 	return status;
8037 }
8038 
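/* Configure tx/rx vlan tag offload for a vport according to the port
 * based vlan state: when it is enabled, hardware inserts the port vlan
 * as tag1 on transmit instead of accepting tag1 from the stack, and the
 * rx strip configuration is adjusted to match.
 */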
8039 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8040 				  u16 port_base_vlan_state,
8041 				  u16 vlan_tag)
8042 {
8043 	int ret;
8044 
8045 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8046 		vport->txvlan_cfg.accept_tag1 = true;
8047 		vport->txvlan_cfg.insert_tag1_en = false;
8048 		vport->txvlan_cfg.default_tag1 = 0;
8049 	} else {
8050 		vport->txvlan_cfg.accept_tag1 = false;
8051 		vport->txvlan_cfg.insert_tag1_en = true;
8052 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8053 	}
8054 
8055 	vport->txvlan_cfg.accept_untag1 = true;
8056 
	/* accept_tag2 and accept_untag2 are not supported on
	 * pdev revision(0x20); newer revisions support them, but
	 * these two fields cannot be configured by the user.
	 */
8061 	vport->txvlan_cfg.accept_tag2 = true;
8062 	vport->txvlan_cfg.accept_untag2 = true;
8063 	vport->txvlan_cfg.insert_tag2_en = false;
8064 	vport->txvlan_cfg.default_tag2 = 0;
8065 
8066 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8067 		vport->rxvlan_cfg.strip_tag1_en = false;
8068 		vport->rxvlan_cfg.strip_tag2_en =
8069 				vport->rxvlan_cfg.rx_vlan_offload_en;
8070 	} else {
8071 		vport->rxvlan_cfg.strip_tag1_en =
8072 				vport->rxvlan_cfg.rx_vlan_offload_en;
8073 		vport->rxvlan_cfg.strip_tag2_en = true;
8074 	}
8075 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8076 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8077 
8078 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8079 	if (ret)
8080 		return ret;
8081 
8082 	return hclge_set_vlan_rx_offload_cfg(vport);
8083 }
8084 
8085 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8086 {
8087 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8088 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8089 	struct hclge_desc desc;
8090 	int status;
8091 
8092 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8093 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8094 	rx_req->ot_fst_vlan_type =
8095 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8096 	rx_req->ot_sec_vlan_type =
8097 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8098 	rx_req->in_fst_vlan_type =
8099 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8100 	rx_req->in_sec_vlan_type =
8101 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8102 
8103 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8104 	if (status) {
8105 		dev_err(&hdev->pdev->dev,
8106 			"Send rxvlan protocol type command fail, ret =%d\n",
8107 			status);
8108 		return status;
8109 	}
8110 
8111 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8112 
8113 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8114 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8115 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8116 
8117 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8118 	if (status)
8119 		dev_err(&hdev->pdev->dev,
8120 			"Send txvlan protocol type command fail, ret =%d\n",
8121 			status);
8122 
8123 	return status;
8124 }
8125 
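/* Initialize the default vlan configuration: enable the vlan filters
 * supported by this revision, set the default TPID (0x8100) for all
 * inner and outer vlan types, apply each vport's port based vlan offload
 * setting, and finally add vlan 0 to the filter.
 */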
8126 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8127 {
8128 #define HCLGE_DEF_VLAN_TYPE		0x8100
8129 
8130 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8131 	struct hclge_vport *vport;
8132 	int ret;
8133 	int i;
8134 
8135 	if (hdev->pdev->revision >= 0x21) {
8136 		/* for revision 0x21, vf vlan filter is per function */
8137 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8138 			vport = &hdev->vport[i];
8139 			ret = hclge_set_vlan_filter_ctrl(hdev,
8140 							 HCLGE_FILTER_TYPE_VF,
8141 							 HCLGE_FILTER_FE_EGRESS,
8142 							 true,
8143 							 vport->vport_id);
8144 			if (ret)
8145 				return ret;
8146 		}
8147 
8148 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8149 						 HCLGE_FILTER_FE_INGRESS, true,
8150 						 0);
8151 		if (ret)
8152 			return ret;
8153 	} else {
8154 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8155 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8156 						 true, 0);
8157 		if (ret)
8158 			return ret;
8159 	}
8160 
8161 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8162 
8163 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8164 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8165 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8166 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8167 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8168 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8169 
8170 	ret = hclge_set_vlan_protocol_type(hdev);
8171 	if (ret)
8172 		return ret;
8173 
8174 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8175 		u16 vlan_tag;
8176 
8177 		vport = &hdev->vport[i];
8178 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8179 
8180 		ret = hclge_vlan_offload_cfg(vport,
8181 					     vport->port_base_vlan_cfg.state,
8182 					     vlan_tag);
8183 		if (ret)
8184 			return ret;
8185 	}
8186 
8187 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8188 }
8189 
8190 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8191 				       bool writen_to_tbl)
8192 {
8193 	struct hclge_vport_vlan_cfg *vlan;
8194 
8195 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8196 	if (!vlan)
8197 		return;
8198 
8199 	vlan->hd_tbl_status = writen_to_tbl;
8200 	vlan->vlan_id = vlan_id;
8201 
8202 	list_add_tail(&vlan->node, &vport->vlan_list);
8203 }
8204 
8205 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8206 {
8207 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8208 	struct hclge_dev *hdev = vport->back;
8209 	int ret;
8210 
8211 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8212 		if (!vlan->hd_tbl_status) {
8213 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8214 						       vport->vport_id,
8215 						       vlan->vlan_id, false);
8216 			if (ret) {
8217 				dev_err(&hdev->pdev->dev,
8218 					"restore vport vlan list failed, ret=%d\n",
8219 					ret);
8220 				return ret;
8221 			}
8222 		}
8223 		vlan->hd_tbl_status = true;
8224 	}
8225 
8226 	return 0;
8227 }
8228 
8229 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8230 				      bool is_write_tbl)
8231 {
8232 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8233 	struct hclge_dev *hdev = vport->back;
8234 
8235 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8236 		if (vlan->vlan_id == vlan_id) {
8237 			if (is_write_tbl && vlan->hd_tbl_status)
8238 				hclge_set_vlan_filter_hw(hdev,
8239 							 htons(ETH_P_8021Q),
8240 							 vport->vport_id,
8241 							 vlan_id,
8242 							 true);
8243 
8244 			list_del(&vlan->node);
8245 			kfree(vlan);
8246 			break;
8247 		}
8248 	}
8249 }
8250 
8251 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8252 {
8253 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8254 	struct hclge_dev *hdev = vport->back;
8255 
8256 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8257 		if (vlan->hd_tbl_status)
8258 			hclge_set_vlan_filter_hw(hdev,
8259 						 htons(ETH_P_8021Q),
8260 						 vport->vport_id,
8261 						 vlan->vlan_id,
8262 						 true);
8263 
8264 		vlan->hd_tbl_status = false;
8265 		if (is_del_list) {
8266 			list_del(&vlan->node);
8267 			kfree(vlan);
8268 		}
8269 	}
8270 }
8271 
8272 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8273 {
8274 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8275 	struct hclge_vport *vport;
8276 	int i;
8277 
8278 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8279 		vport = &hdev->vport[i];
8280 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8281 			list_del(&vlan->node);
8282 			kfree(vlan);
8283 		}
8284 	}
8285 }
8286 
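/* Restore the hardware vlan filter after a reset: vports with port based
 * vlan enabled get their port vlan written back directly, the others get
 * every previously written entry of their vport vlan list rewritten.
 */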
8287 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8288 {
8289 	struct hclge_vport *vport = hclge_get_vport(handle);
8290 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8291 	struct hclge_dev *hdev = vport->back;
8292 	u16 vlan_proto;
8293 	u16 state, vlan_id;
8294 	int i;
8295 
8296 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8297 		vport = &hdev->vport[i];
8298 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8299 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8300 		state = vport->port_base_vlan_cfg.state;
8301 
8302 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8303 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8304 						 vport->vport_id, vlan_id,
8305 						 false);
8306 			continue;
8307 		}
8308 
8309 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8310 			int ret;
8311 
8312 			if (!vlan->hd_tbl_status)
8313 				continue;
8314 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8315 						       vport->vport_id,
8316 						       vlan->vlan_id, false);
8317 			if (ret)
8318 				break;
8319 		}
8320 	}
8321 }
8322 
8323 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8324 {
8325 	struct hclge_vport *vport = hclge_get_vport(handle);
8326 
8327 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8328 		vport->rxvlan_cfg.strip_tag1_en = false;
8329 		vport->rxvlan_cfg.strip_tag2_en = enable;
8330 	} else {
8331 		vport->rxvlan_cfg.strip_tag1_en = enable;
8332 		vport->rxvlan_cfg.strip_tag2_en = true;
8333 	}
8334 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8335 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8336 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8337 
8338 	return hclge_set_vlan_rx_offload_cfg(vport);
8339 }
8340 
8341 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8342 					    u16 port_base_vlan_state,
8343 					    struct hclge_vlan_info *new_info,
8344 					    struct hclge_vlan_info *old_info)
8345 {
8346 	struct hclge_dev *hdev = vport->back;
8347 	int ret;
8348 
8349 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8350 		hclge_rm_vport_all_vlan_table(vport, false);
8351 		return hclge_set_vlan_filter_hw(hdev,
8352 						 htons(new_info->vlan_proto),
8353 						 vport->vport_id,
8354 						 new_info->vlan_tag,
8355 						 false);
8356 	}
8357 
8358 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8359 				       vport->vport_id, old_info->vlan_tag,
8360 				       true);
8361 	if (ret)
8362 		return ret;
8363 
8364 	return hclge_add_vport_all_vlan_table(vport);
8365 }
8366 
8367 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8368 				    struct hclge_vlan_info *vlan_info)
8369 {
8370 	struct hnae3_handle *nic = &vport->nic;
8371 	struct hclge_vlan_info *old_vlan_info;
8372 	struct hclge_dev *hdev = vport->back;
8373 	int ret;
8374 
8375 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8376 
8377 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8378 	if (ret)
8379 		return ret;
8380 
8381 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8382 		/* add new VLAN tag */
8383 		ret = hclge_set_vlan_filter_hw(hdev,
8384 					       htons(vlan_info->vlan_proto),
8385 					       vport->vport_id,
8386 					       vlan_info->vlan_tag,
8387 					       false);
8388 		if (ret)
8389 			return ret;
8390 
8391 		/* remove old VLAN tag */
8392 		ret = hclge_set_vlan_filter_hw(hdev,
8393 					       htons(old_vlan_info->vlan_proto),
8394 					       vport->vport_id,
8395 					       old_vlan_info->vlan_tag,
8396 					       true);
8397 		if (ret)
8398 			return ret;
8399 
8400 		goto update;
8401 	}
8402 
8403 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8404 					       old_vlan_info);
8405 	if (ret)
8406 		return ret;
8407 
	/* update state only when disabling or enabling port based VLAN */
8409 	vport->port_base_vlan_cfg.state = state;
8410 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8411 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8412 	else
8413 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8414 
8415 update:
8416 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8417 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8418 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8419 
8420 	return 0;
8421 }
8422 
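/* Work out the new port based vlan state from the current state and the
 * requested vlan: a non-zero vlan enables or modifies it, vlan 0 disables
 * it, and a request matching the current setting is a no-op.
 */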
8423 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8424 					  enum hnae3_port_base_vlan_state state,
8425 					  u16 vlan)
8426 {
8427 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8428 		if (!vlan)
8429 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8430 		else
8431 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8432 	} else {
8433 		if (!vlan)
8434 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8435 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8436 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8437 		else
8438 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8439 	}
8440 }
8441 
8442 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8443 				    u16 vlan, u8 qos, __be16 proto)
8444 {
8445 	struct hclge_vport *vport = hclge_get_vport(handle);
8446 	struct hclge_dev *hdev = vport->back;
8447 	struct hclge_vlan_info vlan_info;
8448 	u16 state;
8449 	int ret;
8450 
8451 	if (hdev->pdev->revision == 0x20)
8452 		return -EOPNOTSUPP;
8453 
8454 	vport = hclge_get_vf_vport(hdev, vfid);
8455 	if (!vport)
8456 		return -EINVAL;
8457 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8459 	if (vlan > VLAN_N_VID - 1 || qos > 7)
8460 		return -EINVAL;
8461 	if (proto != htons(ETH_P_8021Q))
8462 		return -EPROTONOSUPPORT;
8463 
8464 	state = hclge_get_port_base_vlan_state(vport,
8465 					       vport->port_base_vlan_cfg.state,
8466 					       vlan);
8467 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8468 		return 0;
8469 
8470 	vlan_info.vlan_tag = vlan;
8471 	vlan_info.qos = qos;
8472 	vlan_info.vlan_proto = ntohs(proto);
8473 
8474 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8475 		return hclge_update_port_base_vlan_cfg(vport, state,
8476 						       &vlan_info);
8477 	} else {
8478 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8479 							vport->vport_id, state,
8480 							vlan, qos,
8481 							ntohs(proto));
8482 		return ret;
8483 	}
8484 }
8485 
8486 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8487 			  u16 vlan_id, bool is_kill)
8488 {
8489 	struct hclge_vport *vport = hclge_get_vport(handle);
8490 	struct hclge_dev *hdev = vport->back;
8491 	bool writen_to_tbl = false;
8492 	int ret = 0;
8493 
	/* When the device is resetting, firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the reset
	 * has finished.
	 */
8498 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8499 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8500 		return -EBUSY;
8501 	}
8502 
	/* When port based vlan is enabled, it is used as the vlan filter
	 * entry. In this case the vlan filter table is not updated when
	 * the user adds or removes a vlan; only the vport vlan list is
	 * updated. The vlan ids in that list are not written to the vlan
	 * filter table until port based vlan is disabled.
	 */
8509 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8510 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8511 					       vlan_id, is_kill);
8512 		writen_to_tbl = true;
8513 	}
8514 
8515 	if (!ret) {
8516 		if (is_kill)
8517 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8518 		else
8519 			hclge_add_vport_vlan_table(vport, vlan_id,
8520 						   writen_to_tbl);
8521 	} else if (is_kill) {
		/* When removing a hw vlan filter entry fails, record the
		 * vlan id and try to remove it from hw later, to stay
		 * consistent with the stack.
		 */
8526 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8527 	}
8528 	return ret;
8529 }
8530 
8531 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8532 {
8533 #define HCLGE_MAX_SYNC_COUNT	60
8534 
8535 	int i, ret, sync_cnt = 0;
8536 	u16 vlan_id;
8537 
	/* walk every vport and retry the vlan ids whose hw deletion failed */
8539 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8540 		struct hclge_vport *vport = &hdev->vport[i];
8541 
8542 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8543 					 VLAN_N_VID);
8544 		while (vlan_id != VLAN_N_VID) {
8545 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8546 						       vport->vport_id, vlan_id,
8547 						       true);
8548 			if (ret && ret != -EINVAL)
8549 				return;
8550 
8551 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8552 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8553 
8554 			sync_cnt++;
8555 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8556 				return;
8557 
8558 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8559 						 VLAN_N_VID);
8560 		}
8561 	}
8562 }
8563 
8564 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8565 {
8566 	struct hclge_config_max_frm_size_cmd *req;
8567 	struct hclge_desc desc;
8568 
8569 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8570 
8571 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8572 	req->max_frm_size = cpu_to_le16(new_mps);
8573 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8574 
8575 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8576 }
8577 
8578 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8579 {
8580 	struct hclge_vport *vport = hclge_get_vport(handle);
8581 
8582 	return hclge_set_vport_mtu(vport, new_mtu);
8583 }
8584 
8585 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8586 {
8587 	struct hclge_dev *hdev = vport->back;
8588 	int i, max_frm_size, ret;
8589 
	/* HW supports 2 layers of vlan tags */
8591 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8592 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8593 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8594 		return -EINVAL;
8595 
8596 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8597 	mutex_lock(&hdev->vport_lock);
8598 	/* VF's mps must fit within hdev->mps */
8599 	if (vport->vport_id && max_frm_size > hdev->mps) {
8600 		mutex_unlock(&hdev->vport_lock);
8601 		return -EINVAL;
8602 	} else if (vport->vport_id) {
8603 		vport->mps = max_frm_size;
8604 		mutex_unlock(&hdev->vport_lock);
8605 		return 0;
8606 	}
8607 
	/* PF's mps must be no less than any VF's mps */
8609 	for (i = 1; i < hdev->num_alloc_vport; i++)
8610 		if (max_frm_size < hdev->vport[i].mps) {
8611 			mutex_unlock(&hdev->vport_lock);
8612 			return -EINVAL;
8613 		}
8614 
8615 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8616 
8617 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8618 	if (ret) {
8619 		dev_err(&hdev->pdev->dev,
8620 			"Change mtu fail, ret =%d\n", ret);
8621 		goto out;
8622 	}
8623 
8624 	hdev->mps = max_frm_size;
8625 	vport->mps = max_frm_size;
8626 
8627 	ret = hclge_buffer_alloc(hdev);
8628 	if (ret)
8629 		dev_err(&hdev->pdev->dev,
8630 			"Allocate buffer fail, ret =%d\n", ret);
8631 
8632 out:
8633 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8634 	mutex_unlock(&hdev->vport_lock);
8635 	return ret;
8636 }
8637 
8638 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8639 				    bool enable)
8640 {
8641 	struct hclge_reset_tqp_queue_cmd *req;
8642 	struct hclge_desc desc;
8643 	int ret;
8644 
8645 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8646 
8647 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8648 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8649 	if (enable)
8650 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8651 
8652 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8653 	if (ret) {
8654 		dev_err(&hdev->pdev->dev,
8655 			"Send tqp reset cmd error, status =%d\n", ret);
8656 		return ret;
8657 	}
8658 
8659 	return 0;
8660 }
8661 
8662 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8663 {
8664 	struct hclge_reset_tqp_queue_cmd *req;
8665 	struct hclge_desc desc;
8666 	int ret;
8667 
8668 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8669 
8670 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8671 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8672 
8673 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8674 	if (ret) {
8675 		dev_err(&hdev->pdev->dev,
8676 			"Get reset status error, status =%d\n", ret);
8677 		return ret;
8678 	}
8679 
8680 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8681 }
8682 
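/* Convert a queue id that is local to the handle into the global TQP
 * index used by the firmware commands.
 */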
8683 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8684 {
8685 	struct hnae3_queue *queue;
8686 	struct hclge_tqp *tqp;
8687 
8688 	queue = handle->kinfo.tqp[queue_id];
8689 	tqp = container_of(queue, struct hclge_tqp, q);
8690 
8691 	return tqp->index;
8692 }
8693 
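/* Reset a single TQP: disable the queue, assert the TQP reset through
 * firmware, poll the reset status up to HCLGE_TQP_RESET_TRY_TIMES times,
 * then deassert the reset.
 */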
8694 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8695 {
8696 	struct hclge_vport *vport = hclge_get_vport(handle);
8697 	struct hclge_dev *hdev = vport->back;
8698 	int reset_try_times = 0;
8699 	int reset_status;
8700 	u16 queue_gid;
8701 	int ret;
8702 
8703 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8704 
8705 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8706 	if (ret) {
8707 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8708 		return ret;
8709 	}
8710 
8711 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8712 	if (ret) {
8713 		dev_err(&hdev->pdev->dev,
8714 			"Send reset tqp cmd fail, ret = %d\n", ret);
8715 		return ret;
8716 	}
8717 
8718 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8719 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8720 		if (reset_status)
8721 			break;
8722 
8723 		/* Wait for tqp hw reset */
8724 		usleep_range(1000, 1200);
8725 	}
8726 
8727 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8728 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		/* the last command succeeded, so report the timeout explicitly */
		return -ETIME;
8730 	}
8731 
8732 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8733 	if (ret)
8734 		dev_err(&hdev->pdev->dev,
8735 			"Deassert the soft reset fail, ret = %d\n", ret);
8736 
8737 	return ret;
8738 }
8739 
8740 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8741 {
8742 	struct hclge_dev *hdev = vport->back;
8743 	int reset_try_times = 0;
8744 	int reset_status;
8745 	u16 queue_gid;
8746 	int ret;
8747 
8748 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8749 
8750 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8751 	if (ret) {
8752 		dev_warn(&hdev->pdev->dev,
8753 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8754 		return;
8755 	}
8756 
8757 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8758 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8759 		if (reset_status)
8760 			break;
8761 
8762 		/* Wait for tqp hw reset */
8763 		usleep_range(1000, 1200);
8764 	}
8765 
8766 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8767 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8768 		return;
8769 	}
8770 
8771 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8772 	if (ret)
8773 		dev_warn(&hdev->pdev->dev,
8774 			 "Deassert the soft reset fail, ret = %d\n", ret);
8775 }
8776 
8777 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8778 {
8779 	struct hclge_vport *vport = hclge_get_vport(handle);
8780 	struct hclge_dev *hdev = vport->back;
8781 
8782 	return hdev->fw_version;
8783 }
8784 
8785 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8786 {
8787 	struct phy_device *phydev = hdev->hw.mac.phydev;
8788 
8789 	if (!phydev)
8790 		return;
8791 
8792 	phy_set_asym_pause(phydev, rx_en, tx_en);
8793 }
8794 
8795 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8796 {
8797 	int ret;
8798 
8799 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8800 		return 0;
8801 
8802 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8803 	if (ret)
8804 		dev_err(&hdev->pdev->dev,
8805 			"configure pauseparam error, ret = %d.\n", ret);
8806 
8807 	return ret;
8808 }
8809 
8810 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8811 {
8812 	struct phy_device *phydev = hdev->hw.mac.phydev;
8813 	u16 remote_advertising = 0;
8814 	u16 local_advertising;
8815 	u32 rx_pause, tx_pause;
8816 	u8 flowctl;
8817 
8818 	if (!phydev->link || !phydev->autoneg)
8819 		return 0;
8820 
8821 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8822 
8823 	if (phydev->pause)
8824 		remote_advertising = LPA_PAUSE_CAP;
8825 
8826 	if (phydev->asym_pause)
8827 		remote_advertising |= LPA_PAUSE_ASYM;
8828 
8829 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8830 					   remote_advertising);
8831 	tx_pause = flowctl & FLOW_CTRL_TX;
8832 	rx_pause = flowctl & FLOW_CTRL_RX;
8833 
8834 	if (phydev->duplex == HCLGE_MAC_HALF) {
8835 		tx_pause = 0;
8836 		rx_pause = 0;
8837 	}
8838 
8839 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8840 }
8841 
8842 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8843 				 u32 *rx_en, u32 *tx_en)
8844 {
8845 	struct hclge_vport *vport = hclge_get_vport(handle);
8846 	struct hclge_dev *hdev = vport->back;
8847 	struct phy_device *phydev = hdev->hw.mac.phydev;
8848 
8849 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8850 
8851 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8852 		*rx_en = 0;
8853 		*tx_en = 0;
8854 		return;
8855 	}
8856 
8857 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8858 		*rx_en = 1;
8859 		*tx_en = 0;
8860 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8861 		*tx_en = 1;
8862 		*rx_en = 0;
8863 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8864 		*rx_en = 1;
8865 		*tx_en = 1;
8866 	} else {
8867 		*rx_en = 0;
8868 		*tx_en = 0;
8869 	}
8870 }
8871 
8872 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8873 					 u32 rx_en, u32 tx_en)
8874 {
8875 	if (rx_en && tx_en)
8876 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8877 	else if (rx_en && !tx_en)
8878 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8879 	else if (!rx_en && tx_en)
8880 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8881 	else
8882 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8883 
8884 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8885 }
8886 
8887 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8888 				u32 rx_en, u32 tx_en)
8889 {
8890 	struct hclge_vport *vport = hclge_get_vport(handle);
8891 	struct hclge_dev *hdev = vport->back;
8892 	struct phy_device *phydev = hdev->hw.mac.phydev;
8893 	u32 fc_autoneg;
8894 
8895 	if (phydev) {
8896 		fc_autoneg = hclge_get_autoneg(handle);
8897 		if (auto_neg != fc_autoneg) {
8898 			dev_info(&hdev->pdev->dev,
8899 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8900 			return -EOPNOTSUPP;
8901 		}
8902 	}
8903 
8904 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8905 		dev_info(&hdev->pdev->dev,
8906 			 "Priority flow control enabled. Cannot set link flow control.\n");
8907 		return -EOPNOTSUPP;
8908 	}
8909 
8910 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8911 
8912 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8913 
8914 	if (!auto_neg)
8915 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8916 
8917 	if (phydev)
8918 		return phy_start_aneg(phydev);
8919 
8920 	return -EOPNOTSUPP;
8921 }
8922 
8923 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8924 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8925 {
8926 	struct hclge_vport *vport = hclge_get_vport(handle);
8927 	struct hclge_dev *hdev = vport->back;
8928 
8929 	if (speed)
8930 		*speed = hdev->hw.mac.speed;
8931 	if (duplex)
8932 		*duplex = hdev->hw.mac.duplex;
8933 	if (auto_neg)
8934 		*auto_neg = hdev->hw.mac.autoneg;
8935 }
8936 
8937 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8938 				 u8 *module_type)
8939 {
8940 	struct hclge_vport *vport = hclge_get_vport(handle);
8941 	struct hclge_dev *hdev = vport->back;
8942 
8943 	if (media_type)
8944 		*media_type = hdev->hw.mac.media_type;
8945 
8946 	if (module_type)
8947 		*module_type = hdev->hw.mac.module_type;
8948 }
8949 
8950 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8951 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8952 {
8953 	struct hclge_vport *vport = hclge_get_vport(handle);
8954 	struct hclge_dev *hdev = vport->back;
8955 	struct phy_device *phydev = hdev->hw.mac.phydev;
8956 	int mdix_ctrl, mdix, is_resolved;
8957 	unsigned int retval;
8958 
8959 	if (!phydev) {
8960 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8961 		*tp_mdix = ETH_TP_MDI_INVALID;
8962 		return;
8963 	}
8964 
8965 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8966 
8967 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8968 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8969 				    HCLGE_PHY_MDIX_CTRL_S);
8970 
8971 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8972 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8973 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8974 
8975 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8976 
8977 	switch (mdix_ctrl) {
8978 	case 0x0:
8979 		*tp_mdix_ctrl = ETH_TP_MDI;
8980 		break;
8981 	case 0x1:
8982 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8983 		break;
8984 	case 0x3:
8985 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8986 		break;
8987 	default:
8988 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8989 		break;
8990 	}
8991 
8992 	if (!is_resolved)
8993 		*tp_mdix = ETH_TP_MDI_INVALID;
8994 	else if (mdix)
8995 		*tp_mdix = ETH_TP_MDI_X;
8996 	else
8997 		*tp_mdix = ETH_TP_MDI;
8998 }
8999 
9000 static void hclge_info_show(struct hclge_dev *hdev)
9001 {
9002 	struct device *dev = &hdev->pdev->dev;
9003 
9004 	dev_info(dev, "PF info begin:\n");
9005 
	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Number of vmdq vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9012 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9013 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9014 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9015 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9016 	dev_info(dev, "This is %s PF\n",
9017 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9018 	dev_info(dev, "DCB %s\n",
9019 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9020 	dev_info(dev, "MQPRIO %s\n",
9021 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9022 
9023 	dev_info(dev, "PF info end.\n");
9024 }
9025 
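/* Initialize the NIC client instance for a vport. If a reset starts or
 * completes while the client is initializing, the registration is rolled
 * back and -EBUSY is returned.
 */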
9026 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9027 					  struct hclge_vport *vport)
9028 {
9029 	struct hnae3_client *client = vport->nic.client;
9030 	struct hclge_dev *hdev = ae_dev->priv;
9031 	int rst_cnt = hdev->rst_stats.reset_cnt;
9032 	int ret;
9033 
9034 	ret = client->ops->init_instance(&vport->nic);
9035 	if (ret)
9036 		return ret;
9037 
9038 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9039 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9040 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9041 		ret = -EBUSY;
9042 		goto init_nic_err;
9043 	}
9044 
9045 	/* Enable nic hw error interrupts */
9046 	ret = hclge_config_nic_hw_error(hdev, true);
9047 	if (ret) {
9048 		dev_err(&ae_dev->pdev->dev,
9049 			"fail(%d) to enable hw error interrupts\n", ret);
9050 		goto init_nic_err;
9051 	}
9052 
9053 	hnae3_set_client_init_flag(client, ae_dev, 1);
9054 
9055 	if (netif_msg_drv(&hdev->vport->nic))
9056 		hclge_info_show(hdev);
9057 
9058 	return ret;
9059 
9060 init_nic_err:
9061 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9062 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9063 		msleep(HCLGE_WAIT_RESET_DONE);
9064 
9065 	client->ops->uninit_instance(&vport->nic, 0);
9066 
9067 	return ret;
9068 }
9069 
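/* Initialize the RoCE client instance. This is a no-op unless the device
 * supports RoCE and both the NIC and RoCE clients have been registered;
 * as with the NIC client, the registration is undone if a reset occurs
 * during initialization.
 */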
9070 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9071 					   struct hclge_vport *vport)
9072 {
9073 	struct hnae3_client *client = vport->roce.client;
9074 	struct hclge_dev *hdev = ae_dev->priv;
9075 	int rst_cnt;
9076 	int ret;
9077 
9078 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9079 	    !hdev->nic_client)
9080 		return 0;
9081 
9082 	client = hdev->roce_client;
9083 	ret = hclge_init_roce_base_info(vport);
9084 	if (ret)
9085 		return ret;
9086 
9087 	rst_cnt = hdev->rst_stats.reset_cnt;
9088 	ret = client->ops->init_instance(&vport->roce);
9089 	if (ret)
9090 		return ret;
9091 
9092 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9093 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9094 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9095 		ret = -EBUSY;
9096 		goto init_roce_err;
9097 	}
9098 
9099 	/* Enable roce ras interrupts */
9100 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9101 	if (ret) {
9102 		dev_err(&ae_dev->pdev->dev,
9103 			"fail(%d) to enable roce ras interrupts\n", ret);
9104 		goto init_roce_err;
9105 	}
9106 
9107 	hnae3_set_client_init_flag(client, ae_dev, 1);
9108 
9109 	return 0;
9110 
9111 init_roce_err:
9112 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9113 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9114 		msleep(HCLGE_WAIT_RESET_DONE);
9115 
9116 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9117 
9118 	return ret;
9119 }
9120 
9121 static int hclge_init_client_instance(struct hnae3_client *client,
9122 				      struct hnae3_ae_dev *ae_dev)
9123 {
9124 	struct hclge_dev *hdev = ae_dev->priv;
9125 	struct hclge_vport *vport;
9126 	int i, ret;
9127 
	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9129 		vport = &hdev->vport[i];
9130 
9131 		switch (client->type) {
9132 		case HNAE3_CLIENT_KNIC:
9133 			hdev->nic_client = client;
9134 			vport->nic.client = client;
9135 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9136 			if (ret)
9137 				goto clear_nic;
9138 
9139 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9140 			if (ret)
9141 				goto clear_roce;
9142 
9143 			break;
9144 		case HNAE3_CLIENT_ROCE:
9145 			if (hnae3_dev_roce_supported(hdev)) {
9146 				hdev->roce_client = client;
9147 				vport->roce.client = client;
9148 			}
9149 
9150 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9151 			if (ret)
9152 				goto clear_roce;
9153 
9154 			break;
9155 		default:
9156 			return -EINVAL;
9157 		}
9158 	}
9159 
9160 	return 0;
9161 
9162 clear_nic:
9163 	hdev->nic_client = NULL;
9164 	vport->nic.client = NULL;
9165 	return ret;
9166 clear_roce:
9167 	hdev->roce_client = NULL;
9168 	vport->roce.client = NULL;
9169 	return ret;
9170 }
9171 
9172 static void hclge_uninit_client_instance(struct hnae3_client *client,
9173 					 struct hnae3_ae_dev *ae_dev)
9174 {
9175 	struct hclge_dev *hdev = ae_dev->priv;
9176 	struct hclge_vport *vport;
9177 	int i;
9178 
9179 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9180 		vport = &hdev->vport[i];
9181 		if (hdev->roce_client) {
9182 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9183 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9184 				msleep(HCLGE_WAIT_RESET_DONE);
9185 
9186 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9187 								0);
9188 			hdev->roce_client = NULL;
9189 			vport->roce.client = NULL;
9190 		}
9191 		if (client->type == HNAE3_CLIENT_ROCE)
9192 			return;
9193 		if (hdev->nic_client && client->ops->uninit_instance) {
9194 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9195 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9196 				msleep(HCLGE_WAIT_RESET_DONE);
9197 
9198 			client->ops->uninit_instance(&vport->nic, 0);
9199 			hdev->nic_client = NULL;
9200 			vport->nic.client = NULL;
9201 		}
9202 	}
9203 }
9204 
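/* Enable the PCI device, set a 64-bit DMA mask (falling back to 32-bit),
 * map BAR2 for the configuration register space and record how many VFs
 * the device can expose.
 */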
9205 static int hclge_pci_init(struct hclge_dev *hdev)
9206 {
9207 	struct pci_dev *pdev = hdev->pdev;
9208 	struct hclge_hw *hw;
9209 	int ret;
9210 
9211 	ret = pci_enable_device(pdev);
9212 	if (ret) {
9213 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9214 		return ret;
9215 	}
9216 
9217 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9218 	if (ret) {
9219 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9220 		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA\n");
9223 			goto err_disable_device;
9224 		}
9225 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9226 	}
9227 
9228 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9229 	if (ret) {
9230 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9231 		goto err_disable_device;
9232 	}
9233 
9234 	pci_set_master(pdev);
9235 	hw = &hdev->hw;
9236 	hw->io_base = pcim_iomap(pdev, 2, 0);
9237 	if (!hw->io_base) {
9238 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9239 		ret = -ENOMEM;
9240 		goto err_clr_master;
9241 	}
9242 
9243 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9244 
9245 	return 0;
9246 err_clr_master:
9247 	pci_clear_master(pdev);
9248 	pci_release_regions(pdev);
9249 err_disable_device:
9250 	pci_disable_device(pdev);
9251 
9252 	return ret;
9253 }
9254 
9255 static void hclge_pci_uninit(struct hclge_dev *hdev)
9256 {
9257 	struct pci_dev *pdev = hdev->pdev;
9258 
9259 	pcim_iounmap(pdev, hdev->hw.io_base);
9260 	pci_free_irq_vectors(pdev);
9261 	pci_clear_master(pdev);
9262 	pci_release_mem_regions(pdev);
9263 	pci_disable_device(pdev);
9264 }
9265 
9266 static void hclge_state_init(struct hclge_dev *hdev)
9267 {
9268 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9269 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9270 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9271 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9272 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9273 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9274 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9275 }
9276 
9277 static void hclge_state_uninit(struct hclge_dev *hdev)
9278 {
9279 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9280 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9281 
9282 	if (hdev->reset_timer.function)
9283 		del_timer_sync(&hdev->reset_timer);
9284 	if (hdev->service_task.work.func)
9285 		cancel_delayed_work_sync(&hdev->service_task);
9286 }
9287 
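/* Prepare for an FLR: take the reset semaphore, mark the reset as being
 * handled and run the common reset preparation, retrying a limited number
 * of times on failure. The misc vector is disabled until the FLR is done.
 */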
9288 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9289 {
9290 #define HCLGE_FLR_RETRY_WAIT_MS	500
9291 #define HCLGE_FLR_RETRY_CNT	5
9292 
9293 	struct hclge_dev *hdev = ae_dev->priv;
9294 	int retry_cnt = 0;
9295 	int ret;
9296 
9297 retry:
9298 	down(&hdev->reset_sem);
9299 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9300 	hdev->reset_type = HNAE3_FLR_RESET;
9301 	ret = hclge_reset_prepare(hdev);
9302 	if (ret) {
9303 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9304 			ret);
9305 		if (hdev->reset_pending ||
9306 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9307 			dev_err(&hdev->pdev->dev,
9308 				"reset_pending:0x%lx, retry_cnt:%d\n",
9309 				hdev->reset_pending, retry_cnt);
9310 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9311 			up(&hdev->reset_sem);
9312 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
9313 			goto retry;
9314 		}
9315 	}
9316 
9317 	/* disable misc vector before FLR done */
9318 	hclge_enable_vector(&hdev->misc_vector, false);
9319 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9320 	hdev->rst_stats.flr_rst_cnt++;
9321 }
9322 
9323 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9324 {
9325 	struct hclge_dev *hdev = ae_dev->priv;
9326 	int ret;
9327 
9328 	hclge_enable_vector(&hdev->misc_vector, true);
9329 
9330 	ret = hclge_reset_rebuild(hdev);
9331 	if (ret)
9332 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9333 
9334 	hdev->reset_type = HNAE3_NONE_RESET;
9335 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9336 	up(&hdev->reset_sem);
9337 }
9338 
9339 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9340 {
9341 	u16 i;
9342 
9343 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9344 		struct hclge_vport *vport = &hdev->vport[i];
9345 		int ret;
9346 
		/* Send cmd to clear VF's FUNC_RST_ING */
9348 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9349 		if (ret)
9350 			dev_warn(&hdev->pdev->dev,
9351 				 "clear vf(%u) rst failed %d!\n",
9352 				 vport->vport_id, ret);
9353 	}
9354 }
9355 
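/* Main PF initialization path: bring up PCI, the command queue, MSI/MSI-X
 * and the misc interrupt, then configure TQPs, vports, MAC, VLAN, TM, RSS
 * and the flow director before enabling the misc vector and scheduling the
 * periodic service task.
 */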
9356 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9357 {
9358 	struct pci_dev *pdev = ae_dev->pdev;
9359 	struct hclge_dev *hdev;
9360 	int ret;
9361 
9362 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9363 	if (!hdev) {
9364 		ret = -ENOMEM;
9365 		goto out;
9366 	}
9367 
9368 	hdev->pdev = pdev;
9369 	hdev->ae_dev = ae_dev;
9370 	hdev->reset_type = HNAE3_NONE_RESET;
9371 	hdev->reset_level = HNAE3_FUNC_RESET;
9372 	ae_dev->priv = hdev;
9373 
	/* HW supports two-layer VLAN */
9375 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9376 
9377 	mutex_init(&hdev->vport_lock);
9378 	spin_lock_init(&hdev->fd_rule_lock);
9379 	sema_init(&hdev->reset_sem, 1);
9380 
9381 	ret = hclge_pci_init(hdev);
9382 	if (ret)
9383 		goto out;
9384 
	/* Initialize the firmware command queue */
9386 	ret = hclge_cmd_queue_init(hdev);
9387 	if (ret)
9388 		goto err_pci_uninit;
9389 
	/* Initialize the firmware command interface */
9391 	ret = hclge_cmd_init(hdev);
9392 	if (ret)
9393 		goto err_cmd_uninit;
9394 
9395 	ret = hclge_get_cap(hdev);
9396 	if (ret)
9397 		goto err_cmd_uninit;
9398 
9399 	ret = hclge_configure(hdev);
9400 	if (ret) {
9401 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9402 		goto err_cmd_uninit;
9403 	}
9404 
9405 	ret = hclge_init_msi(hdev);
9406 	if (ret) {
9407 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9408 		goto err_cmd_uninit;
9409 	}
9410 
9411 	ret = hclge_misc_irq_init(hdev);
9412 	if (ret)
9413 		goto err_msi_uninit;
9414 
9415 	ret = hclge_alloc_tqps(hdev);
9416 	if (ret) {
9417 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9418 		goto err_msi_irq_uninit;
9419 	}
9420 
9421 	ret = hclge_alloc_vport(hdev);
9422 	if (ret)
9423 		goto err_msi_irq_uninit;
9424 
9425 	ret = hclge_map_tqp(hdev);
9426 	if (ret)
9427 		goto err_msi_irq_uninit;
9428 
9429 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9430 		ret = hclge_mac_mdio_config(hdev);
9431 		if (ret)
9432 			goto err_msi_irq_uninit;
9433 	}
9434 
9435 	ret = hclge_init_umv_space(hdev);
9436 	if (ret)
9437 		goto err_mdiobus_unreg;
9438 
9439 	ret = hclge_mac_init(hdev);
9440 	if (ret) {
9441 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9442 		goto err_mdiobus_unreg;
9443 	}
9444 
9445 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9446 	if (ret) {
9447 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9448 		goto err_mdiobus_unreg;
9449 	}
9450 
9451 	ret = hclge_config_gro(hdev, true);
9452 	if (ret)
9453 		goto err_mdiobus_unreg;
9454 
9455 	ret = hclge_init_vlan_config(hdev);
9456 	if (ret) {
9457 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9458 		goto err_mdiobus_unreg;
9459 	}
9460 
9461 	ret = hclge_tm_schd_init(hdev);
9462 	if (ret) {
9463 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9464 		goto err_mdiobus_unreg;
9465 	}
9466 
9467 	hclge_rss_init_cfg(hdev);
9468 	ret = hclge_rss_init_hw(hdev);
9469 	if (ret) {
9470 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9471 		goto err_mdiobus_unreg;
9472 	}
9473 
9474 	ret = init_mgr_tbl(hdev);
9475 	if (ret) {
9476 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9477 		goto err_mdiobus_unreg;
9478 	}
9479 
9480 	ret = hclge_init_fd_config(hdev);
9481 	if (ret) {
9482 		dev_err(&pdev->dev,
9483 			"fd table init fail, ret=%d\n", ret);
9484 		goto err_mdiobus_unreg;
9485 	}
9486 
9487 	INIT_KFIFO(hdev->mac_tnl_log);
9488 
9489 	hclge_dcb_ops_set(hdev);
9490 
9491 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9492 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9493 
	/* Set up affinity after the service timer is set up, because
	 * add_timer_on() is called from the affinity notify callback.
	 */
9497 	hclge_misc_affinity_setup(hdev);
9498 
9499 	hclge_clear_all_event_cause(hdev);
9500 	hclge_clear_resetting_state(hdev);
9501 
	/* Log and clear the hw errors that have already occurred */
9503 	hclge_handle_all_hns_hw_errors(ae_dev);
9504 
	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on one PF would interfere with the pending
	 * initialization of the other PFs.
	 */
9508 	if (ae_dev->hw_err_reset_req) {
9509 		enum hnae3_reset_type reset_level;
9510 
9511 		reset_level = hclge_get_reset_level(ae_dev,
9512 						    &ae_dev->hw_err_reset_req);
9513 		hclge_set_def_reset_request(ae_dev, reset_level);
9514 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9515 	}
9516 
	/* Enable MISC vector (vector0) */
9518 	hclge_enable_vector(&hdev->misc_vector, true);
9519 
9520 	hclge_state_init(hdev);
9521 	hdev->last_reset_time = jiffies;
9522 
9523 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9524 		 HCLGE_DRIVER_NAME);
9525 
9526 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9527 
9528 	return 0;
9529 
9530 err_mdiobus_unreg:
9531 	if (hdev->hw.mac.phydev)
9532 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9533 err_msi_irq_uninit:
9534 	hclge_misc_irq_uninit(hdev);
9535 err_msi_uninit:
9536 	pci_free_irq_vectors(pdev);
9537 err_cmd_uninit:
9538 	hclge_cmd_uninit(hdev);
9539 err_pci_uninit:
9540 	pcim_iounmap(pdev, hdev->hw.io_base);
9541 	pci_clear_master(pdev);
9542 	pci_release_regions(pdev);
9543 	pci_disable_device(pdev);
9544 out:
9545 	return ret;
9546 }
9547 
9548 static void hclge_stats_clear(struct hclge_dev *hdev)
9549 {
9550 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9551 }
9552 
9553 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9554 {
9555 	return hclge_config_switch_param(hdev, vf, enable,
9556 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9557 }
9558 
9559 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9560 {
9561 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9562 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
9563 					  enable, vf);
9564 }
9565 
9566 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9567 {
9568 	int ret;
9569 
9570 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9571 	if (ret) {
9572 		dev_err(&hdev->pdev->dev,
9573 			"Set vf %d mac spoof check %s failed, ret=%d\n",
9574 			vf, enable ? "on" : "off", ret);
9575 		return ret;
9576 	}
9577 
9578 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9579 	if (ret)
9580 		dev_err(&hdev->pdev->dev,
9581 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
9582 			vf, enable ? "on" : "off", ret);
9583 
9584 	return ret;
9585 }
9586 
9587 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9588 				 bool enable)
9589 {
9590 	struct hclge_vport *vport = hclge_get_vport(handle);
9591 	struct hclge_dev *hdev = vport->back;
9592 	u32 new_spoofchk = enable ? 1 : 0;
9593 	int ret;
9594 
9595 	if (hdev->pdev->revision == 0x20)
9596 		return -EOPNOTSUPP;
9597 
9598 	vport = hclge_get_vf_vport(hdev, vf);
9599 	if (!vport)
9600 		return -EINVAL;
9601 
9602 	if (vport->vf_info.spoofchk == new_spoofchk)
9603 		return 0;
9604 
9605 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9606 		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enabling spoof check may cause its packet transmission to fail\n",
9608 			 vf);
9609 	else if (enable && hclge_is_umv_space_full(vport))
9610 		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enabling spoof check may cause its packet transmission to fail\n",
9612 			 vf);
9613 
9614 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9615 	if (ret)
9616 		return ret;
9617 
9618 	vport->vf_info.spoofchk = new_spoofchk;
9619 	return 0;
9620 }
9621 
9622 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9623 {
9624 	struct hclge_vport *vport = hdev->vport;
9625 	int ret;
9626 	int i;
9627 
9628 	if (hdev->pdev->revision == 0x20)
9629 		return 0;
9630 
9631 	/* resume the vf spoof check state after reset */
9632 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9633 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9634 					       vport->vf_info.spoofchk);
9635 		if (ret)
9636 			return ret;
9637 
9638 		vport++;
9639 	}
9640 
9641 	return 0;
9642 }
9643 
9644 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9645 {
9646 	struct hclge_vport *vport = hclge_get_vport(handle);
9647 	struct hclge_dev *hdev = vport->back;
9648 	u32 new_trusted = enable ? 1 : 0;
9649 	bool en_bc_pmc;
9650 	int ret;
9651 
9652 	vport = hclge_get_vf_vport(hdev, vf);
9653 	if (!vport)
9654 		return -EINVAL;
9655 
9656 	if (vport->vf_info.trusted == new_trusted)
9657 		return 0;
9658 
9659 	/* Disable promisc mode for VF if it is not trusted any more. */
9660 	if (!enable && vport->vf_info.promisc_enable) {
9661 		en_bc_pmc = hdev->pdev->revision != 0x20;
9662 		ret = hclge_set_vport_promisc_mode(vport, false, false,
9663 						   en_bc_pmc);
9664 		if (ret)
9665 			return ret;
9666 		vport->vf_info.promisc_enable = 0;
9667 		hclge_inform_vf_promisc_info(vport);
9668 	}
9669 
9670 	vport->vf_info.trusted = new_trusted;
9671 
9672 	return 0;
9673 }
9674 
9675 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9676 {
9677 	int ret;
9678 	int vf;
9679 
9680 	/* reset vf rate to default value */
9681 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9682 		struct hclge_vport *vport = &hdev->vport[vf];
9683 
9684 		vport->vf_info.max_tx_rate = 0;
9685 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9686 		if (ret)
9687 			dev_err(&hdev->pdev->dev,
9688 				"vf%d failed to reset to default, ret=%d\n",
9689 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9690 	}
9691 }
9692 
9693 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9694 				     int min_tx_rate, int max_tx_rate)
9695 {
9696 	if (min_tx_rate != 0 ||
9697 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9698 		dev_err(&hdev->pdev->dev,
9699 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9700 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9701 		return -EINVAL;
9702 	}
9703 
9704 	return 0;
9705 }
9706 
9707 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9708 			     int min_tx_rate, int max_tx_rate, bool force)
9709 {
9710 	struct hclge_vport *vport = hclge_get_vport(handle);
9711 	struct hclge_dev *hdev = vport->back;
9712 	int ret;
9713 
9714 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9715 	if (ret)
9716 		return ret;
9717 
9718 	vport = hclge_get_vf_vport(hdev, vf);
9719 	if (!vport)
9720 		return -EINVAL;
9721 
9722 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9723 		return 0;
9724 
9725 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9726 	if (ret)
9727 		return ret;
9728 
9729 	vport->vf_info.max_tx_rate = max_tx_rate;
9730 
9731 	return 0;
9732 }
9733 
9734 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9735 {
9736 	struct hnae3_handle *handle = &hdev->vport->nic;
9737 	struct hclge_vport *vport;
9738 	int ret;
9739 	int vf;
9740 
9741 	/* resume the vf max_tx_rate after reset */
9742 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9743 		vport = hclge_get_vf_vport(hdev, vf);
9744 		if (!vport)
9745 			return -EINVAL;
9746 
		/* Zero means maximum rate; after a reset the firmware has
		 * already restored the maximum rate, so just continue.
		 */
9750 		if (!vport->vf_info.max_tx_rate)
9751 			continue;
9752 
9753 		ret = hclge_set_vf_rate(handle, vf, 0,
9754 					vport->vf_info.max_tx_rate, true);
9755 		if (ret) {
9756 			dev_err(&hdev->pdev->dev,
9757 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
9758 				vf, vport->vf_info.max_tx_rate, ret);
9759 			return ret;
9760 		}
9761 	}
9762 
9763 	return 0;
9764 }
9765 
9766 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9767 {
9768 	struct hclge_vport *vport = hdev->vport;
9769 	int i;
9770 
9771 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9772 		hclge_vport_stop(vport);
9773 		vport++;
9774 	}
9775 }
9776 
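/* Rebuild the hardware state after a reset: reinitialize the command
 * queue, TQP mapping, MAC, VLAN, TM, RSS and flow director, re-enable the
 * hw error interrupts and restore the per-VF spoof check and rate
 * settings.
 */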
9777 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9778 {
9779 	struct hclge_dev *hdev = ae_dev->priv;
9780 	struct pci_dev *pdev = ae_dev->pdev;
9781 	int ret;
9782 
9783 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9784 
9785 	hclge_stats_clear(hdev);
9786 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9787 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9788 
9789 	ret = hclge_cmd_init(hdev);
9790 	if (ret) {
9791 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9792 		return ret;
9793 	}
9794 
9795 	ret = hclge_map_tqp(hdev);
9796 	if (ret) {
9797 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9798 		return ret;
9799 	}
9800 
9801 	hclge_reset_umv_space(hdev);
9802 
9803 	ret = hclge_mac_init(hdev);
9804 	if (ret) {
9805 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9806 		return ret;
9807 	}
9808 
9809 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9810 	if (ret) {
9811 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9812 		return ret;
9813 	}
9814 
9815 	ret = hclge_config_gro(hdev, true);
9816 	if (ret)
9817 		return ret;
9818 
9819 	ret = hclge_init_vlan_config(hdev);
9820 	if (ret) {
9821 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9822 		return ret;
9823 	}
9824 
9825 	ret = hclge_tm_init_hw(hdev, true);
9826 	if (ret) {
9827 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9828 		return ret;
9829 	}
9830 
9831 	ret = hclge_rss_init_hw(hdev);
9832 	if (ret) {
9833 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9834 		return ret;
9835 	}
9836 
9837 	ret = hclge_init_fd_config(hdev);
9838 	if (ret) {
9839 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9840 		return ret;
9841 	}
9842 
	/* Log and clear the hw errors that have already occurred */
9844 	hclge_handle_all_hns_hw_errors(ae_dev);
9845 
	/* Re-enable the hw error interrupts because they are
	 * disabled during a global reset.
	 */
9849 	ret = hclge_config_nic_hw_error(hdev, true);
9850 	if (ret) {
9851 		dev_err(&pdev->dev,
9852 			"fail(%d) to re-enable NIC hw error interrupts\n",
9853 			ret);
9854 		return ret;
9855 	}
9856 
9857 	if (hdev->roce_client) {
9858 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9859 		if (ret) {
9860 			dev_err(&pdev->dev,
9861 				"fail(%d) to re-enable roce ras interrupts\n",
9862 				ret);
9863 			return ret;
9864 		}
9865 	}
9866 
9867 	hclge_reset_vport_state(hdev);
9868 	ret = hclge_reset_vport_spoofchk(hdev);
9869 	if (ret)
9870 		return ret;
9871 
9872 	ret = hclge_resume_vf_rate(hdev);
9873 	if (ret)
9874 		return ret;
9875 
9876 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9877 		 HCLGE_DRIVER_NAME);
9878 
9879 	return 0;
9880 }
9881 
9882 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9883 {
9884 	struct hclge_dev *hdev = ae_dev->priv;
9885 	struct hclge_mac *mac = &hdev->hw.mac;
9886 
9887 	hclge_reset_vf_rate(hdev);
9888 	hclge_misc_affinity_teardown(hdev);
9889 	hclge_state_uninit(hdev);
9890 
9891 	if (mac->phydev)
9892 		mdiobus_unregister(mac->mdio_bus);
9893 
9894 	hclge_uninit_umv_space(hdev);
9895 
	/* Disable MISC vector (vector0) */
9897 	hclge_enable_vector(&hdev->misc_vector, false);
9898 	synchronize_irq(hdev->misc_vector.vector_irq);
9899 
9900 	/* Disable all hw interrupts */
9901 	hclge_config_mac_tnl_int(hdev, false);
9902 	hclge_config_nic_hw_error(hdev, false);
9903 	hclge_config_rocee_ras_interrupt(hdev, false);
9904 
9905 	hclge_cmd_uninit(hdev);
9906 	hclge_misc_irq_uninit(hdev);
9907 	hclge_pci_uninit(hdev);
9908 	mutex_destroy(&hdev->vport_lock);
9909 	hclge_uninit_vport_mac_table(hdev);
9910 	hclge_uninit_vport_vlan_table(hdev);
9911 	ae_dev->priv = NULL;
9912 }
9913 
9914 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9915 {
9916 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9917 	struct hclge_vport *vport = hclge_get_vport(handle);
9918 	struct hclge_dev *hdev = vport->back;
9919 
9920 	return min_t(u32, hdev->rss_size_max,
9921 		     vport->alloc_tqps / kinfo->num_tc);
9922 }
9923 
9924 static void hclge_get_channels(struct hnae3_handle *handle,
9925 			       struct ethtool_channels *ch)
9926 {
9927 	ch->max_combined = hclge_get_max_channels(handle);
9928 	ch->other_count = 1;
9929 	ch->max_other = 1;
9930 	ch->combined_count = handle->kinfo.rss_size;
9931 }
9932 
9933 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9934 					u16 *alloc_tqps, u16 *max_rss_size)
9935 {
9936 	struct hclge_vport *vport = hclge_get_vport(handle);
9937 	struct hclge_dev *hdev = vport->back;
9938 
9939 	*alloc_tqps = vport->alloc_tqps;
9940 	*max_rss_size = hdev->rss_size_max;
9941 }
9942 
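/* Change the number of enabled queue pairs: record the requested RSS size,
 * update the vport TQP mapping, reprogram the RSS TC mode and, unless the
 * user has already configured it, rebuild the RSS indirection table.
 */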
9943 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9944 			      bool rxfh_configured)
9945 {
9946 	struct hclge_vport *vport = hclge_get_vport(handle);
9947 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9948 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9949 	struct hclge_dev *hdev = vport->back;
9950 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9951 	u16 cur_rss_size = kinfo->rss_size;
9952 	u16 cur_tqps = kinfo->num_tqps;
9953 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9954 	u16 roundup_size;
9955 	u32 *rss_indir;
9956 	unsigned int i;
9957 	int ret;
9958 
9959 	kinfo->req_rss_size = new_tqps_num;
9960 
9961 	ret = hclge_tm_vport_map_update(hdev);
9962 	if (ret) {
9963 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9964 		return ret;
9965 	}
9966 
9967 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9968 	roundup_size = ilog2(roundup_size);
9969 	/* Set the RSS TC mode according to the new RSS size */
9970 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9971 		tc_valid[i] = 0;
9972 
9973 		if (!(hdev->hw_tc_map & BIT(i)))
9974 			continue;
9975 
9976 		tc_valid[i] = 1;
9977 		tc_size[i] = roundup_size;
9978 		tc_offset[i] = kinfo->rss_size * i;
9979 	}
9980 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9981 	if (ret)
9982 		return ret;
9983 
	/* RSS indirection table has been configured by the user */
9985 	if (rxfh_configured)
9986 		goto out;
9987 
	/* Reinitialize the RSS indirection table according to the new RSS size */
9989 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9990 	if (!rss_indir)
9991 		return -ENOMEM;
9992 
9993 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9994 		rss_indir[i] = i % kinfo->rss_size;
9995 
9996 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9997 	if (ret)
9998 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9999 			ret);
10000 
10001 	kfree(rss_indir);
10002 
10003 out:
10004 	if (!ret)
10005 		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10007 			 cur_rss_size, kinfo->rss_size,
10008 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10009 
10010 	return ret;
10011 }
10012 
10013 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10014 			      u32 *regs_num_64_bit)
10015 {
10016 	struct hclge_desc desc;
10017 	u32 total_num;
10018 	int ret;
10019 
10020 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10021 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10022 	if (ret) {
10023 		dev_err(&hdev->pdev->dev,
10024 			"Query register number cmd failed, ret = %d.\n", ret);
10025 		return ret;
10026 	}
10027 
10028 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10029 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10030 
10031 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10032 	if (!total_num)
10033 		return -EINVAL;
10034 
10035 	return 0;
10036 }
10037 
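/* Read the 32-bit register dump. The first descriptor's two header words
 * carry no register data, so only six values are taken from it, while the
 * following descriptors are consumed whole, as eight 32-bit words each.
 */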
10038 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10039 				 void *data)
10040 {
10041 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10042 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10043 
10044 	struct hclge_desc *desc;
10045 	u32 *reg_val = data;
10046 	__le32 *desc_data;
10047 	int nodata_num;
10048 	int cmd_num;
10049 	int i, k, n;
10050 	int ret;
10051 
10052 	if (regs_num == 0)
10053 		return 0;
10054 
10055 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10056 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10057 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10058 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10059 	if (!desc)
10060 		return -ENOMEM;
10061 
10062 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10063 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10064 	if (ret) {
10065 		dev_err(&hdev->pdev->dev,
10066 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10067 		kfree(desc);
10068 		return ret;
10069 	}
10070 
10071 	for (i = 0; i < cmd_num; i++) {
10072 		if (i == 0) {
10073 			desc_data = (__le32 *)(&desc[i].data[0]);
10074 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10075 		} else {
10076 			desc_data = (__le32 *)(&desc[i]);
10077 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10078 		}
10079 		for (k = 0; k < n; k++) {
10080 			*reg_val++ = le32_to_cpu(*desc_data++);
10081 
10082 			regs_num--;
10083 			if (!regs_num)
10084 				break;
10085 		}
10086 	}
10087 
10088 	kfree(desc);
10089 	return 0;
10090 }
10091 
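/* Read the 64-bit register dump; the layout mirrors the 32-bit variant,
 * with one 64-bit header word excluded from the first descriptor and the
 * remaining descriptors consumed as four 64-bit words each.
 */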
10092 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10093 				 void *data)
10094 {
10095 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10096 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10097 
10098 	struct hclge_desc *desc;
10099 	u64 *reg_val = data;
10100 	__le64 *desc_data;
10101 	int nodata_len;
10102 	int cmd_num;
10103 	int i, k, n;
10104 	int ret;
10105 
10106 	if (regs_num == 0)
10107 		return 0;
10108 
10109 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10110 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10111 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10112 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10113 	if (!desc)
10114 		return -ENOMEM;
10115 
10116 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10117 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10118 	if (ret) {
10119 		dev_err(&hdev->pdev->dev,
10120 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10121 		kfree(desc);
10122 		return ret;
10123 	}
10124 
10125 	for (i = 0; i < cmd_num; i++) {
10126 		if (i == 0) {
10127 			desc_data = (__le64 *)(&desc[i].data[0]);
10128 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10129 		} else {
10130 			desc_data = (__le64 *)(&desc[i]);
10131 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10132 		}
10133 		for (k = 0; k < n; k++) {
10134 			*reg_val++ = le64_to_cpu(*desc_data++);
10135 
10136 			regs_num--;
10137 			if (!regs_num)
10138 				break;
10139 		}
10140 	}
10141 
10142 	kfree(desc);
10143 	return 0;
10144 }
10145 
10146 #define MAX_SEPARATE_NUM	4
10147 #define SEPARATOR_VALUE		0xFDFCFBFA
10148 #define REG_NUM_PER_LINE	4
10149 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10150 #define REG_SEPARATOR_LINE	1
10151 #define REG_NUM_REMAIN_MASK	3
10152 #define BD_LIST_MAX_NUM		30
10153 
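/* Query how many buffer descriptors (BDs) each DFX register type needs;
 * the counts come back packed into the data words of four chained
 * descriptors, and hclge_get_dfx_reg_bd_num() unpacks them using
 * hclge_dfx_bd_offset_list.
 */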
10154 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10155 {
	/* prepare 4 commands to query the DFX BD numbers */
10157 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10158 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10159 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10160 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10161 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10162 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10163 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10164 
10165 	return hclge_cmd_send(&hdev->hw, desc, 4);
10166 }
10167 
10168 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10169 				    int *bd_num_list,
10170 				    u32 type_num)
10171 {
10172 	u32 entries_per_desc, desc_index, index, offset, i;
10173 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10174 	int ret;
10175 
10176 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10177 	if (ret) {
10178 		dev_err(&hdev->pdev->dev,
10179 			"Get dfx bd num fail, status is %d.\n", ret);
10180 		return ret;
10181 	}
10182 
10183 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10184 	for (i = 0; i < type_num; i++) {
10185 		offset = hclge_dfx_bd_offset_list[i];
10186 		index = offset % entries_per_desc;
10187 		desc_index = offset / entries_per_desc;
10188 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10189 	}
10190 
10191 	return ret;
10192 }
10193 
10194 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10195 				  struct hclge_desc *desc_src, int bd_num,
10196 				  enum hclge_opcode_type cmd)
10197 {
10198 	struct hclge_desc *desc = desc_src;
10199 	int i, ret;
10200 
10201 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10202 	for (i = 0; i < bd_num - 1; i++) {
10203 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10204 		desc++;
10205 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10206 	}
10207 
10208 	desc = desc_src;
10209 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10210 	if (ret)
10211 		dev_err(&hdev->pdev->dev,
10212 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10213 			cmd, ret);
10214 
10215 	return ret;
10216 }
10217 
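/* Copy the register values out of the descriptors into the dump buffer and
 * append one to four SEPARATOR_VALUE words as an end marker. Returns the
 * total number of 32-bit words written.
 */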
10218 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10219 				    void *data)
10220 {
10221 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10222 	struct hclge_desc *desc = desc_src;
10223 	u32 *reg = data;
10224 
10225 	entries_per_desc = ARRAY_SIZE(desc->data);
10226 	reg_num = entries_per_desc * bd_num;
10227 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10228 	for (i = 0; i < reg_num; i++) {
10229 		index = i % entries_per_desc;
10230 		desc_index = i / entries_per_desc;
10231 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10232 	}
10233 	for (i = 0; i < separator_num; i++)
10234 		*reg++ = SEPARATOR_VALUE;
10235 
10236 	return reg_num + separator_num;
10237 }
10238 
10239 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10240 {
10241 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10242 	int data_len_per_desc, data_len, bd_num, i;
10243 	int bd_num_list[BD_LIST_MAX_NUM];
10244 	int ret;
10245 
10246 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10247 	if (ret) {
10248 		dev_err(&hdev->pdev->dev,
10249 			"Get dfx reg bd num fail, status is %d.\n", ret);
10250 		return ret;
10251 	}
10252 
10253 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
10254 	*len = 0;
10255 	for (i = 0; i < dfx_reg_type_num; i++) {
10256 		bd_num = bd_num_list[i];
10257 		data_len = data_len_per_desc * bd_num;
10258 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10259 	}
10260 
10261 	return ret;
10262 }
10263 
10264 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10265 {
10266 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10267 	int bd_num, bd_num_max, buf_len, i;
10268 	int bd_num_list[BD_LIST_MAX_NUM];
10269 	struct hclge_desc *desc_src;
10270 	u32 *reg = data;
10271 	int ret;
10272 
10273 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10274 	if (ret) {
10275 		dev_err(&hdev->pdev->dev,
10276 			"Get dfx reg bd num fail, status is %d.\n", ret);
10277 		return ret;
10278 	}
10279 
10280 	bd_num_max = bd_num_list[0];
10281 	for (i = 1; i < dfx_reg_type_num; i++)
10282 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10283 
10284 	buf_len = sizeof(*desc_src) * bd_num_max;
10285 	desc_src = kzalloc(buf_len, GFP_KERNEL);
10286 	if (!desc_src)
10287 		return -ENOMEM;
10288 
10289 	for (i = 0; i < dfx_reg_type_num; i++) {
10290 		bd_num = bd_num_list[i];
10291 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10292 					     hclge_dfx_reg_opcode_list[i]);
10293 		if (ret) {
10294 			dev_err(&hdev->pdev->dev,
10295 				"Get dfx reg fail, status is %d.\n", ret);
10296 			break;
10297 		}
10298 
10299 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10300 	}
10301 
10302 	kfree(desc_src);
10303 	return ret;
10304 }
10305 
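/* Dump the directly readable PF registers: the command queue, common,
 * per-ring and per-TQP-interrupt register blocks, each padded with
 * separator words. Returns the total number of 32-bit words written.
 */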
10306 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10307 			      struct hnae3_knic_private_info *kinfo)
10308 {
10309 #define HCLGE_RING_REG_OFFSET		0x200
10310 #define HCLGE_RING_INT_REG_OFFSET	0x4
10311 
10312 	int i, j, reg_num, separator_num;
10313 	int data_num_sum;
10314 	u32 *reg = data;
10315 
	/* Fetch per-PF register values from the PF PCIe register space */
10317 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10318 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10319 	for (i = 0; i < reg_num; i++)
10320 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10321 	for (i = 0; i < separator_num; i++)
10322 		*reg++ = SEPARATOR_VALUE;
10323 	data_num_sum = reg_num + separator_num;
10324 
10325 	reg_num = ARRAY_SIZE(common_reg_addr_list);
10326 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10327 	for (i = 0; i < reg_num; i++)
10328 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10329 	for (i = 0; i < separator_num; i++)
10330 		*reg++ = SEPARATOR_VALUE;
10331 	data_num_sum += reg_num + separator_num;
10332 
10333 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
10334 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10335 	for (j = 0; j < kinfo->num_tqps; j++) {
10336 		for (i = 0; i < reg_num; i++)
10337 			*reg++ = hclge_read_dev(&hdev->hw,
10338 						ring_reg_addr_list[i] +
10339 						HCLGE_RING_REG_OFFSET * j);
10340 		for (i = 0; i < separator_num; i++)
10341 			*reg++ = SEPARATOR_VALUE;
10342 	}
10343 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10344 
10345 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10346 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10347 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
10348 		for (i = 0; i < reg_num; i++)
10349 			*reg++ = hclge_read_dev(&hdev->hw,
10350 						tqp_intr_reg_addr_list[i] +
10351 						HCLGE_RING_INT_REG_OFFSET * j);
10352 		for (i = 0; i < separator_num; i++)
10353 			*reg++ = SEPARATOR_VALUE;
10354 	}
10355 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10356 
10357 	return data_num_sum;
10358 }
10359 
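/* Compute the length in bytes of the register dump produced by
 * hclge_get_regs(): the directly read PF register blocks, the 32-bit and
 * 64-bit register dumps (each rounded up to whole separator lines) plus
 * the DFX register length.
 */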
10360 static int hclge_get_regs_len(struct hnae3_handle *handle)
10361 {
10362 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10363 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10364 	struct hclge_vport *vport = hclge_get_vport(handle);
10365 	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int regs_lines_32_bit, regs_lines_64_bit;
	int dfx_regs_len;
10368 	int ret;
10369 
10370 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10371 	if (ret) {
10372 		dev_err(&hdev->pdev->dev,
10373 			"Get register number failed, ret = %d.\n", ret);
10374 		return ret;
10375 	}
10376 
10377 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10378 	if (ret) {
10379 		dev_err(&hdev->pdev->dev,
10380 			"Get dfx reg len failed, ret = %d.\n", ret);
10381 		return ret;
10382 	}
10383 
10384 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10385 		REG_SEPARATOR_LINE;
10386 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10387 		REG_SEPARATOR_LINE;
10388 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10389 		REG_SEPARATOR_LINE;
10390 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10391 		REG_SEPARATOR_LINE;
10392 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10393 		REG_SEPARATOR_LINE;
10394 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10395 		REG_SEPARATOR_LINE;
10396 
10397 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10398 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10399 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10400 }
10401 
10402 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10403 			   void *data)
10404 {
10405 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10406 	struct hclge_vport *vport = hclge_get_vport(handle);
10407 	struct hclge_dev *hdev = vport->back;
10408 	u32 regs_num_32_bit, regs_num_64_bit;
10409 	int i, reg_num, separator_num, ret;
10410 	u32 *reg = data;
10411 
10412 	*version = hdev->fw_version;
10413 
10414 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10415 	if (ret) {
10416 		dev_err(&hdev->pdev->dev,
10417 			"Get register number failed, ret = %d.\n", ret);
10418 		return;
10419 	}
10420 
10421 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10422 
10423 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10424 	if (ret) {
10425 		dev_err(&hdev->pdev->dev,
10426 			"Get 32 bit register failed, ret = %d.\n", ret);
10427 		return;
10428 	}
10429 	reg_num = regs_num_32_bit;
10430 	reg += reg_num;
10431 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10432 	for (i = 0; i < separator_num; i++)
10433 		*reg++ = SEPARATOR_VALUE;
10434 
10435 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10436 	if (ret) {
10437 		dev_err(&hdev->pdev->dev,
10438 			"Get 64 bit register failed, ret = %d.\n", ret);
10439 		return;
10440 	}
10441 	reg_num = regs_num_64_bit * 2;
10442 	reg += reg_num;
10443 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10444 	for (i = 0; i < separator_num; i++)
10445 		*reg++ = SEPARATOR_VALUE;
10446 
10447 	ret = hclge_get_dfx_reg(hdev, reg);
10448 	if (ret)
10449 		dev_err(&hdev->pdev->dev,
10450 			"Get dfx register failed, ret = %d.\n", ret);
10451 }
10452 
10453 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10454 {
10455 	struct hclge_set_led_state_cmd *req;
10456 	struct hclge_desc desc;
10457 	int ret;
10458 
10459 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10460 
10461 	req = (struct hclge_set_led_state_cmd *)desc.data;
10462 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10463 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10464 
10465 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10466 	if (ret)
10467 		dev_err(&hdev->pdev->dev,
10468 			"Send set led state cmd error, ret =%d\n", ret);
10469 
10470 	return ret;
10471 }
10472 
10473 enum hclge_led_status {
10474 	HCLGE_LED_OFF,
10475 	HCLGE_LED_ON,
10476 	HCLGE_LED_NO_CHANGE = 0xFF,
10477 };
10478 
10479 static int hclge_set_led_id(struct hnae3_handle *handle,
10480 			    enum ethtool_phys_id_state status)
10481 {
10482 	struct hclge_vport *vport = hclge_get_vport(handle);
10483 	struct hclge_dev *hdev = vport->back;
10484 
10485 	switch (status) {
10486 	case ETHTOOL_ID_ACTIVE:
10487 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10488 	case ETHTOOL_ID_INACTIVE:
10489 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10490 	default:
10491 		return -EINVAL;
10492 	}
10493 }
10494 
10495 static void hclge_get_link_mode(struct hnae3_handle *handle,
10496 				unsigned long *supported,
10497 				unsigned long *advertising)
10498 {
10499 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10500 	struct hclge_vport *vport = hclge_get_vport(handle);
10501 	struct hclge_dev *hdev = vport->back;
10502 	unsigned int idx = 0;
10503 
10504 	for (; idx < size; idx++) {
10505 		supported[idx] = hdev->hw.mac.supported[idx];
10506 		advertising[idx] = hdev->hw.mac.advertising[idx];
10507 	}
10508 }
10509 
10510 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10511 {
10512 	struct hclge_vport *vport = hclge_get_vport(handle);
10513 	struct hclge_dev *hdev = vport->back;
10514 
10515 	return hclge_config_gro(hdev, enable);
10516 }
10517 
10518 static const struct hnae3_ae_ops hclge_ops = {
10519 	.init_ae_dev = hclge_init_ae_dev,
10520 	.uninit_ae_dev = hclge_uninit_ae_dev,
10521 	.flr_prepare = hclge_flr_prepare,
10522 	.flr_done = hclge_flr_done,
10523 	.init_client_instance = hclge_init_client_instance,
10524 	.uninit_client_instance = hclge_uninit_client_instance,
10525 	.map_ring_to_vector = hclge_map_ring_to_vector,
10526 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10527 	.get_vector = hclge_get_vector,
10528 	.put_vector = hclge_put_vector,
10529 	.set_promisc_mode = hclge_set_promisc_mode,
10530 	.set_loopback = hclge_set_loopback,
10531 	.start = hclge_ae_start,
10532 	.stop = hclge_ae_stop,
10533 	.client_start = hclge_client_start,
10534 	.client_stop = hclge_client_stop,
10535 	.get_status = hclge_get_status,
10536 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10537 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10538 	.get_media_type = hclge_get_media_type,
10539 	.check_port_speed = hclge_check_port_speed,
10540 	.get_fec = hclge_get_fec,
10541 	.set_fec = hclge_set_fec,
10542 	.get_rss_key_size = hclge_get_rss_key_size,
10543 	.get_rss_indir_size = hclge_get_rss_indir_size,
10544 	.get_rss = hclge_get_rss,
10545 	.set_rss = hclge_set_rss,
10546 	.set_rss_tuple = hclge_set_rss_tuple,
10547 	.get_rss_tuple = hclge_get_rss_tuple,
10548 	.get_tc_size = hclge_get_tc_size,
10549 	.get_mac_addr = hclge_get_mac_addr,
10550 	.set_mac_addr = hclge_set_mac_addr,
10551 	.do_ioctl = hclge_do_ioctl,
10552 	.add_uc_addr = hclge_add_uc_addr,
10553 	.rm_uc_addr = hclge_rm_uc_addr,
10554 	.add_mc_addr = hclge_add_mc_addr,
10555 	.rm_mc_addr = hclge_rm_mc_addr,
10556 	.set_autoneg = hclge_set_autoneg,
10557 	.get_autoneg = hclge_get_autoneg,
10558 	.restart_autoneg = hclge_restart_autoneg,
10559 	.halt_autoneg = hclge_halt_autoneg,
10560 	.get_pauseparam = hclge_get_pauseparam,
10561 	.set_pauseparam = hclge_set_pauseparam,
10562 	.set_mtu = hclge_set_mtu,
10563 	.reset_queue = hclge_reset_tqp,
10564 	.get_stats = hclge_get_stats,
10565 	.get_mac_stats = hclge_get_mac_stat,
10566 	.update_stats = hclge_update_stats,
10567 	.get_strings = hclge_get_strings,
10568 	.get_sset_count = hclge_get_sset_count,
10569 	.get_fw_version = hclge_get_fw_version,
10570 	.get_mdix_mode = hclge_get_mdix_mode,
10571 	.enable_vlan_filter = hclge_enable_vlan_filter,
10572 	.set_vlan_filter = hclge_set_vlan_filter,
10573 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10574 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10575 	.reset_event = hclge_reset_event,
10576 	.get_reset_level = hclge_get_reset_level,
10577 	.set_default_reset_request = hclge_set_def_reset_request,
10578 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10579 	.set_channels = hclge_set_channels,
10580 	.get_channels = hclge_get_channels,
10581 	.get_regs_len = hclge_get_regs_len,
10582 	.get_regs = hclge_get_regs,
10583 	.set_led_id = hclge_set_led_id,
10584 	.get_link_mode = hclge_get_link_mode,
10585 	.add_fd_entry = hclge_add_fd_entry,
10586 	.del_fd_entry = hclge_del_fd_entry,
10587 	.del_all_fd_entries = hclge_del_all_fd_entries,
10588 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10589 	.get_fd_rule_info = hclge_get_fd_rule_info,
10590 	.get_fd_all_rules = hclge_get_all_rules,
10591 	.restore_fd_rules = hclge_restore_fd_entries,
10592 	.enable_fd = hclge_enable_fd,
10593 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10594 	.dbg_run_cmd = hclge_dbg_run_cmd,
10595 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10596 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10597 	.ae_dev_resetting = hclge_ae_dev_resetting,
10598 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10599 	.set_gro_en = hclge_gro_en,
10600 	.get_global_queue_id = hclge_covert_handle_qid_global,
10601 	.set_timer_task = hclge_set_timer_task,
10602 	.mac_connect_phy = hclge_mac_connect_phy,
10603 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10604 	.restore_vlan_table = hclge_restore_vlan_table,
10605 	.get_vf_config = hclge_get_vf_config,
10606 	.set_vf_link_state = hclge_set_vf_link_state,
10607 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
10608 	.set_vf_trust = hclge_set_vf_trust,
10609 	.set_vf_rate = hclge_set_vf_rate,
10610 	.set_vf_mac = hclge_set_vf_mac,
10611 };
10612 
10613 static struct hnae3_ae_algo ae_algo = {
10614 	.ops = &hclge_ops,
10615 	.pdev_id_table = ae_algo_pci_tbl,
10616 };
10617 
10618 static int hclge_init(void)
10619 {
10620 	pr_info("%s is initializing\n", HCLGE_NAME);
10621 
10622 	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10623 	if (!hclge_wq) {
10624 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10625 		return -ENOMEM;
10626 	}
10627 
10628 	hnae3_register_ae_algo(&ae_algo);
10629 
10630 	return 0;
10631 }
10632 
10633 static void hclge_exit(void)
10634 {
10635 	hnae3_unregister_ae_algo(&ae_algo);
10636 	destroy_workqueue(hclge_wq);
10637 }
10638 module_init(hclge_init);
10639 module_exit(hclge_exit);
10640 
10641 MODULE_LICENSE("GPL");
10642 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10643 MODULE_DESCRIPTION("HCLGE Driver");
10644 MODULE_VERSION(HCLGE_MOD_VERSION);
10645