1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
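/* Helper macros for the MAC statistics handling below:
 * HCLGE_MAC_STATS_FIELD_OFF() gives the byte offset of a counter inside
 * struct hclge_mac_stats, and HCLGE_STATS_READ() reads the u64 counter at
 * that offset, e.g.
 * HCLGE_STATS_READ(&hdev->mac_stats,
 *		    HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num))
 */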
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	/* required last entry */
88 	{0, }
89 };
90 
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92 
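/* The register address lists below group registers by block (command queue,
 * misc/common, per-ring and per-TQP interrupt); they are presumably consumed
 * by the driver's register-dump path (e.g. ethtool -d).
 */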
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94 					 HCLGE_CMDQ_TX_ADDR_H_REG,
95 					 HCLGE_CMDQ_TX_DEPTH_REG,
96 					 HCLGE_CMDQ_TX_TAIL_REG,
97 					 HCLGE_CMDQ_TX_HEAD_REG,
98 					 HCLGE_CMDQ_RX_ADDR_L_REG,
99 					 HCLGE_CMDQ_RX_ADDR_H_REG,
100 					 HCLGE_CMDQ_RX_DEPTH_REG,
101 					 HCLGE_CMDQ_RX_TAIL_REG,
102 					 HCLGE_CMDQ_RX_HEAD_REG,
103 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 					 HCLGE_CMDQ_INTR_STS_REG,
105 					 HCLGE_CMDQ_INTR_EN_REG,
106 					 HCLGE_CMDQ_INTR_GEN_REG};
107 
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109 					   HCLGE_VECTOR0_OTER_EN_REG,
110 					   HCLGE_MISC_RESET_STS_REG,
111 					   HCLGE_MISC_VECTOR_INT_STS,
112 					   HCLGE_GLOBAL_RESET_REG,
113 					   HCLGE_FUN_RST_ING,
114 					   HCLGE_GRO_EN_REG};
115 
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117 					 HCLGE_RING_RX_ADDR_H_REG,
118 					 HCLGE_RING_RX_BD_NUM_REG,
119 					 HCLGE_RING_RX_BD_LENGTH_REG,
120 					 HCLGE_RING_RX_MERGE_EN_REG,
121 					 HCLGE_RING_RX_TAIL_REG,
122 					 HCLGE_RING_RX_HEAD_REG,
123 					 HCLGE_RING_RX_FBD_NUM_REG,
124 					 HCLGE_RING_RX_OFFSET_REG,
125 					 HCLGE_RING_RX_FBD_OFFSET_REG,
126 					 HCLGE_RING_RX_STASH_REG,
127 					 HCLGE_RING_RX_BD_ERR_REG,
128 					 HCLGE_RING_TX_ADDR_L_REG,
129 					 HCLGE_RING_TX_ADDR_H_REG,
130 					 HCLGE_RING_TX_BD_NUM_REG,
131 					 HCLGE_RING_TX_PRIORITY_REG,
132 					 HCLGE_RING_TX_TC_REG,
133 					 HCLGE_RING_TX_MERGE_EN_REG,
134 					 HCLGE_RING_TX_TAIL_REG,
135 					 HCLGE_RING_TX_HEAD_REG,
136 					 HCLGE_RING_TX_FBD_NUM_REG,
137 					 HCLGE_RING_TX_OFFSET_REG,
138 					 HCLGE_RING_TX_EBD_NUM_REG,
139 					 HCLGE_RING_TX_EBD_OFFSET_REG,
140 					 HCLGE_RING_TX_BD_ERR_REG,
141 					 HCLGE_RING_EN_REG};
142 
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 					     HCLGE_TQP_INTR_GL0_REG,
145 					     HCLGE_TQP_INTR_GL1_REG,
146 					     HCLGE_TQP_INTR_GL2_REG,
147 					     HCLGE_TQP_INTR_RL_REG};
148 
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150 	"App    Loopback test",
151 	"Serdes serial Loopback test",
152 	"Serdes parallel Loopback test",
153 	"Phy    Loopback test"
154 };
155 
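/* Maps each ethtool statistics string to the offset of the matching counter
 * in struct hclge_mac_stats; consumed by hclge_comm_get_strings() and
 * hclge_comm_get_stats() below.
 */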
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157 	{"mac_tx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159 	{"mac_rx_mac_pause_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161 	{"mac_tx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163 	{"mac_rx_control_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165 	{"mac_tx_pfc_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167 	{"mac_tx_pfc_pri0_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169 	{"mac_tx_pfc_pri1_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171 	{"mac_tx_pfc_pri2_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173 	{"mac_tx_pfc_pri3_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175 	{"mac_tx_pfc_pri4_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177 	{"mac_tx_pfc_pri5_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179 	{"mac_tx_pfc_pri6_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181 	{"mac_tx_pfc_pri7_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183 	{"mac_rx_pfc_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185 	{"mac_rx_pfc_pri0_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187 	{"mac_rx_pfc_pri1_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189 	{"mac_rx_pfc_pri2_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191 	{"mac_rx_pfc_pri3_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193 	{"mac_rx_pfc_pri4_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195 	{"mac_rx_pfc_pri5_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197 	{"mac_rx_pfc_pri6_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199 	{"mac_rx_pfc_pri7_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201 	{"mac_tx_total_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203 	{"mac_tx_total_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205 	{"mac_tx_good_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207 	{"mac_tx_bad_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209 	{"mac_tx_good_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211 	{"mac_tx_bad_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213 	{"mac_tx_uni_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215 	{"mac_tx_multi_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217 	{"mac_tx_broad_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219 	{"mac_tx_undersize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221 	{"mac_tx_oversize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223 	{"mac_tx_64_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225 	{"mac_tx_65_127_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227 	{"mac_tx_128_255_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229 	{"mac_tx_256_511_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231 	{"mac_tx_512_1023_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233 	{"mac_tx_1024_1518_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235 	{"mac_tx_1519_2047_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237 	{"mac_tx_2048_4095_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239 	{"mac_tx_4096_8191_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241 	{"mac_tx_8192_9216_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243 	{"mac_tx_9217_12287_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245 	{"mac_tx_12288_16383_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247 	{"mac_tx_1519_max_good_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249 	{"mac_tx_1519_max_bad_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251 	{"mac_rx_total_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253 	{"mac_rx_total_oct_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255 	{"mac_rx_good_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257 	{"mac_rx_bad_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259 	{"mac_rx_good_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261 	{"mac_rx_bad_oct_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263 	{"mac_rx_uni_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265 	{"mac_rx_multi_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267 	{"mac_rx_broad_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269 	{"mac_rx_undersize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271 	{"mac_rx_oversize_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273 	{"mac_rx_64_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275 	{"mac_rx_65_127_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277 	{"mac_rx_128_255_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279 	{"mac_rx_256_511_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281 	{"mac_rx_512_1023_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283 	{"mac_rx_1024_1518_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285 	{"mac_rx_1519_2047_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287 	{"mac_rx_2048_4095_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289 	{"mac_rx_4096_8191_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291 	{"mac_rx_8192_9216_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293 	{"mac_rx_9217_12287_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295 	{"mac_rx_12288_16383_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297 	{"mac_rx_1519_max_good_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299 	{"mac_rx_1519_max_bad_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301 
302 	{"mac_tx_fragment_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304 	{"mac_tx_undermin_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306 	{"mac_tx_jabber_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308 	{"mac_tx_err_all_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310 	{"mac_tx_from_app_good_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312 	{"mac_tx_from_app_bad_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314 	{"mac_rx_fragment_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316 	{"mac_rx_undermin_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318 	{"mac_rx_jabber_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320 	{"mac_rx_fcs_err_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322 	{"mac_rx_send_app_good_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324 	{"mac_rx_send_app_bad_pkt_num",
325 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327 
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329 	{
330 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
332 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333 		.i_port_bitmap = 0x1,
334 	},
335 };
336 
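/* Default RSS hash key; this appears to be the same 40-byte Toeplitz key
 * that many NIC drivers use as their default.
 */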
337 static const u8 hclge_hash_key[] = {
338 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344 
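/* hclge_dfx_bd_offset_list and hclge_dfx_reg_opcode_list are kept in the
 * same order: entry i provides both the BD-number offset and the query
 * opcode for one DFX register block.
 */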
345 static const u32 hclge_dfx_bd_offset_list[] = {
346 	HCLGE_DFX_BIOS_BD_OFFSET,
347 	HCLGE_DFX_SSU_0_BD_OFFSET,
348 	HCLGE_DFX_SSU_1_BD_OFFSET,
349 	HCLGE_DFX_IGU_BD_OFFSET,
350 	HCLGE_DFX_RPU_0_BD_OFFSET,
351 	HCLGE_DFX_RPU_1_BD_OFFSET,
352 	HCLGE_DFX_NCSI_BD_OFFSET,
353 	HCLGE_DFX_RTC_BD_OFFSET,
354 	HCLGE_DFX_PPP_BD_OFFSET,
355 	HCLGE_DFX_RCB_BD_OFFSET,
356 	HCLGE_DFX_TQP_BD_OFFSET,
357 	HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359 
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
362 	HCLGE_OPC_DFX_SSU_REG_0,
363 	HCLGE_OPC_DFX_SSU_REG_1,
364 	HCLGE_OPC_DFX_IGU_EGU_REG,
365 	HCLGE_OPC_DFX_RPU_REG_0,
366 	HCLGE_OPC_DFX_RPU_REG_1,
367 	HCLGE_OPC_DFX_NCSI_REG,
368 	HCLGE_OPC_DFX_RTC_REG,
369 	HCLGE_OPC_DFX_PPP_REG,
370 	HCLGE_OPC_DFX_RCB_REG,
371 	HCLGE_OPC_DFX_TQP_REG,
372 	HCLGE_OPC_DFX_SSU_REG_2
373 };
374 
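/* Key layout tables, presumably used when building flow director lookup
 * keys: each entry is { field id, width in bits }, e.g. a MAC address
 * field is 48 bits wide and an IPv4 address field is 32 bits wide.
 */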
375 static const struct key_info meta_data_key_info[] = {
376 	{ PACKET_TYPE_ID, 6},
377 	{ IP_FRAGEMENT, 1},
378 	{ ROCE_TYPE, 1},
379 	{ NEXT_KEY, 5},
380 	{ VLAN_NUMBER, 2},
381 	{ SRC_VPORT, 12},
382 	{ DST_VPORT, 12},
383 	{ TUNNEL_PACKET, 1},
384 };
385 
386 static const struct key_info tuple_key_info[] = {
387 	{ OUTER_DST_MAC, 48},
388 	{ OUTER_SRC_MAC, 48},
389 	{ OUTER_VLAN_TAG_FST, 16},
390 	{ OUTER_VLAN_TAG_SEC, 16},
391 	{ OUTER_ETH_TYPE, 16},
392 	{ OUTER_L2_RSV, 16},
393 	{ OUTER_IP_TOS, 8},
394 	{ OUTER_IP_PROTO, 8},
395 	{ OUTER_SRC_IP, 32},
396 	{ OUTER_DST_IP, 32},
397 	{ OUTER_L3_RSV, 16},
398 	{ OUTER_SRC_PORT, 16},
399 	{ OUTER_DST_PORT, 16},
400 	{ OUTER_L4_RSV, 32},
401 	{ OUTER_TUN_VNI, 24},
402 	{ OUTER_TUN_FLOW_ID, 8},
403 	{ INNER_DST_MAC, 48},
404 	{ INNER_SRC_MAC, 48},
405 	{ INNER_VLAN_TAG_FST, 16},
406 	{ INNER_VLAN_TAG_SEC, 16},
407 	{ INNER_ETH_TYPE, 16},
408 	{ INNER_L2_RSV, 16},
409 	{ INNER_IP_TOS, 8},
410 	{ INNER_IP_PROTO, 8},
411 	{ INNER_SRC_IP, 32},
412 	{ INNER_DST_IP, 32},
413 	{ INNER_L3_RSV, 16},
414 	{ INNER_SRC_PORT, 16},
415 	{ INNER_DST_PORT, 16},
416 	{ INNER_L4_RSV, 32},
417 };
418 
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422 
423 	u64 *data = (u64 *)(&hdev->mac_stats);
424 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425 	__le64 *desc_data;
426 	int i, k, n;
427 	int ret;
428 
429 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431 	if (ret) {
432 		dev_err(&hdev->pdev->dev,
433 			"Get MAC pkt stats fail, status = %d.\n", ret);
434 
435 		return ret;
436 	}
437 
438 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439 		/* for special opcode 0032, only the first desc has the head */
440 		if (unlikely(i == 0)) {
441 			desc_data = (__le64 *)(&desc[i].data[0]);
442 			n = HCLGE_RD_FIRST_STATS_NUM;
443 		} else {
444 			desc_data = (__le64 *)(&desc[i]);
445 			n = HCLGE_RD_OTHER_STATS_NUM;
446 		}
447 
448 		for (k = 0; k < n; k++) {
449 			*data += le64_to_cpu(*desc_data);
450 			data++;
451 			desc_data++;
452 		}
453 	}
454 
455 	return 0;
456 }
457 
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460 	u64 *data = (u64 *)(&hdev->mac_stats);
461 	struct hclge_desc *desc;
462 	__le64 *desc_data;
463 	u16 i, k, n;
464 	int ret;
465 
466 	/* This may be called inside atomic sections,
467 	 * so GFP_ATOMIC is more suitable here
468 	 */
469 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470 	if (!desc)
471 		return -ENOMEM;
472 
473 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475 	if (ret) {
476 		kfree(desc);
477 		return ret;
478 	}
479 
480 	for (i = 0; i < desc_num; i++) {
481 		/* for special opcode 0034, only the first desc has the head */
482 		if (i == 0) {
483 			desc_data = (__le64 *)(&desc[i].data[0]);
484 			n = HCLGE_RD_FIRST_STATS_NUM;
485 		} else {
486 			desc_data = (__le64 *)(&desc[i]);
487 			n = HCLGE_RD_OTHER_STATS_NUM;
488 		}
489 
490 		for (k = 0; k < n; k++) {
491 			*data += le64_to_cpu(*desc_data);
492 			data++;
493 			desc_data++;
494 		}
495 	}
496 
497 	kfree(desc);
498 
499 	return 0;
500 }
501 
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504 	struct hclge_desc desc;
505 	__le32 *desc_data;
506 	u32 reg_num;
507 	int ret;
508 
509 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511 	if (ret)
512 		return ret;
513 
514 	desc_data = (__le32 *)(&desc.data[0]);
515 	reg_num = le32_to_cpu(*desc_data);
516 
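	/* Compute how many descriptors are needed to read reg_num counters:
	 * the formula assumes the first descriptor returns 3 counters and
	 * every additional descriptor returns 4, i.e.
	 * desc_num = 1 + ceil((reg_num - 3) / 4);
	 * e.g. reg_num = 12 gives 1 + 2 + 1 = 4 descriptors.
	 */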
517 	*desc_num = 1 + ((reg_num - 3) >> 2) +
518 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519 
520 	return 0;
521 }
522 
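/* Prefer the "complete" method when the firmware can report how many MAC
 * statistics registers exist (HCLGE_OPC_QUERY_MAC_REG_NUM); fall back to
 * the fixed 21-descriptor "defective" method when that query returns
 * -EOPNOTSUPP.
 */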
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525 	u32 desc_num;
526 	int ret;
527 
528 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
529 
530 	/* The firmware supports the new statistics acquisition method */
531 	if (!ret)
532 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
533 	else if (ret == -EOPNOTSUPP)
534 		ret = hclge_mac_update_stats_defective(hdev);
535 	else
536 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537 
538 	return ret;
539 }
540 
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544 	struct hclge_vport *vport = hclge_get_vport(handle);
545 	struct hclge_dev *hdev = vport->back;
546 	struct hnae3_queue *queue;
547 	struct hclge_desc desc[1];
548 	struct hclge_tqp *tqp;
549 	int ret, i;
550 
551 	for (i = 0; i < kinfo->num_tqps; i++) {
552 		queue = handle->kinfo.tqp[i];
553 		tqp = container_of(queue, struct hclge_tqp, q);
554 		/* command : HCLGE_OPC_QUERY_RX_STATS */
555 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556 					   true);
557 
558 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
559 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
560 		if (ret) {
561 			dev_err(&hdev->pdev->dev,
562 				"Query tqp stat fail, status = %d, queue = %d\n",
563 				ret, i);
564 			return ret;
565 		}
566 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567 			le32_to_cpu(desc[0].data[1]);
568 	}
569 
570 	for (i = 0; i < kinfo->num_tqps; i++) {
571 		queue = handle->kinfo.tqp[i];
572 		tqp = container_of(queue, struct hclge_tqp, q);
573 		/* command : HCLGE_OPC_QUERY_TX_STATS */
574 		hclge_cmd_setup_basic_desc(&desc[0],
575 					   HCLGE_OPC_QUERY_TX_STATS,
576 					   true);
577 
578 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
579 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
580 		if (ret) {
581 			dev_err(&hdev->pdev->dev,
582 				"Query tqp stat fail, status = %d, queue = %d\n",
583 				ret, i);
584 			return ret;
585 		}
586 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587 			le32_to_cpu(desc[0].data[1]);
588 	}
589 
590 	return 0;
591 }
592 
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596 	struct hclge_tqp *tqp;
597 	u64 *buff = data;
598 	int i;
599 
600 	for (i = 0; i < kinfo->num_tqps; i++) {
601 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603 	}
604 
605 	for (i = 0; i < kinfo->num_tqps; i++) {
606 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608 	}
609 
610 	return buff;
611 }
612 
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616 
617 	/* each tqp has one TX queue and one RX queue */
618 	return kinfo->num_tqps * (2);
619 }
620 
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624 	u8 *buff = data;
625 	int i = 0;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629 			struct hclge_tqp, q);
630 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
631 			 tqp->index);
632 		buff = buff + ETH_GSTRING_LEN;
633 	}
634 
635 	for (i = 0; i < kinfo->num_tqps; i++) {
636 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637 			struct hclge_tqp, q);
638 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
639 			 tqp->index);
640 		buff = buff + ETH_GSTRING_LEN;
641 	}
642 
643 	return buff;
644 }
645 
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647 				 const struct hclge_comm_stats_str strs[],
648 				 int size, u64 *data)
649 {
650 	u64 *buf = data;
651 	u32 i;
652 
653 	for (i = 0; i < size; i++)
654 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655 
656 	return buf + size;
657 }
658 
659 static u8 *hclge_comm_get_strings(u32 stringset,
660 				  const struct hclge_comm_stats_str strs[],
661 				  int size, u8 *data)
662 {
663 	char *buff = (char *)data;
664 	u32 i;
665 
666 	if (stringset != ETH_SS_STATS)
667 		return buff;
668 
669 	for (i = 0; i < size; i++) {
670 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671 		buff = buff + ETH_GSTRING_LEN;
672 	}
673 
674 	return (u8 *)buff;
675 }
676 
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679 	struct hnae3_handle *handle;
680 	int status;
681 
682 	handle = &hdev->vport[0].nic;
683 	if (handle->client) {
684 		status = hclge_tqps_update_stats(handle);
685 		if (status) {
686 			dev_err(&hdev->pdev->dev,
687 				"Update TQPS stats fail, status = %d.\n",
688 				status);
689 		}
690 	}
691 
692 	status = hclge_mac_update_stats(hdev);
693 	if (status)
694 		dev_err(&hdev->pdev->dev,
695 			"Update MAC stats fail, status = %d.\n", status);
696 }
697 
698 static void hclge_update_stats(struct hnae3_handle *handle,
699 			       struct net_device_stats *net_stats)
700 {
701 	struct hclge_vport *vport = hclge_get_vport(handle);
702 	struct hclge_dev *hdev = vport->back;
703 	int status;
704 
705 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706 		return;
707 
708 	status = hclge_mac_update_stats(hdev);
709 	if (status)
710 		dev_err(&hdev->pdev->dev,
711 			"Update MAC stats fail, status = %d.\n",
712 			status);
713 
714 	status = hclge_tqps_update_stats(handle);
715 	if (status)
716 		dev_err(&hdev->pdev->dev,
717 			"Update TQPS stats fail, status = %d.\n",
718 			status);
719 
720 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722 
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726 		HNAE3_SUPPORT_PHY_LOOPBACK |\
727 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729 
730 	struct hclge_vport *vport = hclge_get_vport(handle);
731 	struct hclge_dev *hdev = vport->back;
732 	int count = 0;
733 
734 	/* Loopback test support rules:
735 	 * mac: only supported in GE mode
736 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
737 	 * phy: only supported when a phy device exists on the board
738 	 */
739 	if (stringset == ETH_SS_TEST) {
740 		/* clear the loopback bit flags first */
741 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742 		if (hdev->pdev->revision >= 0x21 ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746 			count += 1;
747 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748 		}
749 
750 		count += 2;
751 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753 
754 		if (hdev->hw.mac.phydev) {
755 			count += 1;
756 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757 		}
758 
759 	} else if (stringset == ETH_SS_STATS) {
760 		count = ARRAY_SIZE(g_mac_stats_string) +
761 			hclge_tqps_get_sset_count(handle, stringset);
762 	}
763 
764 	return count;
765 }
766 
767 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768 			      u8 *data)
769 {
770 	u8 *p = data;
771 	int size;
772 
773 	if (stringset == ETH_SS_STATS) {
774 		size = ARRAY_SIZE(g_mac_stats_string);
775 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
776 					   size, p);
777 		p = hclge_tqps_get_strings(handle, p);
778 	} else if (stringset == ETH_SS_TEST) {
779 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
780 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
781 			       ETH_GSTRING_LEN);
782 			p += ETH_GSTRING_LEN;
783 		}
784 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
785 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
786 			       ETH_GSTRING_LEN);
787 			p += ETH_GSTRING_LEN;
788 		}
789 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
790 			memcpy(p,
791 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
792 			       ETH_GSTRING_LEN);
793 			p += ETH_GSTRING_LEN;
794 		}
795 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
796 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
797 			       ETH_GSTRING_LEN);
798 			p += ETH_GSTRING_LEN;
799 		}
800 	}
801 }
802 
803 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
804 {
805 	struct hclge_vport *vport = hclge_get_vport(handle);
806 	struct hclge_dev *hdev = vport->back;
807 	u64 *p;
808 
809 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
810 				 ARRAY_SIZE(g_mac_stats_string), data);
811 	p = hclge_tqps_get_stats(handle, p);
812 }
813 
814 static void hclge_get_mac_stat(struct hnae3_handle *handle,
815 			       struct hns3_mac_stats *mac_stats)
816 {
817 	struct hclge_vport *vport = hclge_get_vport(handle);
818 	struct hclge_dev *hdev = vport->back;
819 
820 	hclge_update_stats(handle, NULL);
821 
822 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
823 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824 }
825 
826 static int hclge_parse_func_status(struct hclge_dev *hdev,
827 				   struct hclge_func_status_cmd *status)
828 {
829 #define HCLGE_MAC_ID_MASK	0xF
830 
831 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
832 		return -EINVAL;
833 
834 	/* Record whether this pf is the main pf */
835 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
836 		hdev->flag |= HCLGE_FLAG_MAIN;
837 	else
838 		hdev->flag &= ~HCLGE_FLAG_MAIN;
839 
840 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
841 	return 0;
842 }
843 
844 static int hclge_query_function_status(struct hclge_dev *hdev)
845 {
846 #define HCLGE_QUERY_MAX_CNT	5
847 
848 	struct hclge_func_status_cmd *req;
849 	struct hclge_desc desc;
850 	int timeout = 0;
851 	int ret;
852 
853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
854 	req = (struct hclge_func_status_cmd *)desc.data;
855 
856 	do {
857 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
858 		if (ret) {
859 			dev_err(&hdev->pdev->dev,
860 				"query function status failed %d.\n", ret);
861 			return ret;
862 		}
863 
864 		/* Check if pf reset is done */
865 		if (req->pf_state)
866 			break;
867 		usleep_range(1000, 2000);
868 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
869 
870 	return hclge_parse_func_status(hdev, req);
871 }
872 
873 static int hclge_query_pf_resource(struct hclge_dev *hdev)
874 {
875 	struct hclge_pf_res_cmd *req;
876 	struct hclge_desc desc;
877 	int ret;
878 
879 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
880 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
881 	if (ret) {
882 		dev_err(&hdev->pdev->dev,
883 			"query pf resource failed %d.\n", ret);
884 		return ret;
885 	}
886 
887 	req = (struct hclge_pf_res_cmd *)desc.data;
888 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
889 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
890 
891 	if (req->tx_buf_size)
892 		hdev->tx_buf_size =
893 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
894 	else
895 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
896 
897 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
898 
899 	if (req->dv_buf_size)
900 		hdev->dv_buf_size =
901 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
902 	else
903 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
904 
905 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
906 
907 	if (hnae3_dev_roce_supported(hdev)) {
908 		hdev->roce_base_msix_offset =
909 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
910 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
911 		hdev->num_roce_msi =
912 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
913 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
914 
915 		/* the nic's msix number is always equal to the roce's. */
916 		hdev->num_nic_msi = hdev->num_roce_msi;
917 
918 		/* The PF should have both NIC and RoCE vectors;
919 		 * NIC vectors are allocated before RoCE vectors.
920 		 */
921 		hdev->num_msi = hdev->num_roce_msi +
922 				hdev->roce_base_msix_offset;
923 	} else {
924 		hdev->num_msi =
925 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
926 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
927 
928 		hdev->num_nic_msi = hdev->num_msi;
929 	}
930 
931 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
932 		dev_err(&hdev->pdev->dev,
933 			"Only %u msi resources, not enough for pf (min: 2).\n",
934 			hdev->num_nic_msi);
935 		return -EINVAL;
936 	}
937 
938 	return 0;
939 }
940 
941 static int hclge_parse_speed(int speed_cmd, int *speed)
942 {
943 	switch (speed_cmd) {
944 	case 6:
945 		*speed = HCLGE_MAC_SPEED_10M;
946 		break;
947 	case 7:
948 		*speed = HCLGE_MAC_SPEED_100M;
949 		break;
950 	case 0:
951 		*speed = HCLGE_MAC_SPEED_1G;
952 		break;
953 	case 1:
954 		*speed = HCLGE_MAC_SPEED_10G;
955 		break;
956 	case 2:
957 		*speed = HCLGE_MAC_SPEED_25G;
958 		break;
959 	case 3:
960 		*speed = HCLGE_MAC_SPEED_40G;
961 		break;
962 	case 4:
963 		*speed = HCLGE_MAC_SPEED_50G;
964 		break;
965 	case 5:
966 		*speed = HCLGE_MAC_SPEED_100G;
967 		break;
968 	default:
969 		return -EINVAL;
970 	}
971 
972 	return 0;
973 }
974 
975 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
976 {
977 	struct hclge_vport *vport = hclge_get_vport(handle);
978 	struct hclge_dev *hdev = vport->back;
979 	u32 speed_ability = hdev->hw.mac.speed_ability;
980 	u32 speed_bit = 0;
981 
982 	switch (speed) {
983 	case HCLGE_MAC_SPEED_10M:
984 		speed_bit = HCLGE_SUPPORT_10M_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_100M:
987 		speed_bit = HCLGE_SUPPORT_100M_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_1G:
990 		speed_bit = HCLGE_SUPPORT_1G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_10G:
993 		speed_bit = HCLGE_SUPPORT_10G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_25G:
996 		speed_bit = HCLGE_SUPPORT_25G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_40G:
999 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1000 		break;
1001 	case HCLGE_MAC_SPEED_50G:
1002 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1003 		break;
1004 	case HCLGE_MAC_SPEED_100G:
1005 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1006 		break;
1007 	default:
1008 		return -EINVAL;
1009 	}
1010 
1011 	if (speed_bit & speed_ability)
1012 		return 0;
1013 
1014 	return -EINVAL;
1015 }
1016 
1017 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1018 {
1019 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1027 				 mac->supported);
1028 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1029 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1030 				 mac->supported);
1031 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1032 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1033 				 mac->supported);
1034 }
1035 
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1037 {
1038 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 				 mac->supported);
1047 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 				 mac->supported);
1050 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 				 mac->supported);
1053 }
1054 
1055 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1056 {
1057 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1059 				 mac->supported);
1060 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1065 				 mac->supported);
1066 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1067 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1068 				 mac->supported);
1069 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1070 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1071 				 mac->supported);
1072 }
1073 
1074 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1075 {
1076 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1080 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1081 				 mac->supported);
1082 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1083 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1084 				 mac->supported);
1085 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1086 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1087 				 mac->supported);
1088 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1089 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1090 				 mac->supported);
1091 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1092 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1093 				 mac->supported);
1094 }
1095 
1096 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1097 {
1098 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1099 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1100 
1101 	switch (mac->speed) {
1102 	case HCLGE_MAC_SPEED_10G:
1103 	case HCLGE_MAC_SPEED_40G:
1104 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1105 				 mac->supported);
1106 		mac->fec_ability =
1107 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1108 		break;
1109 	case HCLGE_MAC_SPEED_25G:
1110 	case HCLGE_MAC_SPEED_50G:
1111 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1112 				 mac->supported);
1113 		mac->fec_ability =
1114 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1115 			BIT(HNAE3_FEC_AUTO);
1116 		break;
1117 	case HCLGE_MAC_SPEED_100G:
1118 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1119 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1120 		break;
1121 	default:
1122 		mac->fec_ability = 0;
1123 		break;
1124 	}
1125 }
1126 
1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1128 					u8 speed_ability)
1129 {
1130 	struct hclge_mac *mac = &hdev->hw.mac;
1131 
1132 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1133 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1134 				 mac->supported);
1135 
1136 	hclge_convert_setting_sr(mac, speed_ability);
1137 	hclge_convert_setting_lr(mac, speed_ability);
1138 	hclge_convert_setting_cr(mac, speed_ability);
1139 	if (hdev->pdev->revision >= 0x21)
1140 		hclge_convert_setting_fec(mac);
1141 
1142 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1143 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1144 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1145 }
1146 
1147 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1148 					    u8 speed_ability)
1149 {
1150 	struct hclge_mac *mac = &hdev->hw.mac;
1151 
1152 	hclge_convert_setting_kr(mac, speed_ability);
1153 	if (hdev->pdev->revision >= 0x21)
1154 		hclge_convert_setting_fec(mac);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1156 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1157 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1158 }
1159 
1160 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1161 					 u8 speed_ability)
1162 {
1163 	unsigned long *supported = hdev->hw.mac.supported;
1164 
1165 	/* default to supporting all speeds for GE ports */
1166 	if (!speed_ability)
1167 		speed_ability = HCLGE_SUPPORT_GE;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1171 				 supported);
1172 
1173 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1174 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1175 				 supported);
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1177 				 supported);
1178 	}
1179 
1180 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1182 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1183 	}
1184 
1185 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1186 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1187 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1188 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1189 }
1190 
1191 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1192 {
1193 	u8 media_type = hdev->hw.mac.media_type;
1194 
1195 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1196 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1197 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1198 		hclge_parse_copper_link_mode(hdev, speed_ability);
1199 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1200 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1201 }
1202 
1203 static u32 hclge_get_max_speed(u8 speed_ability)
1204 {
1205 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1206 		return HCLGE_MAC_SPEED_100G;
1207 
1208 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1209 		return HCLGE_MAC_SPEED_50G;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1212 		return HCLGE_MAC_SPEED_40G;
1213 
1214 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1215 		return HCLGE_MAC_SPEED_25G;
1216 
1217 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1218 		return HCLGE_MAC_SPEED_10G;
1219 
1220 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1221 		return HCLGE_MAC_SPEED_1G;
1222 
1223 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1224 		return HCLGE_MAC_SPEED_100M;
1225 
1226 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1227 		return HCLGE_MAC_SPEED_10M;
1228 
1229 	return HCLGE_MAC_SPEED_1G;
1230 }
1231 
1232 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1233 {
1234 	struct hclge_cfg_param_cmd *req;
1235 	u64 mac_addr_tmp_high;
1236 	u64 mac_addr_tmp;
1237 	unsigned int i;
1238 
1239 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1240 
1241 	/* get the configuration */
1242 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 					      HCLGE_CFG_VMDQ_M,
1244 					      HCLGE_CFG_VMDQ_S);
1245 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1247 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1248 					    HCLGE_CFG_TQP_DESC_N_M,
1249 					    HCLGE_CFG_TQP_DESC_N_S);
1250 
1251 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252 					HCLGE_CFG_PHY_ADDR_M,
1253 					HCLGE_CFG_PHY_ADDR_S);
1254 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255 					  HCLGE_CFG_MEDIA_TP_M,
1256 					  HCLGE_CFG_MEDIA_TP_S);
1257 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1258 					  HCLGE_CFG_RX_BUF_LEN_M,
1259 					  HCLGE_CFG_RX_BUF_LEN_S);
1260 	/* get mac_address */
1261 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1262 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1263 					    HCLGE_CFG_MAC_ADDR_H_M,
1264 					    HCLGE_CFG_MAC_ADDR_H_S);
1265 
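	/* param[2] holds the low 32 bits of the MAC address and param[3]
	 * holds the upper bits; the split shift below is equivalent to
	 * mac_addr_tmp_high << 32 on the 64-bit temporary.
	 */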
1266 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1267 
1268 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269 					     HCLGE_CFG_DEFAULT_SPEED_M,
1270 					     HCLGE_CFG_DEFAULT_SPEED_S);
1271 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1272 					    HCLGE_CFG_RSS_SIZE_M,
1273 					    HCLGE_CFG_RSS_SIZE_S);
1274 
1275 	for (i = 0; i < ETH_ALEN; i++)
1276 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1277 
1278 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1279 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1280 
1281 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 					     HCLGE_CFG_SPEED_ABILITY_M,
1283 					     HCLGE_CFG_SPEED_ABILITY_S);
1284 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1286 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1287 	if (!cfg->umv_space)
1288 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1289 }
1290 
1291 /* hclge_get_cfg: query the static parameters from flash
1292  * @hdev: pointer to struct hclge_dev
1293  * @hcfg: the config structure to be filled
1294  */
1295 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1296 {
1297 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1298 	struct hclge_cfg_param_cmd *req;
1299 	unsigned int i;
1300 	int ret;
1301 
1302 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1303 		u32 offset = 0;
1304 
1305 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1306 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1307 					   true);
1308 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1309 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1310 		/* Len must be given in units of 4 bytes when sent to hardware */
1311 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1312 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1313 		req->offset = cpu_to_le32(offset);
1314 	}
1315 
1316 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1317 	if (ret) {
1318 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319 		return ret;
1320 	}
1321 
1322 	hclge_parse_cfg(hcfg, desc);
1323 
1324 	return 0;
1325 }
1326 
1327 static int hclge_get_cap(struct hclge_dev *hdev)
1328 {
1329 	int ret;
1330 
1331 	ret = hclge_query_function_status(hdev);
1332 	if (ret) {
1333 		dev_err(&hdev->pdev->dev,
1334 			"query function status error %d.\n", ret);
1335 		return ret;
1336 	}
1337 
1338 	/* get pf resource */
1339 	return hclge_query_pf_resource(hdev);
1340 }
1341 
1342 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1343 {
1344 #define HCLGE_MIN_TX_DESC	64
1345 #define HCLGE_MIN_RX_DESC	64
1346 
1347 	if (!is_kdump_kernel())
1348 		return;
1349 
1350 	dev_info(&hdev->pdev->dev,
1351 		 "Running kdump kernel. Using minimal resources\n");
1352 
1353 	/* minimum number of queue pairs equals the number of vports */
1354 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1355 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1356 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1357 }
1358 
1359 static int hclge_configure(struct hclge_dev *hdev)
1360 {
1361 	struct hclge_cfg cfg;
1362 	unsigned int i;
1363 	int ret;
1364 
1365 	ret = hclge_get_cfg(hdev, &cfg);
1366 	if (ret)
1367 		return ret;
1368 
1369 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 	hdev->base_tqp_pid = 0;
1371 	hdev->rss_size_max = cfg.rss_size_max;
1372 	hdev->rx_buf_len = cfg.rx_buf_len;
1373 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1374 	hdev->hw.mac.media_type = cfg.media_type;
1375 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 	hdev->num_tx_desc = cfg.tqp_desc_num;
1377 	hdev->num_rx_desc = cfg.tqp_desc_num;
1378 	hdev->tm_info.num_pg = 1;
1379 	hdev->tc_max = cfg.tc_num;
1380 	hdev->tm_info.hw_pfc_map = 0;
1381 	hdev->wanted_umv_size = cfg.umv_space;
1382 
1383 	if (hnae3_dev_fd_supported(hdev)) {
1384 		hdev->fd_en = true;
1385 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 	}
1387 
1388 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1391 			cfg.default_speed, ret);
1392 		return ret;
1393 	}
1394 
1395 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1396 
1397 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1398 
1399 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1400 	    (hdev->tc_max < 1)) {
1401 		dev_warn(&hdev->pdev->dev, "invalid TC num = %u, forcing to 1.\n",
1402 			 hdev->tc_max);
1403 		hdev->tc_max = 1;
1404 	}
1405 
1406 	/* Dev does not support DCB */
1407 	if (!hnae3_dev_dcb_supported(hdev)) {
1408 		hdev->tc_max = 1;
1409 		hdev->pfc_max = 0;
1410 	} else {
1411 		hdev->pfc_max = hdev->tc_max;
1412 	}
1413 
1414 	hdev->tm_info.num_tc = 1;
1415 
1416 	/* Non-contiguous TCs are currently not supported */
1417 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1418 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1419 
1420 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1421 
1422 	hclge_init_kdump_kernel_config(hdev);
1423 
1424 	/* Set the initial affinity based on the pci function number */
1425 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1426 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1427 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1428 			&hdev->affinity_mask);
1429 
1430 	return ret;
1431 }
1432 
1433 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1434 			    u16 tso_mss_max)
1435 {
1436 	struct hclge_cfg_tso_status_cmd *req;
1437 	struct hclge_desc desc;
1438 
1439 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440 
1441 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1443 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1444 
1445 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1446 }
1447 
1448 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1449 {
1450 	struct hclge_cfg_gro_status_cmd *req;
1451 	struct hclge_desc desc;
1452 	int ret;
1453 
1454 	if (!hnae3_dev_gro_supported(hdev))
1455 		return 0;
1456 
1457 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1458 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1459 
1460 	req->gro_en = en ? 1 : 0;
1461 
1462 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1463 	if (ret)
1464 		dev_err(&hdev->pdev->dev,
1465 			"GRO hardware config cmd failed, ret = %d\n", ret);
1466 
1467 	return ret;
1468 }
1469 
1470 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1471 {
1472 	struct hclge_tqp *tqp;
1473 	int i;
1474 
1475 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1476 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1477 	if (!hdev->htqp)
1478 		return -ENOMEM;
1479 
1480 	tqp = hdev->htqp;
1481 
1482 	for (i = 0; i < hdev->num_tqps; i++) {
1483 		tqp->dev = &hdev->pdev->dev;
1484 		tqp->index = i;
1485 
1486 		tqp->q.ae_algo = &ae_algo;
1487 		tqp->q.buf_size = hdev->rx_buf_len;
1488 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1489 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1490 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1491 			i * HCLGE_TQP_REG_SIZE;
1492 
1493 		tqp++;
1494 	}
1495 
1496 	return 0;
1497 }
1498 
1499 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1500 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1501 {
1502 	struct hclge_tqp_map_cmd *req;
1503 	struct hclge_desc desc;
1504 	int ret;
1505 
1506 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1507 
1508 	req = (struct hclge_tqp_map_cmd *)desc.data;
1509 	req->tqp_id = cpu_to_le16(tqp_pid);
1510 	req->tqp_vf = func_id;
1511 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1512 	if (!is_pf)
1513 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1514 	req->tqp_vid = cpu_to_le16(tqp_vid);
1515 
1516 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1517 	if (ret)
1518 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1519 
1520 	return ret;
1521 }
1522 
1523 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1524 {
1525 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1526 	struct hclge_dev *hdev = vport->back;
1527 	int i, alloced;
1528 
1529 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1530 	     alloced < num_tqps; i++) {
1531 		if (!hdev->htqp[i].alloced) {
1532 			hdev->htqp[i].q.handle = &vport->nic;
1533 			hdev->htqp[i].q.tqp_index = alloced;
1534 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1535 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1536 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1537 			hdev->htqp[i].alloced = true;
1538 			alloced++;
1539 		}
1540 	}
1541 	vport->alloc_tqps = alloced;
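	/* rss_size is limited by both the hardware maximum and the number
	 * of queues available per TC.
	 */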
1542 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1543 				vport->alloc_tqps / hdev->tm_info.num_tc);
1544 
1545 	/* ensure a one-to-one mapping between irq and queue by default */
1546 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1547 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1548 
1549 	return 0;
1550 }
1551 
1552 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1553 			    u16 num_tx_desc, u16 num_rx_desc)
1554 
1555 {
1556 	struct hnae3_handle *nic = &vport->nic;
1557 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1558 	struct hclge_dev *hdev = vport->back;
1559 	int ret;
1560 
1561 	kinfo->num_tx_desc = num_tx_desc;
1562 	kinfo->num_rx_desc = num_rx_desc;
1563 
1564 	kinfo->rx_buf_len = hdev->rx_buf_len;
1565 
1566 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1567 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1568 	if (!kinfo->tqp)
1569 		return -ENOMEM;
1570 
1571 	ret = hclge_assign_tqp(vport, num_tqps);
1572 	if (ret)
1573 		dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1574 
1575 	return ret;
1576 }
1577 
1578 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1579 				  struct hclge_vport *vport)
1580 {
1581 	struct hnae3_handle *nic = &vport->nic;
1582 	struct hnae3_knic_private_info *kinfo;
1583 	u16 i;
1584 
1585 	kinfo = &nic->kinfo;
1586 	for (i = 0; i < vport->alloc_tqps; i++) {
1587 		struct hclge_tqp *q =
1588 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1589 		bool is_pf;
1590 		int ret;
1591 
1592 		is_pf = !(vport->vport_id);
1593 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1594 					     i, is_pf);
1595 		if (ret)
1596 			return ret;
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 static int hclge_map_tqp(struct hclge_dev *hdev)
1603 {
1604 	struct hclge_vport *vport = hdev->vport;
1605 	u16 i, num_vport;
1606 
1607 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1608 	for (i = 0; i < num_vport; i++)	{
1609 		int ret;
1610 
1611 		ret = hclge_map_tqp_to_vport(hdev, vport);
1612 		if (ret)
1613 			return ret;
1614 
1615 		vport++;
1616 	}
1617 
1618 	return 0;
1619 }
1620 
1621 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1622 {
1623 	struct hnae3_handle *nic = &vport->nic;
1624 	struct hclge_dev *hdev = vport->back;
1625 	int ret;
1626 
1627 	nic->pdev = hdev->pdev;
1628 	nic->ae_algo = &ae_algo;
1629 	nic->numa_node_mask = hdev->numa_node_mask;
1630 
1631 	ret = hclge_knic_setup(vport, num_tqps,
1632 			       hdev->num_tx_desc, hdev->num_rx_desc);
1633 	if (ret)
1634 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1635 
1636 	return ret;
1637 }
1638 
1639 static int hclge_alloc_vport(struct hclge_dev *hdev)
1640 {
1641 	struct pci_dev *pdev = hdev->pdev;
1642 	struct hclge_vport *vport;
1643 	u32 tqp_main_vport;
1644 	u32 tqp_per_vport;
1645 	int num_vport, i;
1646 	int ret;
1647 
1648 	/* We need to alloc a vport for the main NIC of the PF */
1649 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1650 
1651 	if (hdev->num_tqps < num_vport) {
1652 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
1653 			hdev->num_tqps, num_vport);
1654 		return -EINVAL;
1655 	}
1656 
1657 	/* Alloc the same number of TQPs per vport (main vport gets the remainder) */
1658 	tqp_per_vport = hdev->num_tqps / num_vport;
1659 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1660 
1661 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1662 			     GFP_KERNEL);
1663 	if (!vport)
1664 		return -ENOMEM;
1665 
1666 	hdev->vport = vport;
1667 	hdev->num_alloc_vport = num_vport;
1668 
1669 	if (IS_ENABLED(CONFIG_PCI_IOV))
1670 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1671 
1672 	for (i = 0; i < num_vport; i++) {
1673 		vport->back = hdev;
1674 		vport->vport_id = i;
1675 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1676 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1677 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1678 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1679 		INIT_LIST_HEAD(&vport->vlan_list);
1680 		INIT_LIST_HEAD(&vport->uc_mac_list);
1681 		INIT_LIST_HEAD(&vport->mc_mac_list);
1682 		spin_lock_init(&vport->mac_list_lock);
1683 
1684 		if (i == 0)
1685 			ret = hclge_vport_setup(vport, tqp_main_vport);
1686 		else
1687 			ret = hclge_vport_setup(vport, tqp_per_vport);
1688 		if (ret) {
1689 			dev_err(&pdev->dev,
1690 				"vport setup failed for vport %d, %d\n",
1691 				i, ret);
1692 			return ret;
1693 		}
1694 
1695 		vport++;
1696 	}
1697 
1698 	return 0;
1699 }
1700 
1701 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1702 				    struct hclge_pkt_buf_alloc *buf_alloc)
1703 {
1704 /* TX buffer size is specified in units of 128 bytes */
1705 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1706 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1707 	struct hclge_tx_buff_alloc_cmd *req;
1708 	struct hclge_desc desc;
1709 	int ret;
1710 	u8 i;
1711 
1712 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1713 
1714 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1715 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1716 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1717 
1718 		req->tx_pkt_buff[i] =
1719 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1720 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1721 	}
1722 
1723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1724 	if (ret)
1725 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1726 			ret);
1727 
1728 	return ret;
1729 }
1730 
1731 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1732 				 struct hclge_pkt_buf_alloc *buf_alloc)
1733 {
1734 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1735 
1736 	if (ret)
1737 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1738 
1739 	return ret;
1740 }
1741 
1742 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1743 {
1744 	unsigned int i;
1745 	u32 cnt = 0;
1746 
1747 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1748 		if (hdev->hw_tc_map & BIT(i))
1749 			cnt++;
1750 	return cnt;
1751 }
1752 
/* Get the number of PFC-enabled TCs that have a private buffer */
1754 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1755 				  struct hclge_pkt_buf_alloc *buf_alloc)
1756 {
1757 	struct hclge_priv_buf *priv;
1758 	unsigned int i;
1759 	int cnt = 0;
1760 
1761 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1762 		priv = &buf_alloc->priv_buf[i];
1763 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1764 		    priv->enable)
1765 			cnt++;
1766 	}
1767 
1768 	return cnt;
1769 }
1770 
/* Get the number of PFC-disabled TCs that have a private buffer */
1772 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1773 				     struct hclge_pkt_buf_alloc *buf_alloc)
1774 {
1775 	struct hclge_priv_buf *priv;
1776 	unsigned int i;
1777 	int cnt = 0;
1778 
1779 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1780 		priv = &buf_alloc->priv_buf[i];
1781 		if (hdev->hw_tc_map & BIT(i) &&
1782 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1783 		    priv->enable)
1784 			cnt++;
1785 	}
1786 
1787 	return cnt;
1788 }
1789 
1790 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1791 {
1792 	struct hclge_priv_buf *priv;
1793 	u32 rx_priv = 0;
1794 	int i;
1795 
1796 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1797 		priv = &buf_alloc->priv_buf[i];
1798 		if (priv->enable)
1799 			rx_priv += priv->buf_size;
1800 	}
1801 	return rx_priv;
1802 }
1803 
1804 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1805 {
1806 	u32 i, total_tx_size = 0;
1807 
1808 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1809 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1810 
1811 	return total_tx_size;
1812 }
1813 
1814 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1815 				struct hclge_pkt_buf_alloc *buf_alloc,
1816 				u32 rx_all)
1817 {
1818 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1819 	u32 tc_num = hclge_get_tc_num(hdev);
1820 	u32 shared_buf, aligned_mps;
1821 	u32 rx_priv;
1822 	int i;
1823 
1824 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1825 
1826 	if (hnae3_dev_dcb_supported(hdev))
1827 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1828 					hdev->dv_buf_size;
1829 	else
1830 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1831 					+ hdev->dv_buf_size;
1832 
1833 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1834 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1835 			     HCLGE_BUF_SIZE_UNIT);
1836 
1837 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1838 	if (rx_all < rx_priv + shared_std)
1839 		return false;
1840 
1841 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1842 	buf_alloc->s_buf.buf_size = shared_buf;
1843 	if (hnae3_dev_dcb_supported(hdev)) {
1844 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1845 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1846 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1847 				  HCLGE_BUF_SIZE_UNIT);
1848 	} else {
1849 		buf_alloc->s_buf.self.high = aligned_mps +
1850 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1851 		buf_alloc->s_buf.self.low = aligned_mps;
1852 	}
1853 
1854 	if (hnae3_dev_dcb_supported(hdev)) {
1855 		hi_thrd = shared_buf - hdev->dv_buf_size;
1856 
1857 		if (tc_num <= NEED_RESERVE_TC_NUM)
1858 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1859 					/ BUF_MAX_PERCENT;
1860 
1861 		if (tc_num)
1862 			hi_thrd = hi_thrd / tc_num;
1863 
1864 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1865 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1866 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1867 	} else {
1868 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1869 		lo_thrd = aligned_mps;
1870 	}
1871 
1872 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1873 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1874 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1875 	}
1876 
1877 	return true;
1878 }
1879 
1880 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1881 				struct hclge_pkt_buf_alloc *buf_alloc)
1882 {
1883 	u32 i, total_size;
1884 
1885 	total_size = hdev->pkt_buf_size;
1886 
1887 	/* alloc tx buffer for all enabled tc */
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890 
1891 		if (hdev->hw_tc_map & BIT(i)) {
1892 			if (total_size < hdev->tx_buf_size)
1893 				return -ENOMEM;
1894 
1895 			priv->tx_buf_size = hdev->tx_buf_size;
1896 		} else {
1897 			priv->tx_buf_size = 0;
1898 		}
1899 
1900 		total_size -= priv->tx_buf_size;
1901 	}
1902 
1903 	return 0;
1904 }
1905 
1906 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1907 				  struct hclge_pkt_buf_alloc *buf_alloc)
1908 {
1909 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1910 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1911 	unsigned int i;
1912 
1913 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1914 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1915 
1916 		priv->enable = 0;
1917 		priv->wl.low = 0;
1918 		priv->wl.high = 0;
1919 		priv->buf_size = 0;
1920 
1921 		if (!(hdev->hw_tc_map & BIT(i)))
1922 			continue;
1923 
1924 		priv->enable = 1;
1925 
1926 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1927 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1928 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1929 						HCLGE_BUF_SIZE_UNIT);
1930 		} else {
1931 			priv->wl.low = 0;
1932 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1933 					aligned_mps;
1934 		}
1935 
1936 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1937 	}
1938 
1939 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1940 }
1941 
1942 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1943 					  struct hclge_pkt_buf_alloc *buf_alloc)
1944 {
1945 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1946 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1947 	int i;
1948 
	/* clear the last TC's private buffer first */
1950 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1951 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1952 		unsigned int mask = BIT((unsigned int)i);
1953 
1954 		if (hdev->hw_tc_map & mask &&
1955 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1956 			/* Clear the no pfc TC private buffer */
1957 			priv->wl.low = 0;
1958 			priv->wl.high = 0;
1959 			priv->buf_size = 0;
1960 			priv->enable = 0;
1961 			no_pfc_priv_num--;
1962 		}
1963 
1964 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1965 		    no_pfc_priv_num == 0)
1966 			break;
1967 	}
1968 
1969 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1970 }
1971 
1972 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1973 					struct hclge_pkt_buf_alloc *buf_alloc)
1974 {
1975 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1976 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1977 	int i;
1978 
	/* clear the last TC's private buffer first */
1980 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1981 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1982 		unsigned int mask = BIT((unsigned int)i);
1983 
1984 		if (hdev->hw_tc_map & mask &&
1985 		    hdev->tm_info.hw_pfc_map & mask) {
1986 			/* Reduce the number of pfc TC with private buffer */
1987 			priv->wl.low = 0;
1988 			priv->enable = 0;
1989 			priv->wl.high = 0;
1990 			priv->buf_size = 0;
1991 			pfc_priv_num--;
1992 		}
1993 
1994 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1995 		    pfc_priv_num == 0)
1996 			break;
1997 	}
1998 
1999 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2000 }
2001 
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2004 {
2005 #define COMPENSATE_BUFFER	0x3C00
2006 #define COMPENSATE_HALF_MPS_NUM	5
2007 #define PRIV_WL_GAP		0x1800
2008 
2009 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2010 	u32 tc_num = hclge_get_tc_num(hdev);
2011 	u32 half_mps = hdev->mps >> 1;
2012 	u32 min_rx_priv;
2013 	unsigned int i;
2014 
2015 	if (tc_num)
2016 		rx_priv = rx_priv / tc_num;
2017 
2018 	if (tc_num <= NEED_RESERVE_TC_NUM)
2019 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2020 
2021 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2022 			COMPENSATE_HALF_MPS_NUM * half_mps;
2023 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2024 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2025 
2026 	if (rx_priv < min_rx_priv)
2027 		return false;
2028 
2029 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2030 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2031 
2032 		priv->enable = 0;
2033 		priv->wl.low = 0;
2034 		priv->wl.high = 0;
2035 		priv->buf_size = 0;
2036 
2037 		if (!(hdev->hw_tc_map & BIT(i)))
2038 			continue;
2039 
2040 		priv->enable = 1;
2041 		priv->buf_size = rx_priv;
2042 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2043 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2044 	}
2045 
2046 	buf_alloc->s_buf.buf_size = 0;
2047 
2048 	return true;
2049 }
2050 
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
2056 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2057 				struct hclge_pkt_buf_alloc *buf_alloc)
2058 {
2059 	/* When DCB is not supported, rx private buffer is not allocated. */
2060 	if (!hnae3_dev_dcb_supported(hdev)) {
2061 		u32 rx_all = hdev->pkt_buf_size;
2062 
2063 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2064 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2065 			return -ENOMEM;
2066 
2067 		return 0;
2068 	}
2069 
2070 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2071 		return 0;
2072 
2073 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2074 		return 0;
2075 
2076 	/* try to decrease the buffer size */
2077 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2078 		return 0;
2079 
2080 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2081 		return 0;
2082 
2083 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2084 		return 0;
2085 
2086 	return -ENOMEM;
2087 }
2088 
2089 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2090 				   struct hclge_pkt_buf_alloc *buf_alloc)
2091 {
2092 	struct hclge_rx_priv_buff_cmd *req;
2093 	struct hclge_desc desc;
2094 	int ret;
2095 	int i;
2096 
2097 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2098 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2099 
	/* Alloc the private buffer for each TC */
2101 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2102 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2103 
2104 		req->buf_num[i] =
2105 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2106 		req->buf_num[i] |=
2107 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2108 	}
2109 
2110 	req->shared_buf =
2111 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2112 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2113 
2114 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2115 	if (ret)
2116 		dev_err(&hdev->pdev->dev,
2117 			"rx private buffer alloc cmd failed %d\n", ret);
2118 
2119 	return ret;
2120 }
2121 
2122 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2123 				   struct hclge_pkt_buf_alloc *buf_alloc)
2124 {
2125 	struct hclge_rx_priv_wl_buf *req;
2126 	struct hclge_priv_buf *priv;
2127 	struct hclge_desc desc[2];
2128 	int i, j;
2129 	int ret;
2130 
2131 	for (i = 0; i < 2; i++) {
2132 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2133 					   false);
2134 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2135 
		/* The first descriptor sets the NEXT bit to 1 */
2137 		if (i == 0)
2138 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2139 		else
2140 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2141 
2142 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2143 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2144 
2145 			priv = &buf_alloc->priv_buf[idx];
2146 			req->tc_wl[j].high =
2147 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2148 			req->tc_wl[j].high |=
2149 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2150 			req->tc_wl[j].low =
2151 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2152 			req->tc_wl[j].low |=
2153 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154 		}
2155 	}
2156 
	/* Send 2 descriptors at one time */
2158 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2159 	if (ret)
2160 		dev_err(&hdev->pdev->dev,
2161 			"rx private waterline config cmd failed %d\n",
2162 			ret);
2163 	return ret;
2164 }
2165 
2166 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2167 				    struct hclge_pkt_buf_alloc *buf_alloc)
2168 {
2169 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2170 	struct hclge_rx_com_thrd *req;
2171 	struct hclge_desc desc[2];
2172 	struct hclge_tc_thrd *tc;
2173 	int i, j;
2174 	int ret;
2175 
2176 	for (i = 0; i < 2; i++) {
2177 		hclge_cmd_setup_basic_desc(&desc[i],
2178 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2179 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2180 
		/* The first descriptor sets the NEXT bit to 1 */
2182 		if (i == 0)
2183 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2184 		else
2185 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2186 
2187 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2188 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2189 
2190 			req->com_thrd[j].high =
2191 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2192 			req->com_thrd[j].high |=
2193 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2194 			req->com_thrd[j].low =
2195 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2196 			req->com_thrd[j].low |=
2197 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 		}
2199 	}
2200 
2201 	/* Send 2 descriptors at one time */
2202 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2203 	if (ret)
2204 		dev_err(&hdev->pdev->dev,
2205 			"common threshold config cmd failed %d\n", ret);
2206 	return ret;
2207 }
2208 
2209 static int hclge_common_wl_config(struct hclge_dev *hdev,
2210 				  struct hclge_pkt_buf_alloc *buf_alloc)
2211 {
2212 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2213 	struct hclge_rx_com_wl *req;
2214 	struct hclge_desc desc;
2215 	int ret;
2216 
2217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2218 
2219 	req = (struct hclge_rx_com_wl *)desc.data;
2220 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2221 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2222 
2223 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2224 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2225 
2226 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2227 	if (ret)
2228 		dev_err(&hdev->pdev->dev,
2229 			"common waterline config cmd failed %d\n", ret);
2230 
2231 	return ret;
2232 }
2233 
2234 int hclge_buffer_alloc(struct hclge_dev *hdev)
2235 {
2236 	struct hclge_pkt_buf_alloc *pkt_buf;
2237 	int ret;
2238 
2239 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2240 	if (!pkt_buf)
2241 		return -ENOMEM;
2242 
2243 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2244 	if (ret) {
2245 		dev_err(&hdev->pdev->dev,
2246 			"could not calc tx buffer size for all TCs %d\n", ret);
2247 		goto out;
2248 	}
2249 
2250 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2251 	if (ret) {
2252 		dev_err(&hdev->pdev->dev,
2253 			"could not alloc tx buffers %d\n", ret);
2254 		goto out;
2255 	}
2256 
2257 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2258 	if (ret) {
2259 		dev_err(&hdev->pdev->dev,
2260 			"could not calc rx priv buffer size for all TCs %d\n",
2261 			ret);
2262 		goto out;
2263 	}
2264 
2265 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2266 	if (ret) {
2267 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2268 			ret);
2269 		goto out;
2270 	}
2271 
2272 	if (hnae3_dev_dcb_supported(hdev)) {
2273 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2274 		if (ret) {
2275 			dev_err(&hdev->pdev->dev,
2276 				"could not configure rx private waterline %d\n",
2277 				ret);
2278 			goto out;
2279 		}
2280 
2281 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2282 		if (ret) {
2283 			dev_err(&hdev->pdev->dev,
2284 				"could not configure common threshold %d\n",
2285 				ret);
2286 			goto out;
2287 		}
2288 	}
2289 
2290 	ret = hclge_common_wl_config(hdev, pkt_buf);
2291 	if (ret)
2292 		dev_err(&hdev->pdev->dev,
2293 			"could not configure common waterline %d\n", ret);
2294 
2295 out:
2296 	kfree(pkt_buf);
2297 	return ret;
2298 }
2299 
2300 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2301 {
2302 	struct hnae3_handle *roce = &vport->roce;
2303 	struct hnae3_handle *nic = &vport->nic;
2304 
2305 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2306 
2307 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2308 	    vport->back->num_msi_left == 0)
2309 		return -EINVAL;
2310 
2311 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2312 
2313 	roce->rinfo.netdev = nic->kinfo.netdev;
2314 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2315 
2316 	roce->pdev = nic->pdev;
2317 	roce->ae_algo = nic->ae_algo;
2318 	roce->numa_node_mask = nic->numa_node_mask;
2319 
2320 	return 0;
2321 }
2322 
2323 static int hclge_init_msi(struct hclge_dev *hdev)
2324 {
2325 	struct pci_dev *pdev = hdev->pdev;
2326 	int vectors;
2327 	int i;
2328 
2329 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2330 					hdev->num_msi,
2331 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2332 	if (vectors < 0) {
2333 		dev_err(&pdev->dev,
2334 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2335 			vectors);
2336 		return vectors;
2337 	}
2338 	if (vectors < hdev->num_msi)
2339 		dev_warn(&hdev->pdev->dev,
2340 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2341 			 hdev->num_msi, vectors);
2342 
2343 	hdev->num_msi = vectors;
2344 	hdev->num_msi_left = vectors;
2345 
2346 	hdev->base_msi_vector = pdev->irq;
2347 	hdev->roce_base_vector = hdev->base_msi_vector +
2348 				hdev->roce_base_msix_offset;
2349 
2350 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2351 					   sizeof(u16), GFP_KERNEL);
2352 	if (!hdev->vector_status) {
2353 		pci_free_irq_vectors(pdev);
2354 		return -ENOMEM;
2355 	}
2356 
2357 	for (i = 0; i < hdev->num_msi; i++)
2358 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2359 
2360 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 					sizeof(int), GFP_KERNEL);
2362 	if (!hdev->vector_irq) {
2363 		pci_free_irq_vectors(pdev);
2364 		return -ENOMEM;
2365 	}
2366 
2367 	return 0;
2368 }
2369 
2370 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2371 {
2372 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2373 		duplex = HCLGE_MAC_FULL;
2374 
2375 	return duplex;
2376 }
2377 
2378 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2379 				      u8 duplex)
2380 {
2381 	struct hclge_config_mac_speed_dup_cmd *req;
2382 	struct hclge_desc desc;
2383 	int ret;
2384 
2385 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2386 
2387 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2388 
2389 	if (duplex)
2390 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2391 
2392 	switch (speed) {
2393 	case HCLGE_MAC_SPEED_10M:
2394 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2395 				HCLGE_CFG_SPEED_S, 6);
2396 		break;
2397 	case HCLGE_MAC_SPEED_100M:
2398 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 				HCLGE_CFG_SPEED_S, 7);
2400 		break;
2401 	case HCLGE_MAC_SPEED_1G:
2402 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 				HCLGE_CFG_SPEED_S, 0);
2404 		break;
2405 	case HCLGE_MAC_SPEED_10G:
2406 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 				HCLGE_CFG_SPEED_S, 1);
2408 		break;
2409 	case HCLGE_MAC_SPEED_25G:
2410 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 				HCLGE_CFG_SPEED_S, 2);
2412 		break;
2413 	case HCLGE_MAC_SPEED_40G:
2414 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 				HCLGE_CFG_SPEED_S, 3);
2416 		break;
2417 	case HCLGE_MAC_SPEED_50G:
2418 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 				HCLGE_CFG_SPEED_S, 4);
2420 		break;
2421 	case HCLGE_MAC_SPEED_100G:
2422 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 				HCLGE_CFG_SPEED_S, 5);
2424 		break;
2425 	default:
2426 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2427 		return -EINVAL;
2428 	}
2429 
2430 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2431 		      1);
2432 
2433 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2434 	if (ret) {
2435 		dev_err(&hdev->pdev->dev,
2436 			"mac speed/duplex config cmd failed %d.\n", ret);
2437 		return ret;
2438 	}
2439 
2440 	return 0;
2441 }
2442 
2443 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2444 {
2445 	struct hclge_mac *mac = &hdev->hw.mac;
2446 	int ret;
2447 
2448 	duplex = hclge_check_speed_dup(duplex, speed);
2449 	if (!mac->support_autoneg && mac->speed == speed &&
2450 	    mac->duplex == duplex)
2451 		return 0;
2452 
2453 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2454 	if (ret)
2455 		return ret;
2456 
2457 	hdev->hw.mac.speed = speed;
2458 	hdev->hw.mac.duplex = duplex;
2459 
2460 	return 0;
2461 }
2462 
2463 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2464 				     u8 duplex)
2465 {
2466 	struct hclge_vport *vport = hclge_get_vport(handle);
2467 	struct hclge_dev *hdev = vport->back;
2468 
2469 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2470 }
2471 
2472 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2473 {
2474 	struct hclge_config_auto_neg_cmd *req;
2475 	struct hclge_desc desc;
2476 	u32 flag = 0;
2477 	int ret;
2478 
2479 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2480 
2481 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2482 	if (enable)
2483 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2484 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2485 
2486 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2487 	if (ret)
2488 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2489 			ret);
2490 
2491 	return ret;
2492 }
2493 
2494 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2495 {
2496 	struct hclge_vport *vport = hclge_get_vport(handle);
2497 	struct hclge_dev *hdev = vport->back;
2498 
2499 	if (!hdev->hw.mac.support_autoneg) {
2500 		if (enable) {
2501 			dev_err(&hdev->pdev->dev,
2502 				"autoneg is not supported by current port\n");
2503 			return -EOPNOTSUPP;
2504 		} else {
2505 			return 0;
2506 		}
2507 	}
2508 
2509 	return hclge_set_autoneg_en(hdev, enable);
2510 }
2511 
2512 static int hclge_get_autoneg(struct hnae3_handle *handle)
2513 {
2514 	struct hclge_vport *vport = hclge_get_vport(handle);
2515 	struct hclge_dev *hdev = vport->back;
2516 	struct phy_device *phydev = hdev->hw.mac.phydev;
2517 
2518 	if (phydev)
2519 		return phydev->autoneg;
2520 
2521 	return hdev->hw.mac.autoneg;
2522 }
2523 
2524 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2525 {
2526 	struct hclge_vport *vport = hclge_get_vport(handle);
2527 	struct hclge_dev *hdev = vport->back;
2528 	int ret;
2529 
2530 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2531 
2532 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2533 	if (ret)
2534 		return ret;
2535 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2536 }
2537 
2538 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2539 {
2540 	struct hclge_vport *vport = hclge_get_vport(handle);
2541 	struct hclge_dev *hdev = vport->back;
2542 
2543 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2544 		return hclge_set_autoneg_en(hdev, !halt);
2545 
2546 	return 0;
2547 }
2548 
2549 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2550 {
2551 	struct hclge_config_fec_cmd *req;
2552 	struct hclge_desc desc;
2553 	int ret;
2554 
2555 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2556 
2557 	req = (struct hclge_config_fec_cmd *)desc.data;
2558 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2559 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2560 	if (fec_mode & BIT(HNAE3_FEC_RS))
2561 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2562 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2563 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2564 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2565 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2566 
2567 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2568 	if (ret)
2569 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2570 
2571 	return ret;
2572 }
2573 
2574 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2575 {
2576 	struct hclge_vport *vport = hclge_get_vport(handle);
2577 	struct hclge_dev *hdev = vport->back;
2578 	struct hclge_mac *mac = &hdev->hw.mac;
2579 	int ret;
2580 
2581 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2582 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2583 		return -EINVAL;
2584 	}
2585 
2586 	ret = hclge_set_fec_hw(hdev, fec_mode);
2587 	if (ret)
2588 		return ret;
2589 
2590 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2591 	return 0;
2592 }
2593 
2594 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2595 			  u8 *fec_mode)
2596 {
2597 	struct hclge_vport *vport = hclge_get_vport(handle);
2598 	struct hclge_dev *hdev = vport->back;
2599 	struct hclge_mac *mac = &hdev->hw.mac;
2600 
2601 	if (fec_ability)
2602 		*fec_ability = mac->fec_ability;
2603 	if (fec_mode)
2604 		*fec_mode = mac->fec_mode;
2605 }
2606 
2607 static int hclge_mac_init(struct hclge_dev *hdev)
2608 {
2609 	struct hclge_mac *mac = &hdev->hw.mac;
2610 	int ret;
2611 
2612 	hdev->support_sfp_query = true;
2613 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2614 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2615 					 hdev->hw.mac.duplex);
2616 	if (ret)
2617 		return ret;
2618 
2619 	if (hdev->hw.mac.support_autoneg) {
2620 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2621 		if (ret)
2622 			return ret;
2623 	}
2624 
2625 	mac->link = 0;
2626 
2627 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2628 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2629 		if (ret)
2630 			return ret;
2631 	}
2632 
2633 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2634 	if (ret) {
2635 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2636 		return ret;
2637 	}
2638 
2639 	ret = hclge_set_default_loopback(hdev);
2640 	if (ret)
2641 		return ret;
2642 
2643 	ret = hclge_buffer_alloc(hdev);
2644 	if (ret)
2645 		dev_err(&hdev->pdev->dev,
2646 			"allocate buffer fail, ret=%d\n", ret);
2647 
2648 	return ret;
2649 }
2650 
2651 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2652 {
2653 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2654 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2655 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2656 				    hclge_wq, &hdev->service_task, 0);
2657 }
2658 
2659 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2660 {
2661 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2662 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2663 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2664 				    hclge_wq, &hdev->service_task, 0);
2665 }
2666 
2667 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2668 {
2669 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2670 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2671 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2672 				    hclge_wq, &hdev->service_task,
2673 				    delay_time);
2674 }
2675 
2676 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2677 {
2678 	struct hclge_link_status_cmd *req;
2679 	struct hclge_desc desc;
2680 	int ret;
2681 
2682 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2683 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2684 	if (ret) {
2685 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2686 			ret);
2687 		return ret;
2688 	}
2689 
2690 	req = (struct hclge_link_status_cmd *)desc.data;
2691 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2692 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2693 
2694 	return 0;
2695 }
2696 
2697 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2698 {
2699 	struct phy_device *phydev = hdev->hw.mac.phydev;
2700 
2701 	*link_status = HCLGE_LINK_STATUS_DOWN;
2702 
2703 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2704 		return 0;
2705 
2706 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2707 		return 0;
2708 
2709 	return hclge_get_mac_link_status(hdev, link_status);
2710 }
2711 
2712 static void hclge_update_link_status(struct hclge_dev *hdev)
2713 {
2714 	struct hnae3_client *rclient = hdev->roce_client;
2715 	struct hnae3_client *client = hdev->nic_client;
2716 	struct hnae3_handle *rhandle;
2717 	struct hnae3_handle *handle;
2718 	int state;
2719 	int ret;
2720 	int i;
2721 
2722 	if (!client)
2723 		return;
2724 
2725 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2726 		return;
2727 
2728 	ret = hclge_get_mac_phy_link(hdev, &state);
2729 	if (ret) {
2730 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2731 		return;
2732 	}
2733 
2734 	if (state != hdev->hw.mac.link) {
2735 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2736 			handle = &hdev->vport[i].nic;
2737 			client->ops->link_status_change(handle, state);
2738 			hclge_config_mac_tnl_int(hdev, state);
2739 			rhandle = &hdev->vport[i].roce;
2740 			if (rclient && rclient->ops->link_status_change)
2741 				rclient->ops->link_status_change(rhandle,
2742 								 state);
2743 		}
2744 		hdev->hw.mac.link = state;
2745 	}
2746 
2747 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2748 }
2749 
2750 static void hclge_update_port_capability(struct hclge_mac *mac)
2751 {
2752 	/* update fec ability by speed */
2753 	hclge_convert_setting_fec(mac);
2754 
	/* firmware cannot identify the backplane type; the media type read
	 * from the configuration helps to determine it
	 */
2758 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2759 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2760 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2761 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2762 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2763 
2764 	if (mac->support_autoneg) {
2765 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2766 		linkmode_copy(mac->advertising, mac->supported);
2767 	} else {
2768 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2769 				   mac->supported);
2770 		linkmode_zero(mac->advertising);
2771 	}
2772 }
2773 
2774 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2775 {
2776 	struct hclge_sfp_info_cmd *resp;
2777 	struct hclge_desc desc;
2778 	int ret;
2779 
2780 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2781 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2782 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2783 	if (ret == -EOPNOTSUPP) {
2784 		dev_warn(&hdev->pdev->dev,
2785 			 "IMP do not support get SFP speed %d\n", ret);
2786 		return ret;
2787 	} else if (ret) {
2788 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2789 		return ret;
2790 	}
2791 
2792 	*speed = le32_to_cpu(resp->speed);
2793 
2794 	return 0;
2795 }
2796 
2797 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2798 {
2799 	struct hclge_sfp_info_cmd *resp;
2800 	struct hclge_desc desc;
2801 	int ret;
2802 
2803 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2804 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2805 
2806 	resp->query_type = QUERY_ACTIVE_SPEED;
2807 
2808 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2809 	if (ret == -EOPNOTSUPP) {
2810 		dev_warn(&hdev->pdev->dev,
2811 			 "IMP does not support get SFP info %d\n", ret);
2812 		return ret;
2813 	} else if (ret) {
2814 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2815 		return ret;
2816 	}
2817 
	/* In some cases, the MAC speed fetched from the IMP may be 0; it
	 * should not be written to mac->speed.
	 */
2821 	if (!le32_to_cpu(resp->speed))
2822 		return 0;
2823 
2824 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means the firmware is an old
	 * version that does not report these params, so do not update them
	 */
2828 	if (resp->speed_ability) {
2829 		mac->module_type = le32_to_cpu(resp->module_type);
2830 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2831 		mac->autoneg = resp->autoneg;
2832 		mac->support_autoneg = resp->autoneg_ability;
2833 		mac->speed_type = QUERY_ACTIVE_SPEED;
2834 		if (!resp->active_fec)
2835 			mac->fec_mode = 0;
2836 		else
2837 			mac->fec_mode = BIT(resp->active_fec);
2838 	} else {
2839 		mac->speed_type = QUERY_SFP_SPEED;
2840 	}
2841 
2842 	return 0;
2843 }
2844 
2845 static int hclge_update_port_info(struct hclge_dev *hdev)
2846 {
2847 	struct hclge_mac *mac = &hdev->hw.mac;
2848 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2849 	int ret;
2850 
2851 	/* get the port info from SFP cmd if not copper port */
2852 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2853 		return 0;
2854 
2855 	/* if IMP does not support get SFP/qSFP info, return directly */
2856 	if (!hdev->support_sfp_query)
2857 		return 0;
2858 
2859 	if (hdev->pdev->revision >= 0x21)
2860 		ret = hclge_get_sfp_info(hdev, mac);
2861 	else
2862 		ret = hclge_get_sfp_speed(hdev, &speed);
2863 
2864 	if (ret == -EOPNOTSUPP) {
2865 		hdev->support_sfp_query = false;
2866 		return ret;
2867 	} else if (ret) {
2868 		return ret;
2869 	}
2870 
2871 	if (hdev->pdev->revision >= 0x21) {
2872 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2873 			hclge_update_port_capability(mac);
2874 			return 0;
2875 		}
2876 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2877 					       HCLGE_MAC_FULL);
2878 	} else {
2879 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2880 			return 0; /* do nothing if no SFP */
2881 
2882 		/* must config full duplex for SFP */
2883 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2884 	}
2885 }
2886 
2887 static int hclge_get_status(struct hnae3_handle *handle)
2888 {
2889 	struct hclge_vport *vport = hclge_get_vport(handle);
2890 	struct hclge_dev *hdev = vport->back;
2891 
2892 	hclge_update_link_status(hdev);
2893 
2894 	return hdev->hw.mac.link;
2895 }
2896 
2897 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2898 {
2899 	if (!pci_num_vf(hdev->pdev)) {
2900 		dev_err(&hdev->pdev->dev,
2901 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
2902 		return NULL;
2903 	}
2904 
2905 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2906 		dev_err(&hdev->pdev->dev,
2907 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
2908 			vf, pci_num_vf(hdev->pdev));
2909 		return NULL;
2910 	}
2911 
	/* VFs start from 1 in vport */
2913 	vf += HCLGE_VF_VPORT_START_NUM;
2914 	return &hdev->vport[vf];
2915 }
2916 
2917 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2918 			       struct ifla_vf_info *ivf)
2919 {
2920 	struct hclge_vport *vport = hclge_get_vport(handle);
2921 	struct hclge_dev *hdev = vport->back;
2922 
2923 	vport = hclge_get_vf_vport(hdev, vf);
2924 	if (!vport)
2925 		return -EINVAL;
2926 
2927 	ivf->vf = vf;
2928 	ivf->linkstate = vport->vf_info.link_state;
2929 	ivf->spoofchk = vport->vf_info.spoofchk;
2930 	ivf->trusted = vport->vf_info.trusted;
2931 	ivf->min_tx_rate = 0;
2932 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2933 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2934 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2935 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2936 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
2937 
2938 	return 0;
2939 }
2940 
2941 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2942 				   int link_state)
2943 {
2944 	struct hclge_vport *vport = hclge_get_vport(handle);
2945 	struct hclge_dev *hdev = vport->back;
2946 
2947 	vport = hclge_get_vf_vport(hdev, vf);
2948 	if (!vport)
2949 		return -EINVAL;
2950 
2951 	vport->vf_info.link_state = link_state;
2952 
2953 	return 0;
2954 }
2955 
2956 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2957 {
2958 	u32 cmdq_src_reg, msix_src_reg;
2959 
2960 	/* fetch the events from their corresponding regs */
2961 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2962 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2963 
	/* Assumption: if reset and mailbox events are reported together,
	 * only the reset event is processed in this pass and the mailbox
	 * events are deferred. Since the RX CMDQ event is not cleared this
	 * time, the H/W will raise another interrupt just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
2972 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
2973 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2974 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2975 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2976 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2977 		hdev->rst_stats.imp_rst_cnt++;
2978 		return HCLGE_VECTOR0_EVENT_RST;
2979 	}
2980 
2981 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
2982 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2983 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2984 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2985 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2986 		hdev->rst_stats.global_rst_cnt++;
2987 		return HCLGE_VECTOR0_EVENT_RST;
2988 	}
2989 
2990 	/* check for vector0 msix event source */
2991 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2992 		*clearval = msix_src_reg;
2993 		return HCLGE_VECTOR0_EVENT_ERR;
2994 	}
2995 
2996 	/* check for vector0 mailbox(=CMDQ RX) event source */
2997 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2998 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2999 		*clearval = cmdq_src_reg;
3000 		return HCLGE_VECTOR0_EVENT_MBX;
3001 	}
3002 
3003 	/* print other vector0 event source */
3004 	dev_info(&hdev->pdev->dev,
3005 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3006 		 cmdq_src_reg, msix_src_reg);
3007 	*clearval = msix_src_reg;
3008 
3009 	return HCLGE_VECTOR0_EVENT_OTHER;
3010 }
3011 
3012 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3013 				    u32 regclr)
3014 {
3015 	switch (event_type) {
3016 	case HCLGE_VECTOR0_EVENT_RST:
3017 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3018 		break;
3019 	case HCLGE_VECTOR0_EVENT_MBX:
3020 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3021 		break;
3022 	default:
3023 		break;
3024 	}
3025 }
3026 
3027 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3028 {
3029 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3030 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3031 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3032 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3033 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3034 }
3035 
3036 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3037 {
3038 	writel(enable ? 1 : 0, vector->addr);
3039 }
3040 
3041 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3042 {
3043 	struct hclge_dev *hdev = data;
3044 	u32 clearval = 0;
3045 	u32 event_cause;
3046 
3047 	hclge_enable_vector(&hdev->misc_vector, false);
3048 	event_cause = hclge_check_event_cause(hdev, &clearval);
3049 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3051 	switch (event_cause) {
3052 	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This
		 * could only be decided after we fetch the type of errors
		 * which caused this event. Therefore, we will do below for
		 * now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have a deferred type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This would be
		 *    done by first decoding the types of errors.
		 */
3063 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3064 		/* fall through */
3065 	case HCLGE_VECTOR0_EVENT_RST:
3066 		hclge_reset_task_schedule(hdev);
3067 		break;
3068 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. we are not handling any mbx task and have not been
		 *    scheduled as well, or
		 * 2. we are handling an mbx task but nothing more has been
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
3078 		hclge_mbx_task_schedule(hdev);
3079 		break;
3080 	default:
3081 		dev_warn(&hdev->pdev->dev,
3082 			 "received unknown or unhandled event of vector0\n");
3083 		break;
3084 	}
3085 
3086 	hclge_clear_event_cause(hdev, event_cause, clearval);
3087 
	/* Re-enable the interrupt if it was not caused by a reset. When
	 * clearval is 0, the interrupt status may have been cleared by
	 * hardware before the driver read the status register; in that case
	 * the vector0 interrupt should also be re-enabled.
	 */
3093 	if (!clearval ||
3094 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3095 		hclge_enable_vector(&hdev->misc_vector, true);
3096 	}
3097 
3098 	return IRQ_HANDLED;
3099 }
3100 
3101 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3102 {
3103 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3104 		dev_warn(&hdev->pdev->dev,
3105 			 "vector(vector_id %d) has been freed.\n", vector_id);
3106 		return;
3107 	}
3108 
3109 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3110 	hdev->num_msi_left += 1;
3111 	hdev->num_msi_used -= 1;
3112 }
3113 
3114 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3115 {
3116 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3117 
3118 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3119 
3120 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3121 	hdev->vector_status[0] = 0;
3122 
3123 	hdev->num_msi_left -= 1;
3124 	hdev->num_msi_used += 1;
3125 }
3126 
3127 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3128 				      const cpumask_t *mask)
3129 {
3130 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3131 					      affinity_notify);
3132 
3133 	cpumask_copy(&hdev->affinity_mask, mask);
3134 }
3135 
3136 static void hclge_irq_affinity_release(struct kref *ref)
3137 {
3138 }
3139 
3140 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3141 {
3142 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3143 			      &hdev->affinity_mask);
3144 
3145 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3146 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3147 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3148 				  &hdev->affinity_notify);
3149 }
3150 
3151 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3152 {
3153 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3154 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3155 }
3156 
3157 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3158 {
3159 	int ret;
3160 
3161 	hclge_get_misc_vector(hdev);
3162 
	/* this irq is explicitly freed in hclge_misc_irq_uninit() */
3164 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3165 		 HCLGE_NAME, pci_name(hdev->pdev));
3166 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3167 			  0, hdev->misc_vector.name, hdev);
3168 	if (ret) {
3169 		hclge_free_vector(hdev, 0);
3170 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3171 			hdev->misc_vector.vector_irq);
3172 	}
3173 
3174 	return ret;
3175 }
3176 
3177 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3178 {
3179 	free_irq(hdev->misc_vector.vector_irq, hdev);
3180 	hclge_free_vector(hdev, 0);
3181 }
3182 
3183 int hclge_notify_client(struct hclge_dev *hdev,
3184 			enum hnae3_reset_notify_type type)
3185 {
3186 	struct hnae3_client *client = hdev->nic_client;
3187 	u16 i;
3188 
3189 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3190 		return 0;
3191 
3192 	if (!client->ops->reset_notify)
3193 		return -EOPNOTSUPP;
3194 
3195 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3196 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3197 		int ret;
3198 
3199 		ret = client->ops->reset_notify(handle, type);
3200 		if (ret) {
3201 			dev_err(&hdev->pdev->dev,
3202 				"notify nic client failed %d(%d)\n", type, ret);
3203 			return ret;
3204 		}
3205 	}
3206 
3207 	return 0;
3208 }
3209 
3210 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3211 				    enum hnae3_reset_notify_type type)
3212 {
3213 	struct hnae3_client *client = hdev->roce_client;
3214 	int ret = 0;
3215 	u16 i;
3216 
3217 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3218 		return 0;
3219 
3220 	if (!client->ops->reset_notify)
3221 		return -EOPNOTSUPP;
3222 
3223 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3224 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3225 
3226 		ret = client->ops->reset_notify(handle, type);
3227 		if (ret) {
3228 			dev_err(&hdev->pdev->dev,
3229 				"notify roce client failed %d(%d)",
3230 				type, ret);
3231 			return ret;
3232 		}
3233 	}
3234 
3235 	return ret;
3236 }
3237 
3238 static int hclge_reset_wait(struct hclge_dev *hdev)
3239 {
#define HCLGE_RESET_WAIT_MS	100
3241 #define HCLGE_RESET_WAIT_CNT	350
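/* poll the reset register every 100 ms, up to 350 times (~35 seconds) */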
3242 
3243 	u32 val, reg, reg_bit;
3244 	u32 cnt = 0;
3245 
3246 	switch (hdev->reset_type) {
3247 	case HNAE3_IMP_RESET:
3248 		reg = HCLGE_GLOBAL_RESET_REG;
3249 		reg_bit = HCLGE_IMP_RESET_BIT;
3250 		break;
3251 	case HNAE3_GLOBAL_RESET:
3252 		reg = HCLGE_GLOBAL_RESET_REG;
3253 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3254 		break;
3255 	case HNAE3_FUNC_RESET:
3256 		reg = HCLGE_FUN_RST_ING;
3257 		reg_bit = HCLGE_FUN_RST_ING_B;
3258 		break;
3259 	default:
3260 		dev_err(&hdev->pdev->dev,
3261 			"Wait for unsupported reset type: %d\n",
3262 			hdev->reset_type);
3263 		return -EINVAL;
3264 	}
3265 
3266 	val = hclge_read_dev(&hdev->hw, reg);
3267 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3269 		val = hclge_read_dev(&hdev->hw, reg);
3270 		cnt++;
3271 	}
3272 
3273 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3274 		dev_warn(&hdev->pdev->dev,
3275 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3276 		return -EBUSY;
3277 	}
3278 
3279 	return 0;
3280 }
3281 
3282 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3283 {
3284 	struct hclge_vf_rst_cmd *req;
3285 	struct hclge_desc desc;
3286 
3287 	req = (struct hclge_vf_rst_cmd *)desc.data;
3288 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3289 	req->dest_vfid = func_id;
3290 
3291 	if (reset)
3292 		req->vf_rst = 0x1;
3293 
3294 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3295 }
3296 
3297 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3298 {
3299 	int i;
3300 
3301 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3302 		struct hclge_vport *vport = &hdev->vport[i];
3303 		int ret;
3304 
3305 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3306 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3307 		if (ret) {
3308 			dev_err(&hdev->pdev->dev,
3309 				"set vf(%u) rst failed %d!\n",
3310 				vport->vport_id, ret);
3311 			return ret;
3312 		}
3313 
3314 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3315 			continue;
3316 
3317 		/* Inform VF to process the reset.
3318 		 * hclge_inform_reset_assert_to_vf may fail if VF
3319 		 * driver is not loaded.
3320 		 */
3321 		ret = hclge_inform_reset_assert_to_vf(vport);
3322 		if (ret)
3323 			dev_warn(&hdev->pdev->dev,
3324 				 "inform reset to vf(%u) failed %d!\n",
3325 				 vport->vport_id, ret);
3326 	}
3327 
3328 	return 0;
3329 }
3330 
3331 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3332 {
3333 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3334 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3335 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3336 		return;
3337 
3338 	hclge_mbx_handler(hdev);
3339 
3340 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3341 }
3342 
3343 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3344 {
3345 	struct hclge_pf_rst_sync_cmd *req;
3346 	struct hclge_desc desc;
3347 	int cnt = 0;
3348 	int ret;
3349 
3350 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3351 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3352 
3353 	do {
		/* the VF needs to bring its netdev down via mailbox during a
		 * PF or FLR reset
		 */
3355 		hclge_mailbox_service_task(hdev);
3356 
3357 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* to be compatible with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
3361 		if (ret == -EOPNOTSUPP) {
3362 			msleep(HCLGE_RESET_SYNC_TIME);
3363 			return;
3364 		} else if (ret) {
3365 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3366 				 ret);
3367 			return;
3368 		} else if (req->all_vf_ready) {
3369 			return;
3370 		}
3371 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3372 		hclge_cmd_reuse_desc(&desc, true);
3373 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3374 
3375 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3376 }
3377 
3378 void hclge_report_hw_error(struct hclge_dev *hdev,
3379 			   enum hnae3_hw_error_type type)
3380 {
3381 	struct hnae3_client *client = hdev->nic_client;
3382 	u16 i;
3383 
3384 	if (!client || !client->ops->process_hw_error ||
3385 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3386 		return;
3387 
3388 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3389 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3390 }
3391 
3392 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3393 {
3394 	u32 reg_val;
3395 
3396 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3397 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3398 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3399 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3400 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3401 	}
3402 
3403 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3404 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3405 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3406 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3407 	}
3408 }
3409 
3410 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3411 {
3412 	struct hclge_desc desc;
3413 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3414 	int ret;
3415 
3416 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3417 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3418 	req->fun_reset_vfid = func_id;
3419 
3420 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3421 	if (ret)
3422 		dev_err(&hdev->pdev->dev,
3423 			"send function reset cmd fail, status =%d\n", ret);
3424 
3425 	return ret;
3426 }
3427 
3428 static void hclge_do_reset(struct hclge_dev *hdev)
3429 {
3430 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3431 	struct pci_dev *pdev = hdev->pdev;
3432 	u32 val;
3433 
3434 	if (hclge_get_hw_reset_stat(handle)) {
3435 		dev_info(&pdev->dev, "hardware reset not finish\n");
3436 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3437 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3438 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3439 		return;
3440 	}
3441 
3442 	switch (hdev->reset_type) {
3443 	case HNAE3_GLOBAL_RESET:
3444 		dev_info(&pdev->dev, "global reset requested\n");
3445 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3446 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3447 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3448 		break;
3449 	case HNAE3_FUNC_RESET:
3450 		dev_info(&pdev->dev, "PF reset requested\n");
3451 		/* schedule again to check later */
3452 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3453 		hclge_reset_task_schedule(hdev);
3454 		break;
3455 	default:
3456 		dev_warn(&pdev->dev,
3457 			 "unsupported reset type: %d\n", hdev->reset_type);
3458 		break;
3459 	}
3460 }
3461 
3462 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3463 						   unsigned long *addr)
3464 {
3465 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3466 	struct hclge_dev *hdev = ae_dev->priv;
3467 
3468 	/* first, resolve any unknown reset type to the known type(s) */
3469 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3470 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3471 					HCLGE_MISC_VECTOR_INT_STS);
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
3475 		if (hclge_handle_hw_msix_error(hdev, addr))
3476 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3477 				 msix_sts_reg);
3478 
3479 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred clearing the error event which caused the
		 * interrupt since it was not possible to do that in interrupt
		 * context (and this is the reason we introduced the new
		 * UNKNOWN reset type). Now that the errors have been handled
		 * and cleared in hardware, we can safely re-enable interrupts.
		 * This is an exception to the norm.
		 */
3487 		hclge_enable_vector(&hdev->misc_vector, true);
3488 	}
3489 
3490 	/* return the highest priority reset level amongst all */
3491 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3492 		rst_level = HNAE3_IMP_RESET;
3493 		clear_bit(HNAE3_IMP_RESET, addr);
3494 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3495 		clear_bit(HNAE3_FUNC_RESET, addr);
3496 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3497 		rst_level = HNAE3_GLOBAL_RESET;
3498 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3499 		clear_bit(HNAE3_FUNC_RESET, addr);
3500 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3501 		rst_level = HNAE3_FUNC_RESET;
3502 		clear_bit(HNAE3_FUNC_RESET, addr);
3503 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3504 		rst_level = HNAE3_FLR_RESET;
3505 		clear_bit(HNAE3_FLR_RESET, addr);
3506 	}
3507 
3508 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3509 	    rst_level < hdev->reset_type)
3510 		return HNAE3_NONE_RESET;
3511 
3512 	return rst_level;
3513 }
3514 
3515 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3516 {
3517 	u32 clearval = 0;
3518 
3519 	switch (hdev->reset_type) {
3520 	case HNAE3_IMP_RESET:
3521 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3522 		break;
3523 	case HNAE3_GLOBAL_RESET:
3524 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3525 		break;
3526 	default:
3527 		break;
3528 	}
3529 
3530 	if (!clearval)
3531 		return;
3532 
	/* For revision 0x20, the reset interrupt source can only be
	 * cleared after the hardware reset is done
	 */
3536 	if (hdev->pdev->revision == 0x20)
3537 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3538 				clearval);
3539 
3540 	hclge_enable_vector(&hdev->misc_vector, true);
3541 }
3542 
3543 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3544 {
3545 	u32 reg_val;
3546 
3547 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3548 	if (enable)
3549 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3550 	else
3551 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3552 
3553 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3554 }
3555 
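/* Ask all VFs to get ready for a function reset and wait until they are
 * ready before the reset is asserted.
 */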
3556 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3557 {
3558 	int ret;
3559 
3560 	ret = hclge_set_all_vf_rst(hdev, true);
3561 	if (ret)
3562 		return ret;
3563 
3564 	hclge_func_reset_sync_vf(hdev);
3565 
3566 	return 0;
3567 }
3568 
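/* Do the reset type specific preparation (notify VFs, assert the function
 * reset command or acknowledge an IMP reset), then complete the handshake
 * to tell the hardware that the driver side preparation is done.
 */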
3569 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3570 {
3571 	u32 reg_val;
3572 	int ret = 0;
3573 
3574 	switch (hdev->reset_type) {
3575 	case HNAE3_FUNC_RESET:
3576 		ret = hclge_func_reset_notify_vf(hdev);
3577 		if (ret)
3578 			return ret;
3579 
3580 		ret = hclge_func_reset_cmd(hdev, 0);
3581 		if (ret) {
3582 			dev_err(&hdev->pdev->dev,
3583 				"asserting function reset fail %d!\n", ret);
3584 			return ret;
3585 		}
3586 
3587 		/* After performing PF reset, it is not necessary to do the
3588 		 * mailbox handling or send any command to firmware, because
3589 		 * any mailbox handling or command to firmware is only valid
3590 		 * after hclge_cmd_init is called.
3591 		 */
3592 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3593 		hdev->rst_stats.pf_rst_cnt++;
3594 		break;
3595 	case HNAE3_FLR_RESET:
3596 		ret = hclge_func_reset_notify_vf(hdev);
3597 		if (ret)
3598 			return ret;
3599 		break;
3600 	case HNAE3_IMP_RESET:
3601 		hclge_handle_imp_error(hdev);
3602 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3603 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3604 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3605 		break;
3606 	default:
3607 		break;
3608 	}
3609 
3610 	/* inform hardware that preparatory work is done */
3611 	msleep(HCLGE_RESET_SYNC_TIME);
3612 	hclge_reset_handshake(hdev, true);
3613 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3614 
3615 	return ret;
3616 }
3617 
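/* Handle a failed reset. Returns true if the reset task should be
 * rescheduled (another reset is still pending or the retry limit has not
 * been reached), false if a new reset interrupt takes over or the failure
 * is treated as final.
 */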
3618 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3619 {
3620 #define MAX_RESET_FAIL_CNT 5
3621 
3622 	if (hdev->reset_pending) {
3623 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3624 			 hdev->reset_pending);
3625 		return true;
3626 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3627 		   HCLGE_RESET_INT_M) {
3628 		dev_info(&hdev->pdev->dev,
3629 			 "reset failed because new reset interrupt\n");
3630 		hclge_clear_reset_cause(hdev);
3631 		return false;
3632 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3633 		hdev->rst_stats.reset_fail_cnt++;
3634 		set_bit(hdev->reset_type, &hdev->reset_pending);
3635 		dev_info(&hdev->pdev->dev,
3636 			 "re-schedule reset task(%u)\n",
3637 			 hdev->rst_stats.reset_fail_cnt);
3638 		return true;
3639 	}
3640 
3641 	hclge_clear_reset_cause(hdev);
3642 
3643 	/* recover the handshake status when reset fail */
3644 	hclge_reset_handshake(hdev, true);
3645 
3646 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3647 
3648 	hclge_dbg_dump_rst_info(hdev);
3649 
3650 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3651 
3652 	return false;
3653 }
3654 
3655 static int hclge_set_rst_done(struct hclge_dev *hdev)
3656 {
3657 	struct hclge_pf_rst_done_cmd *req;
3658 	struct hclge_desc desc;
3659 	int ret;
3660 
3661 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3662 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3663 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3664 
3665 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3666 	/* To be compatible with the old firmware, which does not support
3667 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3668 	 * return success
3669 	 */
3670 	if (ret == -EOPNOTSUPP) {
3671 		dev_warn(&hdev->pdev->dev,
3672 			 "current firmware does not support command(0x%x)!\n",
3673 			 HCLGE_OPC_PF_RST_DONE);
3674 		return 0;
3675 	} else if (ret) {
3676 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3677 			ret);
3678 	}
3679 
3680 	return ret;
3681 }
3682 
3683 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3684 {
3685 	int ret = 0;
3686 
3687 	switch (hdev->reset_type) {
3688 	case HNAE3_FUNC_RESET:
3689 		/* fall through */
3690 	case HNAE3_FLR_RESET:
3691 		ret = hclge_set_all_vf_rst(hdev, false);
3692 		break;
3693 	case HNAE3_GLOBAL_RESET:
3694 		/* fall through */
3695 	case HNAE3_IMP_RESET:
3696 		ret = hclge_set_rst_done(hdev);
3697 		break;
3698 	default:
3699 		break;
3700 	}
3701 
3702 	/* clear up the handshake status after re-initialization is done */
3703 	hclge_reset_handshake(hdev, false);
3704 
3705 	return ret;
3706 }
3707 
3708 static int hclge_reset_stack(struct hclge_dev *hdev)
3709 {
3710 	int ret;
3711 
3712 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3713 	if (ret)
3714 		return ret;
3715 
3716 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3717 	if (ret)
3718 		return ret;
3719 
3720 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3721 }
3722 
3723 static int hclge_reset_prepare(struct hclge_dev *hdev)
3724 {
3725 	int ret;
3726 
3727 	hdev->rst_stats.reset_cnt++;
3728 	/* perform reset of the stack & ae device for a client */
3729 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3730 	if (ret)
3731 		return ret;
3732 
3733 	rtnl_lock();
3734 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3735 	rtnl_unlock();
3736 	if (ret)
3737 		return ret;
3738 
3739 	return hclge_reset_prepare_wait(hdev);
3740 }
3741 
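/* Rebuild the driver and client state after the hardware has completed
 * the reset, then schedule any higher level reset that was requested in
 * the meantime via default_reset_request.
 */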
3742 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3743 {
3744 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3745 	enum hnae3_reset_type reset_level;
3746 	int ret;
3747 
3748 	hdev->rst_stats.hw_reset_done_cnt++;
3749 
3750 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3751 	if (ret)
3752 		return ret;
3753 
3754 	rtnl_lock();
3755 	ret = hclge_reset_stack(hdev);
3756 	rtnl_unlock();
3757 	if (ret)
3758 		return ret;
3759 
3760 	hclge_clear_reset_cause(hdev);
3761 
3762 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3763 	/* ignore the RoCE notify error only if the reset has already failed
3764 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
3765 	 */
3766 	if (ret &&
3767 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3768 		return ret;
3769 
3770 	ret = hclge_reset_prepare_up(hdev);
3771 	if (ret)
3772 		return ret;
3773 
3774 	rtnl_lock();
3775 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3776 	rtnl_unlock();
3777 	if (ret)
3778 		return ret;
3779 
3780 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3781 	if (ret)
3782 		return ret;
3783 
3784 	hdev->last_reset_time = jiffies;
3785 	hdev->rst_stats.reset_fail_cnt = 0;
3786 	hdev->rst_stats.reset_done_cnt++;
3787 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3788 
3789 	/* if default_reset_request has a higher level reset request,
3790 	 * it should be handled as soon as possible, since some errors
3791 	 * need this kind of reset to be fixed.
3792 	 */
3793 	reset_level = hclge_get_reset_level(ae_dev,
3794 					    &hdev->default_reset_request);
3795 	if (reset_level != HNAE3_NONE_RESET)
3796 		set_bit(reset_level, &hdev->reset_request);
3797 
3798 	return 0;
3799 }
3800 
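/* Top level reset flow: prepare, wait for the hardware reset to complete,
 * then rebuild. On failure, let hclge_reset_err_handle() decide whether to
 * reschedule the reset task.
 */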
3801 static void hclge_reset(struct hclge_dev *hdev)
3802 {
3803 	if (hclge_reset_prepare(hdev))
3804 		goto err_reset;
3805 
3806 	if (hclge_reset_wait(hdev))
3807 		goto err_reset;
3808 
3809 	if (hclge_reset_rebuild(hdev))
3810 		goto err_reset;
3811 
3812 	return;
3813 
3814 err_reset:
3815 	if (hclge_reset_err_handle(hdev))
3816 		hclge_reset_task_schedule(hdev);
3817 }
3818 
3819 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3820 {
3821 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3822 	struct hclge_dev *hdev = ae_dev->priv;
3823 
3824 	/* We might end up getting called broadly because of the 2 cases below:
3825 	 * 1. A recoverable error was conveyed through APEI and the only way to
3826 	 *    bring back normalcy is to reset.
3827 	 * 2. A new reset request from the stack due to timeout
3828 	 *
3829 	 * For the first case, the error event might not have an ae handle
3830 	 * available. Check if this is a new reset request and we are not here
3831 	 * just because the last reset attempt did not succeed and the watchdog
3832 	 * hit us again. We will know this if the last reset request did not
3833 	 * occur very recently (watchdog timer = 5*HZ, so check after a
3834 	 * sufficiently large time, say 4*5*HZ). In case of a new request we
3835 	 * reset the "reset level" to PF reset. And if it is a repeat of the
3836 	 * most recent reset request, we want to make sure we throttle it, so
3837 	 * it is not allowed again before 3*HZ has passed.
3838 	 */
3839 	if (!handle)
3840 		handle = &hdev->vport[0].nic;
3841 
3842 	if (time_before(jiffies, (hdev->last_reset_time +
3843 				  HCLGE_RESET_INTERVAL))) {
3844 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3845 		return;
3846 	} else if (hdev->default_reset_request) {
3847 		hdev->reset_level =
3848 			hclge_get_reset_level(ae_dev,
3849 					      &hdev->default_reset_request);
3850 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3851 		hdev->reset_level = HNAE3_FUNC_RESET;
3852 	}
3853 
3854 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3855 		 hdev->reset_level);
3856 
3857 	/* request reset & schedule reset task */
3858 	set_bit(hdev->reset_level, &hdev->reset_request);
3859 	hclge_reset_task_schedule(hdev);
3860 
3861 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3862 		hdev->reset_level++;
3863 }
3864 
3865 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3866 					enum hnae3_reset_type rst_type)
3867 {
3868 	struct hclge_dev *hdev = ae_dev->priv;
3869 
3870 	set_bit(rst_type, &hdev->default_reset_request);
3871 }
3872 
3873 static void hclge_reset_timer(struct timer_list *t)
3874 {
3875 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3876 
3877 	/* if default_reset_request has no value, it means that this reset
3878 	 * request has already been handled, so just return here
3879 	 */
3880 	if (!hdev->default_reset_request)
3881 		return;
3882 
3883 	dev_info(&hdev->pdev->dev,
3884 		 "triggering reset in reset timer\n");
3885 	hclge_reset_event(hdev->pdev, NULL);
3886 }
3887 
3888 static void hclge_reset_subtask(struct hclge_dev *hdev)
3889 {
3890 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3891 
3892 	/* check if there is any ongoing reset in the hardware. This status can
3893 	 * be checked from reset_pending. If there is, we need to wait for the
3894 	 * hardware to complete the reset.
3895 	 *    a. If we are able to figure out in reasonable time that the
3896 	 *       hardware has fully reset, we can proceed with the driver and
3897 	 *       client reset.
3898 	 *    b. else, we can come back later to check this status, so
3899 	 *       re-schedule now.
3900 	 */
3901 	hdev->last_reset_time = jiffies;
3902 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3903 	if (hdev->reset_type != HNAE3_NONE_RESET)
3904 		hclge_reset(hdev);
3905 
3906 	/* check if we got any *new* reset requests to be honored */
3907 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3908 	if (hdev->reset_type != HNAE3_NONE_RESET)
3909 		hclge_do_reset(hdev);
3910 
3911 	hdev->reset_type = HNAE3_NONE_RESET;
3912 }
3913 
3914 static void hclge_reset_service_task(struct hclge_dev *hdev)
3915 {
3916 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3917 		return;
3918 
3919 	down(&hdev->reset_sem);
3920 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3921 
3922 	hclge_reset_subtask(hdev);
3923 
3924 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3925 	up(&hdev->reset_sem);
3926 }
3927 
3928 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3929 {
3930 	int i;
3931 
3932 	/* start from vport 1, since the PF (vport 0) is always alive */
3933 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3934 		struct hclge_vport *vport = &hdev->vport[i];
3935 
3936 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3937 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3938 
3939 		/* If vf is not alive, set to default value */
3940 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3941 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3942 	}
3943 }
3944 
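/* Periodic service work: always refresh the link status, MAC table and
 * promiscuous mode, but throttle the remaining housekeeping (vport alive
 * check, statistics, port info, VLAN and ARFS maintenance) to roughly
 * once per second before rescheduling itself.
 */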
3945 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3946 {
3947 	unsigned long delta = round_jiffies_relative(HZ);
3948 
3949 	/* Always handle the link updating to make sure link state is
3950 	 * updated when it is triggered by mbx.
3951 	 */
3952 	hclge_update_link_status(hdev);
3953 	hclge_sync_mac_table(hdev);
3954 	hclge_sync_promisc_mode(hdev);
3955 
3956 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3957 		delta = jiffies - hdev->last_serv_processed;
3958 
3959 		if (delta < round_jiffies_relative(HZ)) {
3960 			delta = round_jiffies_relative(HZ) - delta;
3961 			goto out;
3962 		}
3963 	}
3964 
3965 	hdev->serv_processed_cnt++;
3966 	hclge_update_vport_alive(hdev);
3967 
3968 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3969 		hdev->last_serv_processed = jiffies;
3970 		goto out;
3971 	}
3972 
3973 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3974 		hclge_update_stats_for_all(hdev);
3975 
3976 	hclge_update_port_info(hdev);
3977 	hclge_sync_vlan_filter(hdev);
3978 
3979 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3980 		hclge_rfs_filter_expire(hdev);
3981 
3982 	hdev->last_serv_processed = jiffies;
3983 
3984 out:
3985 	hclge_task_schedule(hdev, delta);
3986 }
3987 
3988 static void hclge_service_task(struct work_struct *work)
3989 {
3990 	struct hclge_dev *hdev =
3991 		container_of(work, struct hclge_dev, service_task.work);
3992 
3993 	hclge_reset_service_task(hdev);
3994 	hclge_mailbox_service_task(hdev);
3995 	hclge_periodic_service_task(hdev);
3996 
3997 	/* Handle reset and mbx again in case periodical task delays the
3998 	 * handling by calling hclge_task_schedule() in
3999 	 * hclge_periodic_service_task().
4000 	 */
4001 	hclge_reset_service_task(hdev);
4002 	hclge_mailbox_service_task(hdev);
4003 }
4004 
4005 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4006 {
4007 	/* VF handle has no client */
4008 	if (!handle->client)
4009 		return container_of(handle, struct hclge_vport, nic);
4010 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4011 		return container_of(handle, struct hclge_vport, roce);
4012 	else
4013 		return container_of(handle, struct hclge_vport, nic);
4014 }
4015 
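/* Allocate up to vector_num unused MSI-X vectors for this handle, filling
 * vector_info with the irq number and interrupt register address of each.
 * Returns the number of vectors actually allocated.
 */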
4016 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4017 			    struct hnae3_vector_info *vector_info)
4018 {
4019 	struct hclge_vport *vport = hclge_get_vport(handle);
4020 	struct hnae3_vector_info *vector = vector_info;
4021 	struct hclge_dev *hdev = vport->back;
4022 	int alloc = 0;
4023 	int i, j;
4024 
4025 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4026 	vector_num = min(hdev->num_msi_left, vector_num);
4027 
4028 	for (j = 0; j < vector_num; j++) {
4029 		for (i = 1; i < hdev->num_msi; i++) {
4030 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4031 				vector->vector = pci_irq_vector(hdev->pdev, i);
4032 				vector->io_addr = hdev->hw.io_base +
4033 					HCLGE_VECTOR_REG_BASE +
4034 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4035 					vport->vport_id *
4036 					HCLGE_VECTOR_VF_OFFSET;
4037 				hdev->vector_status[i] = vport->vport_id;
4038 				hdev->vector_irq[i] = vector->vector;
4039 
4040 				vector++;
4041 				alloc++;
4042 
4043 				break;
4044 			}
4045 		}
4046 	}
4047 	hdev->num_msi_left -= alloc;
4048 	hdev->num_msi_used += alloc;
4049 
4050 	return alloc;
4051 }
4052 
4053 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4054 {
4055 	int i;
4056 
4057 	for (i = 0; i < hdev->num_msi; i++)
4058 		if (vector == hdev->vector_irq[i])
4059 			return i;
4060 
4061 	return -EINVAL;
4062 }
4063 
4064 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4065 {
4066 	struct hclge_vport *vport = hclge_get_vport(handle);
4067 	struct hclge_dev *hdev = vport->back;
4068 	int vector_id;
4069 
4070 	vector_id = hclge_get_vector_index(hdev, vector);
4071 	if (vector_id < 0) {
4072 		dev_err(&hdev->pdev->dev,
4073 			"Get vector index fail. vector = %d\n", vector);
4074 		return vector_id;
4075 	}
4076 
4077 	hclge_free_vector(hdev, vector_id);
4078 
4079 	return 0;
4080 }
4081 
4082 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4083 {
4084 	return HCLGE_RSS_KEY_SIZE;
4085 }
4086 
4087 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4088 {
4089 	return HCLGE_RSS_IND_TBL_SIZE;
4090 }
4091 
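/* Program the RSS hash algorithm and hash key. The key is longer than a
 * single descriptor can carry, so it is written in chunks of
 * HCLGE_RSS_HASH_KEY_NUM bytes with an increasing key offset.
 */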
4092 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4093 				  const u8 hfunc, const u8 *key)
4094 {
4095 	struct hclge_rss_config_cmd *req;
4096 	unsigned int key_offset = 0;
4097 	struct hclge_desc desc;
4098 	int key_counts;
4099 	int key_size;
4100 	int ret;
4101 
4102 	key_counts = HCLGE_RSS_KEY_SIZE;
4103 	req = (struct hclge_rss_config_cmd *)desc.data;
4104 
4105 	while (key_counts) {
4106 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4107 					   false);
4108 
4109 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4110 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4111 
4112 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4113 		memcpy(req->hash_key,
4114 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4115 
4116 		key_counts -= key_size;
4117 		key_offset++;
4118 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4119 		if (ret) {
4120 			dev_err(&hdev->pdev->dev,
4121 				"Configure RSS config fail, status = %d\n",
4122 				ret);
4123 			return ret;
4124 		}
4125 	}
4126 	return 0;
4127 }
4128 
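/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor.
 */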
4129 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4130 {
4131 	struct hclge_rss_indirection_table_cmd *req;
4132 	struct hclge_desc desc;
4133 	int i, j;
4134 	int ret;
4135 
4136 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4137 
4138 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4139 		hclge_cmd_setup_basic_desc
4140 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4141 
4142 		req->start_table_index =
4143 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4144 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4145 
4146 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4147 			req->rss_result[j] =
4148 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4149 
4150 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4151 		if (ret) {
4152 			dev_err(&hdev->pdev->dev,
4153 				"Configure rss indir table fail,status = %d\n",
4154 				ret);
4155 			return ret;
4156 		}
4157 	}
4158 	return 0;
4159 }
4160 
4161 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4162 				 u16 *tc_size, u16 *tc_offset)
4163 {
4164 	struct hclge_rss_tc_mode_cmd *req;
4165 	struct hclge_desc desc;
4166 	int ret;
4167 	int i;
4168 
4169 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4170 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4171 
4172 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4173 		u16 mode = 0;
4174 
4175 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4176 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4177 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4178 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4179 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4180 
4181 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4182 	}
4183 
4184 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4185 	if (ret)
4186 		dev_err(&hdev->pdev->dev,
4187 			"Configure rss tc mode fail, status = %d\n", ret);
4188 
4189 	return ret;
4190 }
4191 
4192 static void hclge_get_rss_type(struct hclge_vport *vport)
4193 {
4194 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4195 	    vport->rss_tuple_sets.ipv4_udp_en ||
4196 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4197 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4198 	    vport->rss_tuple_sets.ipv6_udp_en ||
4199 	    vport->rss_tuple_sets.ipv6_sctp_en)
4200 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4201 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4202 		 vport->rss_tuple_sets.ipv6_fragment_en)
4203 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4204 	else
4205 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4206 }
4207 
4208 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4209 {
4210 	struct hclge_rss_input_tuple_cmd *req;
4211 	struct hclge_desc desc;
4212 	int ret;
4213 
4214 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4215 
4216 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4217 
4218 	/* Get the tuple cfg from pf */
4219 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4220 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4221 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4222 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4223 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4224 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4225 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4226 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4227 	hclge_get_rss_type(&hdev->vport[0]);
4228 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4229 	if (ret)
4230 		dev_err(&hdev->pdev->dev,
4231 			"Configure rss input fail, status = %d\n", ret);
4232 	return ret;
4233 }
4234 
4235 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4236 			 u8 *key, u8 *hfunc)
4237 {
4238 	struct hclge_vport *vport = hclge_get_vport(handle);
4239 	int i;
4240 
4241 	/* Get hash algorithm */
4242 	if (hfunc) {
4243 		switch (vport->rss_algo) {
4244 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4245 			*hfunc = ETH_RSS_HASH_TOP;
4246 			break;
4247 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4248 			*hfunc = ETH_RSS_HASH_XOR;
4249 			break;
4250 		default:
4251 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4252 			break;
4253 		}
4254 	}
4255 
4256 	/* Get the RSS Key required by the user */
4257 	if (key)
4258 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4259 
4260 	/* Get indirect table */
4261 	if (indir)
4262 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4263 			indir[i] =  vport->rss_indirection_tbl[i];
4264 
4265 	return 0;
4266 }
4267 
4268 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4269 			 const  u8 *key, const  u8 hfunc)
4270 {
4271 	struct hclge_vport *vport = hclge_get_vport(handle);
4272 	struct hclge_dev *hdev = vport->back;
4273 	u8 hash_algo;
4274 	int ret, i;
4275 
4276 	/* Set the RSS Hash Key if specified by the user */
4277 	if (key) {
4278 		switch (hfunc) {
4279 		case ETH_RSS_HASH_TOP:
4280 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4281 			break;
4282 		case ETH_RSS_HASH_XOR:
4283 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4284 			break;
4285 		case ETH_RSS_HASH_NO_CHANGE:
4286 			hash_algo = vport->rss_algo;
4287 			break;
4288 		default:
4289 			return -EINVAL;
4290 		}
4291 
4292 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4293 		if (ret)
4294 			return ret;
4295 
4296 		/* Update the shadow RSS key with the user specified key */
4297 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4298 		vport->rss_algo = hash_algo;
4299 	}
4300 
4301 	/* Update the shadow RSS table with user specified qids */
4302 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4303 		vport->rss_indirection_tbl[i] = indir[i];
4304 
4305 	/* Update the hardware */
4306 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4307 }
4308 
4309 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4310 {
4311 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4312 
4313 	if (nfc->data & RXH_L4_B_2_3)
4314 		hash_sets |= HCLGE_D_PORT_BIT;
4315 	else
4316 		hash_sets &= ~HCLGE_D_PORT_BIT;
4317 
4318 	if (nfc->data & RXH_IP_SRC)
4319 		hash_sets |= HCLGE_S_IP_BIT;
4320 	else
4321 		hash_sets &= ~HCLGE_S_IP_BIT;
4322 
4323 	if (nfc->data & RXH_IP_DST)
4324 		hash_sets |= HCLGE_D_IP_BIT;
4325 	else
4326 		hash_sets &= ~HCLGE_D_IP_BIT;
4327 
4328 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4329 		hash_sets |= HCLGE_V_TAG_BIT;
4330 
4331 	return hash_sets;
4332 }
4333 
4334 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4335 			       struct ethtool_rxnfc *nfc)
4336 {
4337 	struct hclge_vport *vport = hclge_get_vport(handle);
4338 	struct hclge_dev *hdev = vport->back;
4339 	struct hclge_rss_input_tuple_cmd *req;
4340 	struct hclge_desc desc;
4341 	u8 tuple_sets;
4342 	int ret;
4343 
4344 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4345 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4346 		return -EINVAL;
4347 
4348 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4349 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4350 
4351 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4352 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4353 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4354 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4355 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4356 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4357 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4358 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4359 
4360 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4361 	switch (nfc->flow_type) {
4362 	case TCP_V4_FLOW:
4363 		req->ipv4_tcp_en = tuple_sets;
4364 		break;
4365 	case TCP_V6_FLOW:
4366 		req->ipv6_tcp_en = tuple_sets;
4367 		break;
4368 	case UDP_V4_FLOW:
4369 		req->ipv4_udp_en = tuple_sets;
4370 		break;
4371 	case UDP_V6_FLOW:
4372 		req->ipv6_udp_en = tuple_sets;
4373 		break;
4374 	case SCTP_V4_FLOW:
4375 		req->ipv4_sctp_en = tuple_sets;
4376 		break;
4377 	case SCTP_V6_FLOW:
4378 		if ((nfc->data & RXH_L4_B_0_1) ||
4379 		    (nfc->data & RXH_L4_B_2_3))
4380 			return -EINVAL;
4381 
4382 		req->ipv6_sctp_en = tuple_sets;
4383 		break;
4384 	case IPV4_FLOW:
4385 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4386 		break;
4387 	case IPV6_FLOW:
4388 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4389 		break;
4390 	default:
4391 		return -EINVAL;
4392 	}
4393 
4394 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4395 	if (ret) {
4396 		dev_err(&hdev->pdev->dev,
4397 			"Set rss tuple fail, status = %d\n", ret);
4398 		return ret;
4399 	}
4400 
4401 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4402 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4403 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4404 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4405 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4406 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4407 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4408 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4409 	hclge_get_rss_type(vport);
4410 	return 0;
4411 }
4412 
4413 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4414 			       struct ethtool_rxnfc *nfc)
4415 {
4416 	struct hclge_vport *vport = hclge_get_vport(handle);
4417 	u8 tuple_sets;
4418 
4419 	nfc->data = 0;
4420 
4421 	switch (nfc->flow_type) {
4422 	case TCP_V4_FLOW:
4423 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4424 		break;
4425 	case UDP_V4_FLOW:
4426 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4427 		break;
4428 	case TCP_V6_FLOW:
4429 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4430 		break;
4431 	case UDP_V6_FLOW:
4432 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4433 		break;
4434 	case SCTP_V4_FLOW:
4435 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4436 		break;
4437 	case SCTP_V6_FLOW:
4438 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4439 		break;
4440 	case IPV4_FLOW:
4441 	case IPV6_FLOW:
4442 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4443 		break;
4444 	default:
4445 		return -EINVAL;
4446 	}
4447 
4448 	if (!tuple_sets)
4449 		return 0;
4450 
4451 	if (tuple_sets & HCLGE_D_PORT_BIT)
4452 		nfc->data |= RXH_L4_B_2_3;
4453 	if (tuple_sets & HCLGE_S_PORT_BIT)
4454 		nfc->data |= RXH_L4_B_0_1;
4455 	if (tuple_sets & HCLGE_D_IP_BIT)
4456 		nfc->data |= RXH_IP_DST;
4457 	if (tuple_sets & HCLGE_S_IP_BIT)
4458 		nfc->data |= RXH_IP_SRC;
4459 
4460 	return 0;
4461 }
4462 
4463 static int hclge_get_tc_size(struct hnae3_handle *handle)
4464 {
4465 	struct hclge_vport *vport = hclge_get_vport(handle);
4466 	struct hclge_dev *hdev = vport->back;
4467 
4468 	return hdev->rss_size_max;
4469 }
4470 
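/* Restore the RSS configuration kept in vport 0 (indirection table, hash
 * key and algorithm, input tuples) to hardware and configure the per-TC
 * RSS mode from the current TC map.
 */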
4471 int hclge_rss_init_hw(struct hclge_dev *hdev)
4472 {
4473 	struct hclge_vport *vport = hdev->vport;
4474 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4475 	u16 rss_size = vport[0].alloc_rss_size;
4476 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4477 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4478 	u8 *key = vport[0].rss_hash_key;
4479 	u8 hfunc = vport[0].rss_algo;
4480 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4481 	u16 roundup_size;
4482 	unsigned int i;
4483 	int ret;
4484 
4485 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4486 	if (ret)
4487 		return ret;
4488 
4489 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4490 	if (ret)
4491 		return ret;
4492 
4493 	ret = hclge_set_rss_input_tuple(hdev);
4494 	if (ret)
4495 		return ret;
4496 
4497 	/* Each TC has the same queue size, and the tc_size set to hardware is
4498 	 * the log2 of the roundup power of two of rss_size; the actual queue
4499 	 * size is limited by the indirection table.
4500 	 */
4501 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4502 		dev_err(&hdev->pdev->dev,
4503 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4504 			rss_size);
4505 		return -EINVAL;
4506 	}
4507 
4508 	roundup_size = roundup_pow_of_two(rss_size);
4509 	roundup_size = ilog2(roundup_size);
4510 
4511 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4512 		tc_valid[i] = 0;
4513 
4514 		if (!(hdev->hw_tc_map & BIT(i)))
4515 			continue;
4516 
4517 		tc_valid[i] = 1;
4518 		tc_size[i] = roundup_size;
4519 		tc_offset[i] = rss_size * i;
4520 	}
4521 
4522 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4523 }
4524 
4525 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4526 {
4527 	struct hclge_vport *vport = hdev->vport;
4528 	int i, j;
4529 
4530 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4531 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4532 			vport[j].rss_indirection_tbl[i] =
4533 				i % vport[j].alloc_rss_size;
4534 	}
4535 }
4536 
4537 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4538 {
4539 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4540 	struct hclge_vport *vport = hdev->vport;
4541 
4542 	if (hdev->pdev->revision >= 0x21)
4543 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4544 
4545 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4546 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4547 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4548 		vport[i].rss_tuple_sets.ipv4_udp_en =
4549 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4550 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4551 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4552 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4553 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4554 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4555 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4556 		vport[i].rss_tuple_sets.ipv6_udp_en =
4557 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4558 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4559 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4560 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4561 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4562 
4563 		vport[i].rss_algo = rss_algo;
4564 
4565 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4566 		       HCLGE_RSS_KEY_SIZE);
4567 	}
4568 
4569 	hclge_rss_indir_init_cfg(hdev);
4570 }
4571 
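/* Map (en = true) or unmap (en = false) the rings in ring_chain to the
 * given vector. The ring entries are sent in batches of at most
 * HCLGE_VECTOR_ELEMENTS_PER_CMD per command descriptor.
 */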
4572 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4573 				int vector_id, bool en,
4574 				struct hnae3_ring_chain_node *ring_chain)
4575 {
4576 	struct hclge_dev *hdev = vport->back;
4577 	struct hnae3_ring_chain_node *node;
4578 	struct hclge_desc desc;
4579 	struct hclge_ctrl_vector_chain_cmd *req =
4580 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4581 	enum hclge_cmd_status status;
4582 	enum hclge_opcode_type op;
4583 	u16 tqp_type_and_id;
4584 	int i;
4585 
4586 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4587 	hclge_cmd_setup_basic_desc(&desc, op, false);
4588 	req->int_vector_id = vector_id;
4589 
4590 	i = 0;
4591 	for (node = ring_chain; node; node = node->next) {
4592 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4593 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4594 				HCLGE_INT_TYPE_S,
4595 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4596 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4597 				HCLGE_TQP_ID_S, node->tqp_index);
4598 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4599 				HCLGE_INT_GL_IDX_S,
4600 				hnae3_get_field(node->int_gl_idx,
4601 						HNAE3_RING_GL_IDX_M,
4602 						HNAE3_RING_GL_IDX_S));
4603 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4604 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4605 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4606 			req->vfid = vport->vport_id;
4607 
4608 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4609 			if (status) {
4610 				dev_err(&hdev->pdev->dev,
4611 					"Map TQP fail, status is %d.\n",
4612 					status);
4613 				return -EIO;
4614 			}
4615 			i = 0;
4616 
4617 			hclge_cmd_setup_basic_desc(&desc,
4618 						   op,
4619 						   false);
4620 			req->int_vector_id = vector_id;
4621 		}
4622 	}
4623 
4624 	if (i > 0) {
4625 		req->int_cause_num = i;
4626 		req->vfid = vport->vport_id;
4627 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4628 		if (status) {
4629 			dev_err(&hdev->pdev->dev,
4630 				"Map TQP fail, status is %d.\n", status);
4631 			return -EIO;
4632 		}
4633 	}
4634 
4635 	return 0;
4636 }
4637 
4638 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4639 				    struct hnae3_ring_chain_node *ring_chain)
4640 {
4641 	struct hclge_vport *vport = hclge_get_vport(handle);
4642 	struct hclge_dev *hdev = vport->back;
4643 	int vector_id;
4644 
4645 	vector_id = hclge_get_vector_index(hdev, vector);
4646 	if (vector_id < 0) {
4647 		dev_err(&hdev->pdev->dev,
4648 			"failed to get vector index. vector=%d\n", vector);
4649 		return vector_id;
4650 	}
4651 
4652 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4653 }
4654 
4655 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4656 				       struct hnae3_ring_chain_node *ring_chain)
4657 {
4658 	struct hclge_vport *vport = hclge_get_vport(handle);
4659 	struct hclge_dev *hdev = vport->back;
4660 	int vector_id, ret;
4661 
4662 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4663 		return 0;
4664 
4665 	vector_id = hclge_get_vector_index(hdev, vector);
4666 	if (vector_id < 0) {
4667 		dev_err(&handle->pdev->dev,
4668 			"Get vector index fail. ret =%d\n", vector_id);
4669 		return vector_id;
4670 	}
4671 
4672 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4673 	if (ret)
4674 		dev_err(&handle->pdev->dev,
4675 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4676 			vector_id, ret);
4677 
4678 	return ret;
4679 }
4680 
4681 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4682 				      struct hclge_promisc_param *param)
4683 {
4684 	struct hclge_promisc_cfg_cmd *req;
4685 	struct hclge_desc desc;
4686 	int ret;
4687 
4688 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4689 
4690 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4691 	req->vf_id = param->vf_id;
4692 
4693 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4694 	 * pdev revision(0x20); newer revisions support them. Setting these
4695 	 * two fields will not return an error when the driver sends the
4696 	 * command to the firmware on revision(0x20).
4697 	 */
4698 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4699 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4700 
4701 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4702 	if (ret)
4703 		dev_err(&hdev->pdev->dev,
4704 			"failed to set vport %d promisc mode, ret = %d.\n",
4705 			param->vf_id, ret);
4706 
4707 	return ret;
4708 }
4709 
4710 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4711 				     bool en_uc, bool en_mc, bool en_bc,
4712 				     int vport_id)
4713 {
4714 	if (!param)
4715 		return;
4716 
4717 	memset(param, 0, sizeof(struct hclge_promisc_param));
4718 	if (en_uc)
4719 		param->enable = HCLGE_PROMISC_EN_UC;
4720 	if (en_mc)
4721 		param->enable |= HCLGE_PROMISC_EN_MC;
4722 	if (en_bc)
4723 		param->enable |= HCLGE_PROMISC_EN_BC;
4724 	param->vf_id = vport_id;
4725 }
4726 
4727 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4728 				 bool en_mc_pmc, bool en_bc_pmc)
4729 {
4730 	struct hclge_dev *hdev = vport->back;
4731 	struct hclge_promisc_param param;
4732 
4733 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4734 				 vport->vport_id);
4735 	return hclge_cmd_set_promisc_mode(hdev, &param);
4736 }
4737 
4738 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4739 				  bool en_mc_pmc)
4740 {
4741 	struct hclge_vport *vport = hclge_get_vport(handle);
4742 	bool en_bc_pmc = true;
4743 
4744 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4745 	 * is always bypassed. So broadcast promisc should be disabled until
4746 	 * the user enables promisc mode
4747 	 */
4748 	if (handle->pdev->revision == 0x20)
4749 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4750 
4751 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4752 					    en_bc_pmc);
4753 }
4754 
4755 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4756 {
4757 	struct hclge_vport *vport = hclge_get_vport(handle);
4758 	struct hclge_dev *hdev = vport->back;
4759 
4760 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4761 }
4762 
4763 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4764 {
4765 	struct hclge_get_fd_mode_cmd *req;
4766 	struct hclge_desc desc;
4767 	int ret;
4768 
4769 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4770 
4771 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4772 
4773 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4774 	if (ret) {
4775 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4776 		return ret;
4777 	}
4778 
4779 	*fd_mode = req->mode;
4780 
4781 	return ret;
4782 }
4783 
4784 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4785 				   u32 *stage1_entry_num,
4786 				   u32 *stage2_entry_num,
4787 				   u16 *stage1_counter_num,
4788 				   u16 *stage2_counter_num)
4789 {
4790 	struct hclge_get_fd_allocation_cmd *req;
4791 	struct hclge_desc desc;
4792 	int ret;
4793 
4794 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4795 
4796 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4797 
4798 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4799 	if (ret) {
4800 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4801 			ret);
4802 		return ret;
4803 	}
4804 
4805 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4806 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4807 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4808 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4809 
4810 	return ret;
4811 }
4812 
4813 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4814 				   enum HCLGE_FD_STAGE stage_num)
4815 {
4816 	struct hclge_set_fd_key_config_cmd *req;
4817 	struct hclge_fd_key_cfg *stage;
4818 	struct hclge_desc desc;
4819 	int ret;
4820 
4821 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4822 
4823 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4824 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4825 	req->stage = stage_num;
4826 	req->key_select = stage->key_sel;
4827 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4828 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4829 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4830 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4831 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4832 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4833 
4834 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4835 	if (ret)
4836 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4837 
4838 	return ret;
4839 }
4840 
4841 static int hclge_init_fd_config(struct hclge_dev *hdev)
4842 {
4843 #define LOW_2_WORDS		0x03
4844 	struct hclge_fd_key_cfg *key_cfg;
4845 	int ret;
4846 
4847 	if (!hnae3_dev_fd_supported(hdev))
4848 		return 0;
4849 
4850 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4851 	if (ret)
4852 		return ret;
4853 
4854 	switch (hdev->fd_cfg.fd_mode) {
4855 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4856 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4857 		break;
4858 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4859 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4860 		break;
4861 	default:
4862 		dev_err(&hdev->pdev->dev,
4863 			"Unsupported flow director mode %u\n",
4864 			hdev->fd_cfg.fd_mode);
4865 		return -EOPNOTSUPP;
4866 	}
4867 
4868 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4869 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4870 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4871 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4872 	key_cfg->outer_sipv6_word_en = 0;
4873 	key_cfg->outer_dipv6_word_en = 0;
4874 
4875 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4876 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4877 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4878 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4879 
4880 	/* If using the max 400bit key, the src/dst mac tuples are also supported */
4881 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4882 		key_cfg->tuple_active |=
4883 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4884 
4885 	/* roce_type is used to filter roce frames
4886 	 * dst_vport is used to specify the rule
4887 	 */
4888 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4889 
4890 	ret = hclge_get_fd_allocation(hdev,
4891 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4892 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4893 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4894 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4895 	if (ret)
4896 		return ret;
4897 
4898 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4899 }
4900 
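/* Write one flow director TCAM entry at the given location. The key is
 * split across three command descriptors; sel_x selects whether the x or
 * the y part of the key is written, and the entry is only marked valid
 * when writing the x part of an added rule.
 */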
4901 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4902 				int loc, u8 *key, bool is_add)
4903 {
4904 	struct hclge_fd_tcam_config_1_cmd *req1;
4905 	struct hclge_fd_tcam_config_2_cmd *req2;
4906 	struct hclge_fd_tcam_config_3_cmd *req3;
4907 	struct hclge_desc desc[3];
4908 	int ret;
4909 
4910 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4911 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4912 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4913 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4914 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4915 
4916 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4917 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4918 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4919 
4920 	req1->stage = stage;
4921 	req1->xy_sel = sel_x ? 1 : 0;
4922 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4923 	req1->index = cpu_to_le32(loc);
4924 	req1->entry_vld = sel_x ? is_add : 0;
4925 
4926 	if (key) {
4927 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4928 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4929 		       sizeof(req2->tcam_data));
4930 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4931 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4932 	}
4933 
4934 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4935 	if (ret)
4936 		dev_err(&hdev->pdev->dev,
4937 			"config tcam key fail, ret=%d\n",
4938 			ret);
4939 
4940 	return ret;
4941 }
4942 
4943 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4944 			      struct hclge_fd_ad_data *action)
4945 {
4946 	struct hclge_fd_ad_config_cmd *req;
4947 	struct hclge_desc desc;
4948 	u64 ad_data = 0;
4949 	int ret;
4950 
4951 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4952 
4953 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4954 	req->index = cpu_to_le32(loc);
4955 	req->stage = stage;
4956 
4957 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4958 		      action->write_rule_id_to_bd);
4959 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4960 			action->rule_id);
4961 	ad_data <<= 32;
4962 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4963 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4964 		      action->forward_to_direct_queue);
4965 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4966 			action->queue_id);
4967 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4968 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4969 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4970 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4971 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4972 			action->counter_id);
4973 
4974 	req->ad_data = cpu_to_le64(ad_data);
4975 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4976 	if (ret)
4977 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4978 
4979 	return ret;
4980 }
4981 
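/* Convert one tuple of the rule into its TCAM x/y form. Returns true if
 * the tuple occupies space in the key (it is left as zero when marked
 * unused), false if the tuple bit is not recognized.
 */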
4982 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4983 				   struct hclge_fd_rule *rule)
4984 {
4985 	u16 tmp_x_s, tmp_y_s;
4986 	u32 tmp_x_l, tmp_y_l;
4987 	int i;
4988 
4989 	if (rule->unused_tuple & tuple_bit)
4990 		return true;
4991 
4992 	switch (tuple_bit) {
4993 	case BIT(INNER_DST_MAC):
4994 		for (i = 0; i < ETH_ALEN; i++) {
4995 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4996 			       rule->tuples_mask.dst_mac[i]);
4997 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4998 			       rule->tuples_mask.dst_mac[i]);
4999 		}
5000 
5001 		return true;
5002 	case BIT(INNER_SRC_MAC):
5003 		for (i = 0; i < ETH_ALEN; i++) {
5004 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5005 			       rule->tuples.src_mac[i]);
5006 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5007 			       rule->tuples.src_mac[i]);
5008 		}
5009 
5010 		return true;
5011 	case BIT(INNER_VLAN_TAG_FST):
5012 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5013 		       rule->tuples_mask.vlan_tag1);
5014 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5015 		       rule->tuples_mask.vlan_tag1);
5016 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5017 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5018 
5019 		return true;
5020 	case BIT(INNER_ETH_TYPE):
5021 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5022 		       rule->tuples_mask.ether_proto);
5023 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5024 		       rule->tuples_mask.ether_proto);
5025 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5026 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5027 
5028 		return true;
5029 	case BIT(INNER_IP_TOS):
5030 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5031 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5032 
5033 		return true;
5034 	case BIT(INNER_IP_PROTO):
5035 		calc_x(*key_x, rule->tuples.ip_proto,
5036 		       rule->tuples_mask.ip_proto);
5037 		calc_y(*key_y, rule->tuples.ip_proto,
5038 		       rule->tuples_mask.ip_proto);
5039 
5040 		return true;
5041 	case BIT(INNER_SRC_IP):
5042 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5043 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5044 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5045 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5046 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5047 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5048 
5049 		return true;
5050 	case BIT(INNER_DST_IP):
5051 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5052 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5053 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5054 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5055 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5056 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5057 
5058 		return true;
5059 	case BIT(INNER_SRC_PORT):
5060 		calc_x(tmp_x_s, rule->tuples.src_port,
5061 		       rule->tuples_mask.src_port);
5062 		calc_y(tmp_y_s, rule->tuples.src_port,
5063 		       rule->tuples_mask.src_port);
5064 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5065 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5066 
5067 		return true;
5068 	case BIT(INNER_DST_PORT):
5069 		calc_x(tmp_x_s, rule->tuples.dst_port,
5070 		       rule->tuples_mask.dst_port);
5071 		calc_y(tmp_y_s, rule->tuples.dst_port,
5072 		       rule->tuples_mask.dst_port);
5073 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5074 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5075 
5076 		return true;
5077 	default:
5078 		return false;
5079 	}
5080 }
5081 
5082 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5083 				 u8 vf_id, u8 network_port_id)
5084 {
5085 	u32 port_number = 0;
5086 
5087 	if (port_type == HOST_PORT) {
5088 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5089 				pf_id);
5090 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5091 				vf_id);
5092 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5093 	} else {
5094 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5095 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5096 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5097 	}
5098 
5099 	return port_number;
5100 }
5101 
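/* Build the meta data part of the TCAM key (packet type and destination
 * vport number) and store its x/y form, left aligned, in key_x/key_y.
 */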
5102 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5103 				       __le32 *key_x, __le32 *key_y,
5104 				       struct hclge_fd_rule *rule)
5105 {
5106 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5107 	u8 cur_pos = 0, tuple_size, shift_bits;
5108 	unsigned int i;
5109 
5110 	for (i = 0; i < MAX_META_DATA; i++) {
5111 		tuple_size = meta_data_key_info[i].key_length;
5112 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5113 
5114 		switch (tuple_bit) {
5115 		case BIT(ROCE_TYPE):
5116 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5117 			cur_pos += tuple_size;
5118 			break;
5119 		case BIT(DST_VPORT):
5120 			port_number = hclge_get_port_number(HOST_PORT, 0,
5121 							    rule->vf_id, 0);
5122 			hnae3_set_field(meta_data,
5123 					GENMASK(cur_pos + tuple_size, cur_pos),
5124 					cur_pos, port_number);
5125 			cur_pos += tuple_size;
5126 			break;
5127 		default:
5128 			break;
5129 		}
5130 	}
5131 
5132 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5133 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5134 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5135 
5136 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5137 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5138 }
5139 
5140 /* A complete key is a combination of the meta data key and the tuple key.
5141  * The meta data key is stored at the MSB region, and the tuple key is stored
5142  * at the LSB region; unused bits are filled with 0.
5143  */
5144 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5145 			    struct hclge_fd_rule *rule)
5146 {
5147 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5148 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5149 	u8 *cur_key_x, *cur_key_y;
5150 	u8 meta_data_region;
5151 	u8 tuple_size;
5152 	int ret;
5153 	u32 i;
5154 
5155 	memset(key_x, 0, sizeof(key_x));
5156 	memset(key_y, 0, sizeof(key_y));
5157 	cur_key_x = key_x;
5158 	cur_key_y = key_y;
5159 
5160 	for (i = 0; i < MAX_TUPLE; i++) {
5161 		bool tuple_valid;
5162 		u32 check_tuple;
5163 
5164 		tuple_size = tuple_key_info[i].key_length / 8;
5165 		check_tuple = key_cfg->tuple_active & BIT(i);
5166 
5167 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5168 						     cur_key_y, rule);
5169 		if (tuple_valid) {
5170 			cur_key_x += tuple_size;
5171 			cur_key_y += tuple_size;
5172 		}
5173 	}
5174 
5175 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5176 			MAX_META_DATA_LENGTH / 8;
5177 
5178 	hclge_fd_convert_meta_data(key_cfg,
5179 				   (__le32 *)(key_x + meta_data_region),
5180 				   (__le32 *)(key_y + meta_data_region),
5181 				   rule);
5182 
5183 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5184 				   true);
5185 	if (ret) {
5186 		dev_err(&hdev->pdev->dev,
5187 			"fd key_y config fail, loc=%u, ret=%d\n",
5188 			rule->location, ret);
5189 		return ret;
5190 	}
5191 
5192 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5193 				   true);
5194 	if (ret)
5195 		dev_err(&hdev->pdev->dev,
5196 			"fd key_x config fail, loc=%u, ret=%d\n",
5197 			rule->location, ret);
5198 	return ret;
5199 }
5200 
5201 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5202 			       struct hclge_fd_rule *rule)
5203 {
5204 	struct hclge_fd_ad_data ad_data;
5205 
5206 	ad_data.ad_id = rule->location;
5207 
5208 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5209 		ad_data.drop_packet = true;
5210 		ad_data.forward_to_direct_queue = false;
5211 		ad_data.queue_id = 0;
5212 	} else {
5213 		ad_data.drop_packet = false;
5214 		ad_data.forward_to_direct_queue = true;
5215 		ad_data.queue_id = rule->queue_id;
5216 	}
5217 
5218 	ad_data.use_counter = false;
5219 	ad_data.counter_id = 0;
5220 
5221 	ad_data.use_next_stage = false;
5222 	ad_data.next_input_key = 0;
5223 
5224 	ad_data.write_rule_id_to_bd = true;
5225 	ad_data.rule_id = rule->location;
5226 
5227 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5228 }
5229 
5230 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5231 				       u32 *unused_tuple)
5232 {
5233 	if (!spec || !unused_tuple)
5234 		return -EINVAL;
5235 
5236 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5237 
5238 	if (!spec->ip4src)
5239 		*unused_tuple |= BIT(INNER_SRC_IP);
5240 
5241 	if (!spec->ip4dst)
5242 		*unused_tuple |= BIT(INNER_DST_IP);
5243 
5244 	if (!spec->psrc)
5245 		*unused_tuple |= BIT(INNER_SRC_PORT);
5246 
5247 	if (!spec->pdst)
5248 		*unused_tuple |= BIT(INNER_DST_PORT);
5249 
5250 	if (!spec->tos)
5251 		*unused_tuple |= BIT(INNER_IP_TOS);
5252 
5253 	return 0;
5254 }
5255 
5256 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5257 				    u32 *unused_tuple)
5258 {
5259 	if (!spec || !unused_tuple)
5260 		return -EINVAL;
5261 
5262 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5263 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5264 
5265 	if (!spec->ip4src)
5266 		*unused_tuple |= BIT(INNER_SRC_IP);
5267 
5268 	if (!spec->ip4dst)
5269 		*unused_tuple |= BIT(INNER_DST_IP);
5270 
5271 	if (!spec->tos)
5272 		*unused_tuple |= BIT(INNER_IP_TOS);
5273 
5274 	if (!spec->proto)
5275 		*unused_tuple |= BIT(INNER_IP_PROTO);
5276 
5277 	if (spec->l4_4_bytes)
5278 		return -EOPNOTSUPP;
5279 
5280 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5281 		return -EOPNOTSUPP;
5282 
5283 	return 0;
5284 }
5285 
5286 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5287 				       u32 *unused_tuple)
5288 {
5289 	if (!spec || !unused_tuple)
5290 		return -EINVAL;
5291 
5292 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5293 		BIT(INNER_IP_TOS);
5294 
5295 	/* check whether the src/dst ip address is used */
5296 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5297 	    !spec->ip6src[2] && !spec->ip6src[3])
5298 		*unused_tuple |= BIT(INNER_SRC_IP);
5299 
5300 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5301 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5302 		*unused_tuple |= BIT(INNER_DST_IP);
5303 
5304 	if (!spec->psrc)
5305 		*unused_tuple |= BIT(INNER_SRC_PORT);
5306 
5307 	if (!spec->pdst)
5308 		*unused_tuple |= BIT(INNER_DST_PORT);
5309 
5310 	if (spec->tclass)
5311 		return -EOPNOTSUPP;
5312 
5313 	return 0;
5314 }
5315 
5316 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5317 				    u32 *unused_tuple)
5318 {
5319 	if (!spec || !unused_tuple)
5320 		return -EINVAL;
5321 
5322 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5323 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5324 
5325 	/* check whether the src/dst ip address is used */
5326 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5327 	    !spec->ip6src[2] && !spec->ip6src[3])
5328 		*unused_tuple |= BIT(INNER_SRC_IP);
5329 
5330 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5331 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5332 		*unused_tuple |= BIT(INNER_DST_IP);
5333 
5334 	if (!spec->l4_proto)
5335 		*unused_tuple |= BIT(INNER_IP_PROTO);
5336 
5337 	if (spec->tclass)
5338 		return -EOPNOTSUPP;
5339 
5340 	if (spec->l4_4_bytes)
5341 		return -EOPNOTSUPP;
5342 
5343 	return 0;
5344 }
5345 
5346 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5347 {
5348 	if (!spec || !unused_tuple)
5349 		return -EINVAL;
5350 
5351 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5352 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5353 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5354 
5355 	if (is_zero_ether_addr(spec->h_source))
5356 		*unused_tuple |= BIT(INNER_SRC_MAC);
5357 
5358 	if (is_zero_ether_addr(spec->h_dest))
5359 		*unused_tuple |= BIT(INNER_DST_MAC);
5360 
5361 	if (!spec->h_proto)
5362 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5363 
5364 	return 0;
5365 }
5366 
5367 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5368 				    struct ethtool_rx_flow_spec *fs,
5369 				    u32 *unused_tuple)
5370 {
5371 	if (fs->flow_type & FLOW_EXT) {
5372 		if (fs->h_ext.vlan_etype) {
5373 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5374 			return -EOPNOTSUPP;
5375 		}
5376 
5377 		if (!fs->h_ext.vlan_tci)
5378 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5379 
5380 		if (fs->m_ext.vlan_tci &&
5381 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5382 			dev_err(&hdev->pdev->dev,
5383 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5384 				be16_to_cpu(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5385 			return -EINVAL;
5386 		}
5387 	} else {
5388 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5389 	}
5390 
5391 	if (fs->flow_type & FLOW_MAC_EXT) {
5392 		if (hdev->fd_cfg.fd_mode !=
5393 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5394 			dev_err(&hdev->pdev->dev,
5395 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5396 			return -EOPNOTSUPP;
5397 		}
5398 
5399 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5400 			*unused_tuple |= BIT(INNER_DST_MAC);
5401 		else
5402 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5403 	}
5404 
5405 	return 0;
5406 }
5407 
5408 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5409 			       struct ethtool_rx_flow_spec *fs,
5410 			       u32 *unused_tuple)
5411 {
5412 	u32 flow_type;
5413 	int ret;
5414 
5415 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5416 		dev_err(&hdev->pdev->dev,
5417 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5418 			fs->location,
5419 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5420 		return -EINVAL;
5421 	}
5422 
5423 	if ((fs->flow_type & FLOW_EXT) &&
5424 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5425 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5426 		return -EOPNOTSUPP;
5427 	}
5428 
5429 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5430 	switch (flow_type) {
5431 	case SCTP_V4_FLOW:
5432 	case TCP_V4_FLOW:
5433 	case UDP_V4_FLOW:
5434 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5435 						  unused_tuple);
5436 		break;
5437 	case IP_USER_FLOW:
5438 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5439 					       unused_tuple);
5440 		break;
5441 	case SCTP_V6_FLOW:
5442 	case TCP_V6_FLOW:
5443 	case UDP_V6_FLOW:
5444 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5445 						  unused_tuple);
5446 		break;
5447 	case IPV6_USER_FLOW:
5448 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5449 					       unused_tuple);
5450 		break;
5451 	case ETHER_FLOW:
5452 		if (hdev->fd_cfg.fd_mode !=
5453 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5454 			dev_err(&hdev->pdev->dev,
5455 				"ETHER_FLOW is not supported in current fd mode!\n");
5456 			return -EOPNOTSUPP;
5457 		}
5458 
5459 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5460 						 unused_tuple);
5461 		break;
5462 	default:
5463 		dev_err(&hdev->pdev->dev,
5464 			"unsupported protocol type, protocol type = %#x\n",
5465 			flow_type);
5466 		return -EOPNOTSUPP;
5467 	}
5468 
5469 	if (ret) {
5470 		dev_err(&hdev->pdev->dev,
5471 			"failed to check flow union tuple, ret = %d\n",
5472 			ret);
5473 		return ret;
5474 	}
5475 
5476 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5477 }
5478 
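/* check whether a rule already exists at the given location in the fd rule list */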
5479 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5480 {
5481 	struct hclge_fd_rule *rule = NULL;
5482 	struct hlist_node *node2;
5483 
5484 	spin_lock_bh(&hdev->fd_rule_lock);
5485 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5486 		if (rule->location >= location)
5487 			break;
5488 	}
5489 
5490 	spin_unlock_bh(&hdev->fd_rule_lock);
5491 
5492 	return rule && rule->location == location;
5493 }
5494 
5495 /* make sure this is called with fd_rule_lock held */
5496 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5497 				     struct hclge_fd_rule *new_rule,
5498 				     u16 location,
5499 				     bool is_add)
5500 {
5501 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5502 	struct hlist_node *node2;
5503 
5504 	if (is_add && !new_rule)
5505 		return -EINVAL;
5506 
5507 	hlist_for_each_entry_safe(rule, node2,
5508 				  &hdev->fd_rule_list, rule_node) {
5509 		if (rule->location >= location)
5510 			break;
5511 		parent = rule;
5512 	}
5513 
5514 	if (rule && rule->location == location) {
5515 		hlist_del(&rule->rule_node);
5516 		kfree(rule);
5517 		hdev->hclge_fd_rule_num--;
5518 
5519 		if (!is_add) {
5520 			if (!hdev->hclge_fd_rule_num)
5521 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5522 			clear_bit(location, hdev->fd_bmap);
5523 
5524 			return 0;
5525 		}
5526 	} else if (!is_add) {
5527 		dev_err(&hdev->pdev->dev,
5528 			"failed to delete, rule %u does not exist\n",
5529 			location);
5530 		return -EINVAL;
5531 	}
5532 
5533 	INIT_HLIST_NODE(&new_rule->rule_node);
5534 
5535 	if (parent)
5536 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5537 	else
5538 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5539 
5540 	set_bit(location, hdev->fd_bmap);
5541 	hdev->hclge_fd_rule_num++;
5542 	hdev->fd_active_type = new_rule->rule_type;
5543 
5544 	return 0;
5545 }
5546 
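/* convert the ethtool flow spec into the rule's tuples and tuple masks
 * (stored in host byte order)
 */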
5547 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5548 			      struct ethtool_rx_flow_spec *fs,
5549 			      struct hclge_fd_rule *rule)
5550 {
5551 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5552 
5553 	switch (flow_type) {
5554 	case SCTP_V4_FLOW:
5555 	case TCP_V4_FLOW:
5556 	case UDP_V4_FLOW:
5557 		rule->tuples.src_ip[IPV4_INDEX] =
5558 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5559 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5560 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5561 
5562 		rule->tuples.dst_ip[IPV4_INDEX] =
5563 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5564 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5565 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5566 
5567 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5568 		rule->tuples_mask.src_port =
5569 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5570 
5571 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5572 		rule->tuples_mask.dst_port =
5573 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5574 
5575 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5576 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5577 
5578 		rule->tuples.ether_proto = ETH_P_IP;
5579 		rule->tuples_mask.ether_proto = 0xFFFF;
5580 
5581 		break;
5582 	case IP_USER_FLOW:
5583 		rule->tuples.src_ip[IPV4_INDEX] =
5584 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5585 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5586 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5587 
5588 		rule->tuples.dst_ip[IPV4_INDEX] =
5589 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5590 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5591 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5592 
5593 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5594 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5595 
5596 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5597 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5598 
5599 		rule->tuples.ether_proto = ETH_P_IP;
5600 		rule->tuples_mask.ether_proto = 0xFFFF;
5601 
5602 		break;
5603 	case SCTP_V6_FLOW:
5604 	case TCP_V6_FLOW:
5605 	case UDP_V6_FLOW:
5606 		be32_to_cpu_array(rule->tuples.src_ip,
5607 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5608 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5609 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5610 
5611 		be32_to_cpu_array(rule->tuples.dst_ip,
5612 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5613 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5614 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5615 
5616 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5617 		rule->tuples_mask.src_port =
5618 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5619 
5620 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5621 		rule->tuples_mask.dst_port =
5622 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5623 
5624 		rule->tuples.ether_proto = ETH_P_IPV6;
5625 		rule->tuples_mask.ether_proto = 0xFFFF;
5626 
5627 		break;
5628 	case IPV6_USER_FLOW:
5629 		be32_to_cpu_array(rule->tuples.src_ip,
5630 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5631 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5632 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5633 
5634 		be32_to_cpu_array(rule->tuples.dst_ip,
5635 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5636 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5637 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5638 
5639 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5640 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5641 
5642 		rule->tuples.ether_proto = ETH_P_IPV6;
5643 		rule->tuples_mask.ether_proto = 0xFFFF;
5644 
5645 		break;
5646 	case ETHER_FLOW:
5647 		ether_addr_copy(rule->tuples.src_mac,
5648 				fs->h_u.ether_spec.h_source);
5649 		ether_addr_copy(rule->tuples_mask.src_mac,
5650 				fs->m_u.ether_spec.h_source);
5651 
5652 		ether_addr_copy(rule->tuples.dst_mac,
5653 				fs->h_u.ether_spec.h_dest);
5654 		ether_addr_copy(rule->tuples_mask.dst_mac,
5655 				fs->m_u.ether_spec.h_dest);
5656 
5657 		rule->tuples.ether_proto =
5658 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5659 		rule->tuples_mask.ether_proto =
5660 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5661 
5662 		break;
5663 	default:
5664 		return -EOPNOTSUPP;
5665 	}
5666 
5667 	switch (flow_type) {
5668 	case SCTP_V4_FLOW:
5669 	case SCTP_V6_FLOW:
5670 		rule->tuples.ip_proto = IPPROTO_SCTP;
5671 		rule->tuples_mask.ip_proto = 0xFF;
5672 		break;
5673 	case TCP_V4_FLOW:
5674 	case TCP_V6_FLOW:
5675 		rule->tuples.ip_proto = IPPROTO_TCP;
5676 		rule->tuples_mask.ip_proto = 0xFF;
5677 		break;
5678 	case UDP_V4_FLOW:
5679 	case UDP_V6_FLOW:
5680 		rule->tuples.ip_proto = IPPROTO_UDP;
5681 		rule->tuples_mask.ip_proto = 0xFF;
5682 		break;
5683 	default:
5684 		break;
5685 	}
5686 
5687 	if (fs->flow_type & FLOW_EXT) {
5688 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5689 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5690 	}
5691 
5692 	if (fs->flow_type & FLOW_MAC_EXT) {
5693 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5694 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5695 	}
5696 
5697 	return 0;
5698 }
5699 
5700 /* make sure this is called with fd_rule_lock held */
5701 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5702 				struct hclge_fd_rule *rule)
5703 {
5704 	int ret;
5705 
5706 	if (!rule) {
5707 		dev_err(&hdev->pdev->dev,
5708 			"The flow director rule is NULL\n");
5709 		return -EINVAL;
5710 	}
5711 
5712 	/* it never fails here, so there is no need to check the return value */
5713 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5714 
5715 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5716 	if (ret)
5717 		goto clear_rule;
5718 
5719 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5720 	if (ret)
5721 		goto clear_rule;
5722 
5723 	return 0;
5724 
5725 clear_rule:
5726 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5727 	return ret;
5728 }
5729 
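/* add a flow director rule from the ethtool flow spec; all arfs rules are
 * cleared first to avoid conflicts with the user configured rule
 */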
5730 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5731 			      struct ethtool_rxnfc *cmd)
5732 {
5733 	struct hclge_vport *vport = hclge_get_vport(handle);
5734 	struct hclge_dev *hdev = vport->back;
5735 	u16 dst_vport_id = 0, q_index = 0;
5736 	struct ethtool_rx_flow_spec *fs;
5737 	struct hclge_fd_rule *rule;
5738 	u32 unused = 0;
5739 	u8 action;
5740 	int ret;
5741 
5742 	if (!hnae3_dev_fd_supported(hdev)) {
5743 		dev_err(&hdev->pdev->dev,
5744 			"flow director is not supported\n");
5745 		return -EOPNOTSUPP;
5746 	}
5747 
5748 	if (!hdev->fd_en) {
5749 		dev_err(&hdev->pdev->dev,
5750 			"please enable flow director first\n");
5751 		return -EOPNOTSUPP;
5752 	}
5753 
5754 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5755 
5756 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5757 	if (ret)
5758 		return ret;
5759 
5760 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5761 		action = HCLGE_FD_ACTION_DROP_PACKET;
5762 	} else {
5763 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5764 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5765 		u16 tqps;
5766 
5767 		if (vf > hdev->num_req_vfs) {
5768 			dev_err(&hdev->pdev->dev,
5769 				"Error: vf id (%u) > max vf num (%u)\n",
5770 				vf, hdev->num_req_vfs);
5771 			return -EINVAL;
5772 		}
5773 
5774 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5775 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5776 
5777 		if (ring >= tqps) {
5778 			dev_err(&hdev->pdev->dev,
5779 				"Error: queue id (%u) > max tqp num (%u)\n",
5780 				ring, tqps - 1);
5781 			return -EINVAL;
5782 		}
5783 
5784 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5785 		q_index = ring;
5786 	}
5787 
5788 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5789 	if (!rule)
5790 		return -ENOMEM;
5791 
5792 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5793 	if (ret) {
5794 		kfree(rule);
5795 		return ret;
5796 	}
5797 
5798 	rule->flow_type = fs->flow_type;
5799 	rule->location = fs->location;
5800 	rule->unused_tuple = unused;
5801 	rule->vf_id = dst_vport_id;
5802 	rule->queue_id = q_index;
5803 	rule->action = action;
5804 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5805 
5806 	/* to avoid rule conflicts, clear all arfs rules when the user
5807 	 * configures a rule via ethtool
5808 	 */
5809 	hclge_clear_arfs_rules(handle);
5810 
5811 	spin_lock_bh(&hdev->fd_rule_lock);
5812 	ret = hclge_fd_config_rule(hdev, rule);
5813 
5814 	spin_unlock_bh(&hdev->fd_rule_lock);
5815 
5816 	return ret;
5817 }
5818 
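/* delete the fd rule at the location specified in the ethtool flow spec */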
5819 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5820 			      struct ethtool_rxnfc *cmd)
5821 {
5822 	struct hclge_vport *vport = hclge_get_vport(handle);
5823 	struct hclge_dev *hdev = vport->back;
5824 	struct ethtool_rx_flow_spec *fs;
5825 	int ret;
5826 
5827 	if (!hnae3_dev_fd_supported(hdev))
5828 		return -EOPNOTSUPP;
5829 
5830 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5831 
5832 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5833 		return -EINVAL;
5834 
5835 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5836 		dev_err(&hdev->pdev->dev,
5837 			"Failed to delete, rule %u does not exist\n", fs->location);
5838 		return -ENOENT;
5839 	}
5840 
5841 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5842 				   NULL, false);
5843 	if (ret)
5844 		return ret;
5845 
5846 	spin_lock_bh(&hdev->fd_rule_lock);
5847 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5848 
5849 	spin_unlock_bh(&hdev->fd_rule_lock);
5850 
5851 	return ret;
5852 }
5853 
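/* disable all fd rules in hardware, and also free the rule list and reset
 * the rule counters when clear_list is true
 */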
5854 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5855 				     bool clear_list)
5856 {
5857 	struct hclge_vport *vport = hclge_get_vport(handle);
5858 	struct hclge_dev *hdev = vport->back;
5859 	struct hclge_fd_rule *rule;
5860 	struct hlist_node *node;
5861 	u16 location;
5862 
5863 	if (!hnae3_dev_fd_supported(hdev))
5864 		return;
5865 
5866 	spin_lock_bh(&hdev->fd_rule_lock);
5867 	for_each_set_bit(location, hdev->fd_bmap,
5868 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5869 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5870 				     NULL, false);
5871 
5872 	if (clear_list) {
5873 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5874 					  rule_node) {
5875 			hlist_del(&rule->rule_node);
5876 			kfree(rule);
5877 		}
5878 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5879 		hdev->hclge_fd_rule_num = 0;
5880 		bitmap_zero(hdev->fd_bmap,
5881 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5882 	}
5883 
5884 	spin_unlock_bh(&hdev->fd_rule_lock);
5885 }
5886 
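/* rewrite the rules kept in the rule list to hardware after reset; rules
 * that fail to restore are removed from the list
 */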
5887 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5888 {
5889 	struct hclge_vport *vport = hclge_get_vport(handle);
5890 	struct hclge_dev *hdev = vport->back;
5891 	struct hclge_fd_rule *rule;
5892 	struct hlist_node *node;
5893 	int ret;
5894 
5895 	/* Return 0 here, because the reset error handling checks this
5896 	 * return value. If an error is returned here, the reset process
5897 	 * will fail.
5898 	 */
5899 	if (!hnae3_dev_fd_supported(hdev))
5900 		return 0;
5901 
5902 	/* if fd is disabled, should not restore it when reset */
5903 	if (!hdev->fd_en)
5904 		return 0;
5905 
5906 	spin_lock_bh(&hdev->fd_rule_lock);
5907 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5908 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5909 		if (!ret)
5910 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5911 
5912 		if (ret) {
5913 			dev_warn(&hdev->pdev->dev,
5914 				 "Restore rule %u failed, remove it\n",
5915 				 rule->location);
5916 			clear_bit(rule->location, hdev->fd_bmap);
5917 			hlist_del(&rule->rule_node);
5918 			kfree(rule);
5919 			hdev->hclge_fd_rule_num--;
5920 		}
5921 	}
5922 
5923 	if (hdev->hclge_fd_rule_num)
5924 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5925 
5926 	spin_unlock_bh(&hdev->fd_rule_lock);
5927 
5928 	return 0;
5929 }
5930 
5931 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5932 				 struct ethtool_rxnfc *cmd)
5933 {
5934 	struct hclge_vport *vport = hclge_get_vport(handle);
5935 	struct hclge_dev *hdev = vport->back;
5936 
5937 	if (!hnae3_dev_fd_supported(hdev))
5938 		return -EOPNOTSUPP;
5939 
5940 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5941 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5942 
5943 	return 0;
5944 }
5945 
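/* the following helpers convert a stored fd rule back into an ethtool
 * flow spec and its mask
 */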
5946 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5947 				     struct ethtool_tcpip4_spec *spec,
5948 				     struct ethtool_tcpip4_spec *spec_mask)
5949 {
5950 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5951 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5952 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5953 
5954 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5955 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5956 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5957 
5958 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
5959 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5960 			0 : cpu_to_be16(rule->tuples_mask.src_port);
5961 
5962 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5963 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5964 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
5965 
5966 	spec->tos = rule->tuples.ip_tos;
5967 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5968 			0 : rule->tuples_mask.ip_tos;
5969 }
5970 
5971 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5972 				  struct ethtool_usrip4_spec *spec,
5973 				  struct ethtool_usrip4_spec *spec_mask)
5974 {
5975 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5976 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5977 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5978 
5979 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5980 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5981 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5982 
5983 	spec->tos = rule->tuples.ip_tos;
5984 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5985 			0 : rule->tuples_mask.ip_tos;
5986 
5987 	spec->proto = rule->tuples.ip_proto;
5988 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5989 			0 : rule->tuples_mask.ip_proto;
5990 
5991 	spec->ip_ver = ETH_RX_NFC_IP4;
5992 }
5993 
5994 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
5995 				     struct ethtool_tcpip6_spec *spec,
5996 				     struct ethtool_tcpip6_spec *spec_mask)
5997 {
5998 	cpu_to_be32_array(spec->ip6src,
5999 			  rule->tuples.src_ip, IPV6_SIZE);
6000 	cpu_to_be32_array(spec->ip6dst,
6001 			  rule->tuples.dst_ip, IPV6_SIZE);
6002 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6003 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6004 	else
6005 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6006 				  IPV6_SIZE);
6007 
6008 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6009 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6010 	else
6011 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6012 				  IPV6_SIZE);
6013 
6014 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6015 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6016 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6017 
6018 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6019 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6020 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6021 }
6022 
6023 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6024 				  struct ethtool_usrip6_spec *spec,
6025 				  struct ethtool_usrip6_spec *spec_mask)
6026 {
6027 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6028 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6029 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6030 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6031 	else
6032 		cpu_to_be32_array(spec_mask->ip6src,
6033 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6034 
6035 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6036 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6037 	else
6038 		cpu_to_be32_array(spec_mask->ip6dst,
6039 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6040 
6041 	spec->l4_proto = rule->tuples.ip_proto;
6042 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6043 			0 : rule->tuples_mask.ip_proto;
6044 }
6045 
6046 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6047 				    struct ethhdr *spec,
6048 				    struct ethhdr *spec_mask)
6049 {
6050 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6051 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6052 
6053 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6054 		eth_zero_addr(spec_mask->h_source);
6055 	else
6056 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6057 
6058 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6059 		eth_zero_addr(spec_mask->h_dest);
6060 	else
6061 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6062 
6063 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6064 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6065 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6066 }
6067 
6068 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6069 				  struct hclge_fd_rule *rule)
6070 {
6071 	if (fs->flow_type & FLOW_EXT) {
6072 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6073 		fs->m_ext.vlan_tci =
6074 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6075 				cpu_to_be16(VLAN_VID_MASK) :
6076 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6077 	}
6078 
6079 	if (fs->flow_type & FLOW_MAC_EXT) {
6080 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6081 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6082 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6083 		else
6084 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6085 					rule->tuples_mask.dst_mac);
6086 	}
6087 }
6088 
6089 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6090 				  struct ethtool_rxnfc *cmd)
6091 {
6092 	struct hclge_vport *vport = hclge_get_vport(handle);
6093 	struct hclge_fd_rule *rule = NULL;
6094 	struct hclge_dev *hdev = vport->back;
6095 	struct ethtool_rx_flow_spec *fs;
6096 	struct hlist_node *node2;
6097 
6098 	if (!hnae3_dev_fd_supported(hdev))
6099 		return -EOPNOTSUPP;
6100 
6101 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6102 
6103 	spin_lock_bh(&hdev->fd_rule_lock);
6104 
6105 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6106 		if (rule->location >= fs->location)
6107 			break;
6108 	}
6109 
6110 	if (!rule || fs->location != rule->location) {
6111 		spin_unlock_bh(&hdev->fd_rule_lock);
6112 
6113 		return -ENOENT;
6114 	}
6115 
6116 	fs->flow_type = rule->flow_type;
6117 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6118 	case SCTP_V4_FLOW:
6119 	case TCP_V4_FLOW:
6120 	case UDP_V4_FLOW:
6121 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6122 					 &fs->m_u.tcp_ip4_spec);
6123 		break;
6124 	case IP_USER_FLOW:
6125 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6126 				      &fs->m_u.usr_ip4_spec);
6127 		break;
6128 	case SCTP_V6_FLOW:
6129 	case TCP_V6_FLOW:
6130 	case UDP_V6_FLOW:
6131 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6132 					 &fs->m_u.tcp_ip6_spec);
6133 		break;
6134 	case IPV6_USER_FLOW:
6135 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6136 				      &fs->m_u.usr_ip6_spec);
6137 		break;
6138 	/* The flow type of the fd rule has been checked before adding it to
6139 	 * the rule list. As the other flow types have been handled, it must
6140 	 * be ETHER_FLOW for the default case
6141 	 */
6142 	default:
6143 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6144 					&fs->m_u.ether_spec);
6145 		break;
6146 	}
6147 
6148 	hclge_fd_get_ext_info(fs, rule);
6149 
6150 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6151 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6152 	} else {
6153 		u64 vf_id;
6154 
6155 		fs->ring_cookie = rule->queue_id;
6156 		vf_id = rule->vf_id;
6157 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6158 		fs->ring_cookie |= vf_id;
6159 	}
6160 
6161 	spin_unlock_bh(&hdev->fd_rule_lock);
6162 
6163 	return 0;
6164 }
6165 
6166 static int hclge_get_all_rules(struct hnae3_handle *handle,
6167 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6168 {
6169 	struct hclge_vport *vport = hclge_get_vport(handle);
6170 	struct hclge_dev *hdev = vport->back;
6171 	struct hclge_fd_rule *rule;
6172 	struct hlist_node *node2;
6173 	int cnt = 0;
6174 
6175 	if (!hnae3_dev_fd_supported(hdev))
6176 		return -EOPNOTSUPP;
6177 
6178 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6179 
6180 	spin_lock_bh(&hdev->fd_rule_lock);
6181 	hlist_for_each_entry_safe(rule, node2,
6182 				  &hdev->fd_rule_list, rule_node) {
6183 		if (cnt == cmd->rule_cnt) {
6184 			spin_unlock_bh(&hdev->fd_rule_lock);
6185 			return -EMSGSIZE;
6186 		}
6187 
6188 		rule_locs[cnt] = rule->location;
6189 		cnt++;
6190 	}
6191 
6192 	spin_unlock_bh(&hdev->fd_rule_lock);
6193 
6194 	cmd->rule_cnt = cnt;
6195 
6196 	return 0;
6197 }
6198 
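/* extract the fd rule tuples from the dissected flow keys */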
6199 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6200 				     struct hclge_fd_rule_tuples *tuples)
6201 {
6202 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6203 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6204 
6205 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6206 	tuples->ip_proto = fkeys->basic.ip_proto;
6207 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6208 
6209 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6210 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6211 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6212 	} else {
6213 		int i;
6214 
6215 		for (i = 0; i < IPV6_SIZE; i++) {
6216 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6217 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6218 		}
6219 	}
6220 }
6221 
6222 /* traverse all rules and check whether an existing rule has the same tuples */
6223 static struct hclge_fd_rule *
6224 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6225 			  const struct hclge_fd_rule_tuples *tuples)
6226 {
6227 	struct hclge_fd_rule *rule = NULL;
6228 	struct hlist_node *node;
6229 
6230 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6231 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6232 			return rule;
6233 	}
6234 
6235 	return NULL;
6236 }
6237 
6238 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6239 				     struct hclge_fd_rule *rule)
6240 {
6241 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6242 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6243 			     BIT(INNER_SRC_PORT);
6244 	rule->action = 0;
6245 	rule->vf_id = 0;
6246 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6247 	if (tuples->ether_proto == ETH_P_IP) {
6248 		if (tuples->ip_proto == IPPROTO_TCP)
6249 			rule->flow_type = TCP_V4_FLOW;
6250 		else
6251 			rule->flow_type = UDP_V4_FLOW;
6252 	} else {
6253 		if (tuples->ip_proto == IPPROTO_TCP)
6254 			rule->flow_type = TCP_V6_FLOW;
6255 		else
6256 			rule->flow_type = UDP_V6_FLOW;
6257 	}
6258 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6259 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6260 }
6261 
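/* arfs callback: add a new fd rule for the flow, or update the queue of an
 * existing one, and return the rule location
 */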
6262 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6263 				      u16 flow_id, struct flow_keys *fkeys)
6264 {
6265 	struct hclge_vport *vport = hclge_get_vport(handle);
6266 	struct hclge_fd_rule_tuples new_tuples;
6267 	struct hclge_dev *hdev = vport->back;
6268 	struct hclge_fd_rule *rule;
6269 	u16 tmp_queue_id;
6270 	u16 bit_id;
6271 	int ret;
6272 
6273 	if (!hnae3_dev_fd_supported(hdev))
6274 		return -EOPNOTSUPP;
6275 
6276 	memset(&new_tuples, 0, sizeof(new_tuples));
6277 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6278 
6279 	spin_lock_bh(&hdev->fd_rule_lock);
6280 
6281 	/* when there is already an fd rule added by the user,
6282 	 * arfs should not work
6283 	 */
6284 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6285 		spin_unlock_bh(&hdev->fd_rule_lock);
6286 		return -EOPNOTSUPP;
6287 	}
6288 
6289 	/* check whether a flow director filter already exists for this flow:
6290 	 * if not, create a new filter for it;
6291 	 * if a filter exists with a different queue id, modify the filter;
6292 	 * if a filter exists with the same queue id, do nothing
6293 	 */
6294 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6295 	if (!rule) {
6296 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6297 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6298 			spin_unlock_bh(&hdev->fd_rule_lock);
6299 			return -ENOSPC;
6300 		}
6301 
6302 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6303 		if (!rule) {
6304 			spin_unlock_bh(&hdev->fd_rule_lock);
6305 			return -ENOMEM;
6306 		}
6307 
6308 		set_bit(bit_id, hdev->fd_bmap);
6309 		rule->location = bit_id;
6310 		rule->flow_id = flow_id;
6311 		rule->queue_id = queue_id;
6312 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6313 		ret = hclge_fd_config_rule(hdev, rule);
6314 
6315 		spin_unlock_bh(&hdev->fd_rule_lock);
6316 
6317 		if (ret)
6318 			return ret;
6319 
6320 		return rule->location;
6321 	}
6322 
6323 	spin_unlock_bh(&hdev->fd_rule_lock);
6324 
6325 	if (rule->queue_id == queue_id)
6326 		return rule->location;
6327 
6328 	tmp_queue_id = rule->queue_id;
6329 	rule->queue_id = queue_id;
6330 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6331 	if (ret) {
6332 		rule->queue_id = tmp_queue_id;
6333 		return ret;
6334 	}
6335 
6336 	return rule->location;
6337 }
6338 
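/* remove the arfs rules whose flows may have expired according to
 * rps_may_expire_flow()
 */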
6339 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6340 {
6341 #ifdef CONFIG_RFS_ACCEL
6342 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6343 	struct hclge_fd_rule *rule;
6344 	struct hlist_node *node;
6345 	HLIST_HEAD(del_list);
6346 
6347 	spin_lock_bh(&hdev->fd_rule_lock);
6348 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6349 		spin_unlock_bh(&hdev->fd_rule_lock);
6350 		return;
6351 	}
6352 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6353 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6354 					rule->flow_id, rule->location)) {
6355 			hlist_del_init(&rule->rule_node);
6356 			hlist_add_head(&rule->rule_node, &del_list);
6357 			hdev->hclge_fd_rule_num--;
6358 			clear_bit(rule->location, hdev->fd_bmap);
6359 		}
6360 	}
6361 	spin_unlock_bh(&hdev->fd_rule_lock);
6362 
6363 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6364 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6365 				     rule->location, NULL, false);
6366 		kfree(rule);
6367 	}
6368 #endif
6369 }
6370 
6371 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6372 {
6373 #ifdef CONFIG_RFS_ACCEL
6374 	struct hclge_vport *vport = hclge_get_vport(handle);
6375 	struct hclge_dev *hdev = vport->back;
6376 
6377 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6378 		hclge_del_all_fd_entries(handle, true);
6379 #endif
6380 }
6381 
6382 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6383 {
6384 	struct hclge_vport *vport = hclge_get_vport(handle);
6385 	struct hclge_dev *hdev = vport->back;
6386 
6387 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6388 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6389 }
6390 
6391 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6392 {
6393 	struct hclge_vport *vport = hclge_get_vport(handle);
6394 	struct hclge_dev *hdev = vport->back;
6395 
6396 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6397 }
6398 
6399 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6400 {
6401 	struct hclge_vport *vport = hclge_get_vport(handle);
6402 	struct hclge_dev *hdev = vport->back;
6403 
6404 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6405 }
6406 
6407 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6408 {
6409 	struct hclge_vport *vport = hclge_get_vport(handle);
6410 	struct hclge_dev *hdev = vport->back;
6411 
6412 	return hdev->rst_stats.hw_reset_done_cnt;
6413 }
6414 
6415 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6416 {
6417 	struct hclge_vport *vport = hclge_get_vport(handle);
6418 	struct hclge_dev *hdev = vport->back;
6419 	bool clear;
6420 
6421 	hdev->fd_en = enable;
6422 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6423 	if (!enable)
6424 		hclge_del_all_fd_entries(handle, clear);
6425 	else
6426 		hclge_restore_fd_entries(handle);
6427 }
6428 
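/* enable or disable MAC TX/RX, together with padding, FCS and
 * oversize/undersize truncation handling
 */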
6429 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6430 {
6431 	struct hclge_desc desc;
6432 	struct hclge_config_mac_mode_cmd *req =
6433 		(struct hclge_config_mac_mode_cmd *)desc.data;
6434 	u32 loop_en = 0;
6435 	int ret;
6436 
6437 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6438 
6439 	if (enable) {
6440 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6441 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6442 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6443 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6444 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6445 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6446 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6447 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6448 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6449 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6450 	}
6451 
6452 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6453 
6454 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6455 	if (ret)
6456 		dev_err(&hdev->pdev->dev,
6457 			"mac enable fail, ret = %d.\n", ret);
6458 }
6459 
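/* read-modify-write the mac vlan switch parameter of the given function */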
6460 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6461 				     u8 switch_param, u8 param_mask)
6462 {
6463 	struct hclge_mac_vlan_switch_cmd *req;
6464 	struct hclge_desc desc;
6465 	u32 func_id;
6466 	int ret;
6467 
6468 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6469 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6470 
6471 	/* read current config parameter */
6472 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6473 				   true);
6474 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6475 	req->func_id = cpu_to_le32(func_id);
6476 
6477 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6478 	if (ret) {
6479 		dev_err(&hdev->pdev->dev,
6480 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6481 		return ret;
6482 	}
6483 
6484 	/* modify and write new config parameter */
6485 	hclge_cmd_reuse_desc(&desc, false);
6486 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6487 	req->param_mask = param_mask;
6488 
6489 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6490 	if (ret)
6491 		dev_err(&hdev->pdev->dev,
6492 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6493 	return ret;
6494 }
6495 
6496 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6497 				       int link_ret)
6498 {
6499 #define HCLGE_PHY_LINK_STATUS_NUM  200
6500 
6501 	struct phy_device *phydev = hdev->hw.mac.phydev;
6502 	int i = 0;
6503 	int ret;
6504 
6505 	do {
6506 		ret = phy_read_status(phydev);
6507 		if (ret) {
6508 			dev_err(&hdev->pdev->dev,
6509 				"phy update link status fail, ret = %d\n", ret);
6510 			return;
6511 		}
6512 
6513 		if (phydev->link == link_ret)
6514 			break;
6515 
6516 		msleep(HCLGE_LINK_STATUS_MS);
6517 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6518 }
6519 
6520 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6521 {
6522 #define HCLGE_MAC_LINK_STATUS_NUM  100
6523 
6524 	int link_status;
6525 	int i = 0;
6526 	int ret;
6527 
6528 	do {
6529 		ret = hclge_get_mac_link_status(hdev, &link_status);
6530 		if (ret)
6531 			return ret;
6532 		if (link_status == link_ret)
6533 			return 0;
6534 
6535 		msleep(HCLGE_LINK_STATUS_MS);
6536 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6537 	return -EBUSY;
6538 }
6539 
6540 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6541 					  bool is_phy)
6542 {
6543 	int link_ret;
6544 
6545 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6546 
6547 	if (is_phy)
6548 		hclge_phy_link_status_wait(hdev, link_ret);
6549 
6550 	return hclge_mac_link_status_wait(hdev, link_ret);
6551 }
6552 
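/* enable or disable MAC (app) loopback by read-modify-write of the MAC
 * mode config
 */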
6553 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6554 {
6555 	struct hclge_config_mac_mode_cmd *req;
6556 	struct hclge_desc desc;
6557 	u32 loop_en;
6558 	int ret;
6559 
6560 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6561 	/* 1 Read out the MAC mode config at first */
6562 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6563 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6564 	if (ret) {
6565 		dev_err(&hdev->pdev->dev,
6566 			"mac loopback get fail, ret = %d.\n", ret);
6567 		return ret;
6568 	}
6569 
6570 	/* 2 Then setup the loopback flag */
6571 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6572 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6573 
6574 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6575 
6576 	/* 3 Config mac work mode with the loopback flag
6577 	 * and its original configuration parameters
6578 	 */
6579 	hclge_cmd_reuse_desc(&desc, false);
6580 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6581 	if (ret)
6582 		dev_err(&hdev->pdev->dev,
6583 			"mac loopback set fail, ret = %d.\n", ret);
6584 	return ret;
6585 }
6586 
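/* configure serdes serial/parallel inner loopback, then poll until the
 * firmware reports the configuration is done
 */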
6587 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6588 				     enum hnae3_loop loop_mode)
6589 {
6590 #define HCLGE_SERDES_RETRY_MS	10
6591 #define HCLGE_SERDES_RETRY_NUM	100
6592 
6593 	struct hclge_serdes_lb_cmd *req;
6594 	struct hclge_desc desc;
6595 	int ret, i = 0;
6596 	u8 loop_mode_b;
6597 
6598 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6599 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6600 
6601 	switch (loop_mode) {
6602 	case HNAE3_LOOP_SERIAL_SERDES:
6603 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6604 		break;
6605 	case HNAE3_LOOP_PARALLEL_SERDES:
6606 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6607 		break;
6608 	default:
6609 		dev_err(&hdev->pdev->dev,
6610 			"unsupported serdes loopback mode %d\n", loop_mode);
6611 		return -ENOTSUPP;
6612 	}
6613 
6614 	if (en) {
6615 		req->enable = loop_mode_b;
6616 		req->mask = loop_mode_b;
6617 	} else {
6618 		req->mask = loop_mode_b;
6619 	}
6620 
6621 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6622 	if (ret) {
6623 		dev_err(&hdev->pdev->dev,
6624 			"serdes loopback set fail, ret = %d\n", ret);
6625 		return ret;
6626 	}
6627 
6628 	do {
6629 		msleep(HCLGE_SERDES_RETRY_MS);
6630 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6631 					   true);
6632 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6633 		if (ret) {
6634 			dev_err(&hdev->pdev->dev,
6635 				"serdes loopback get fail, ret = %d\n", ret);
6636 			return ret;
6637 		}
6638 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6639 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6640 
6641 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6642 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6643 		return -EBUSY;
6644 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6645 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6646 		return -EIO;
6647 	}
6648 	return ret;
6649 }
6650 
6651 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6652 				     enum hnae3_loop loop_mode)
6653 {
6654 	int ret;
6655 
6656 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6657 	if (ret)
6658 		return ret;
6659 
6660 	hclge_cfg_mac_mode(hdev, en);
6661 
6662 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6663 	if (ret)
6664 		dev_err(&hdev->pdev->dev,
6665 			"serdes loopback config mac mode timeout\n");
6666 
6667 	return ret;
6668 }
6669 
6670 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6671 				     struct phy_device *phydev)
6672 {
6673 	int ret;
6674 
6675 	if (!phydev->suspended) {
6676 		ret = phy_suspend(phydev);
6677 		if (ret)
6678 			return ret;
6679 	}
6680 
6681 	ret = phy_resume(phydev);
6682 	if (ret)
6683 		return ret;
6684 
6685 	return phy_loopback(phydev, true);
6686 }
6687 
6688 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6689 				      struct phy_device *phydev)
6690 {
6691 	int ret;
6692 
6693 	ret = phy_loopback(phydev, false);
6694 	if (ret)
6695 		return ret;
6696 
6697 	return phy_suspend(phydev);
6698 }
6699 
6700 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6701 {
6702 	struct phy_device *phydev = hdev->hw.mac.phydev;
6703 	int ret;
6704 
6705 	if (!phydev)
6706 		return -ENOTSUPP;
6707 
6708 	if (en)
6709 		ret = hclge_enable_phy_loopback(hdev, phydev);
6710 	else
6711 		ret = hclge_disable_phy_loopback(hdev, phydev);
6712 	if (ret) {
6713 		dev_err(&hdev->pdev->dev,
6714 			"set phy loopback fail, ret = %d\n", ret);
6715 		return ret;
6716 	}
6717 
6718 	hclge_cfg_mac_mode(hdev, en);
6719 
6720 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6721 	if (ret)
6722 		dev_err(&hdev->pdev->dev,
6723 			"phy loopback config mac mode timeout\n");
6724 
6725 	return ret;
6726 }
6727 
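/* enable or disable the TQP with the given id for the given stream */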
6728 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6729 			    int stream_id, bool enable)
6730 {
6731 	struct hclge_desc desc;
6732 	struct hclge_cfg_com_tqp_queue_cmd *req =
6733 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6734 	int ret;
6735 
6736 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6737 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6738 	req->stream_id = cpu_to_le16(stream_id);
6739 	if (enable)
6740 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6741 
6742 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6743 	if (ret)
6744 		dev_err(&hdev->pdev->dev,
6745 			"Tqp enable fail, status = %d.\n", ret);
6746 	return ret;
6747 }
6748 
6749 static int hclge_set_loopback(struct hnae3_handle *handle,
6750 			      enum hnae3_loop loop_mode, bool en)
6751 {
6752 	struct hclge_vport *vport = hclge_get_vport(handle);
6753 	struct hnae3_knic_private_info *kinfo;
6754 	struct hclge_dev *hdev = vport->back;
6755 	int i, ret;
6756 
6757 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6758 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6759 	 * the same, the packets are looped back in the SSU. If SSU loopback
6760 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6761 	 */
6762 	if (hdev->pdev->revision >= 0x21) {
6763 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6764 
6765 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6766 						HCLGE_SWITCH_ALW_LPBK_MASK);
6767 		if (ret)
6768 			return ret;
6769 	}
6770 
6771 	switch (loop_mode) {
6772 	case HNAE3_LOOP_APP:
6773 		ret = hclge_set_app_loopback(hdev, en);
6774 		break;
6775 	case HNAE3_LOOP_SERIAL_SERDES:
6776 	case HNAE3_LOOP_PARALLEL_SERDES:
6777 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6778 		break;
6779 	case HNAE3_LOOP_PHY:
6780 		ret = hclge_set_phy_loopback(hdev, en);
6781 		break;
6782 	default:
6783 		ret = -ENOTSUPP;
6784 		dev_err(&hdev->pdev->dev,
6785 			"loop_mode %d is not supported\n", loop_mode);
6786 		break;
6787 	}
6788 
6789 	if (ret)
6790 		return ret;
6791 
6792 	kinfo = &vport->nic.kinfo;
6793 	for (i = 0; i < kinfo->num_tqps; i++) {
6794 		ret = hclge_tqp_enable(hdev, i, 0, en);
6795 		if (ret)
6796 			return ret;
6797 	}
6798 
6799 	return 0;
6800 }
6801 
6802 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6803 {
6804 	int ret;
6805 
6806 	ret = hclge_set_app_loopback(hdev, false);
6807 	if (ret)
6808 		return ret;
6809 
6810 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6811 	if (ret)
6812 		return ret;
6813 
6814 	return hclge_cfg_serdes_loopback(hdev, false,
6815 					 HNAE3_LOOP_PARALLEL_SERDES);
6816 }
6817 
6818 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6819 {
6820 	struct hclge_vport *vport = hclge_get_vport(handle);
6821 	struct hnae3_knic_private_info *kinfo;
6822 	struct hnae3_queue *queue;
6823 	struct hclge_tqp *tqp;
6824 	int i;
6825 
6826 	kinfo = &vport->nic.kinfo;
6827 	for (i = 0; i < kinfo->num_tqps; i++) {
6828 		queue = handle->kinfo.tqp[i];
6829 		tqp = container_of(queue, struct hclge_tqp, q);
6830 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6831 	}
6832 }
6833 
6834 static void hclge_flush_link_update(struct hclge_dev *hdev)
6835 {
6836 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6837 
6838 	unsigned long last = hdev->serv_processed_cnt;
6839 	int i = 0;
6840 
6841 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6842 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6843 	       last == hdev->serv_processed_cnt)
6844 		usleep_range(1, 1);
6845 }
6846 
6847 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6848 {
6849 	struct hclge_vport *vport = hclge_get_vport(handle);
6850 	struct hclge_dev *hdev = vport->back;
6851 
6852 	if (enable) {
6853 		hclge_task_schedule(hdev, 0);
6854 	} else {
6855 		/* Set the DOWN flag here to disable link updating */
6856 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6857 
6858 		/* flush memory to make sure DOWN is seen by service task */
6859 		smp_mb__before_atomic();
6860 		hclge_flush_link_update(hdev);
6861 	}
6862 }
6863 
6864 static int hclge_ae_start(struct hnae3_handle *handle)
6865 {
6866 	struct hclge_vport *vport = hclge_get_vport(handle);
6867 	struct hclge_dev *hdev = vport->back;
6868 
6869 	/* mac enable */
6870 	hclge_cfg_mac_mode(hdev, true);
6871 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6872 	hdev->hw.mac.link = 0;
6873 
6874 	/* reset tqp stats */
6875 	hclge_reset_tqp_stats(handle);
6876 
6877 	hclge_mac_start_phy(hdev);
6878 
6879 	return 0;
6880 }
6881 
6882 static void hclge_ae_stop(struct hnae3_handle *handle)
6883 {
6884 	struct hclge_vport *vport = hclge_get_vport(handle);
6885 	struct hclge_dev *hdev = vport->back;
6886 	int i;
6887 
6888 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6889 
6890 	hclge_clear_arfs_rules(handle);
6891 
6892 	/* If it is not PF reset, the firmware will disable the MAC,
6893 	 * so we only need to stop the phy here.
6894 	 */
6895 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6896 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6897 		hclge_mac_stop_phy(hdev);
6898 		hclge_update_link_status(hdev);
6899 		return;
6900 	}
6901 
6902 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6903 		hclge_reset_tqp(handle, i);
6904 
6905 	hclge_config_mac_tnl_int(hdev, false);
6906 
6907 	/* Mac disable */
6908 	hclge_cfg_mac_mode(hdev, false);
6909 
6910 	hclge_mac_stop_phy(hdev);
6911 
6912 	/* reset tqp stats */
6913 	hclge_reset_tqp_stats(handle);
6914 	hclge_update_link_status(hdev);
6915 }
6916 
6917 int hclge_vport_start(struct hclge_vport *vport)
6918 {
6919 	struct hclge_dev *hdev = vport->back;
6920 
6921 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6922 	vport->last_active_jiffies = jiffies;
6923 
6924 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6925 		if (vport->vport_id) {
6926 			hclge_restore_mac_table_common(vport);
6927 			hclge_restore_vport_vlan_table(vport);
6928 		} else {
6929 			hclge_restore_hw_table(hdev);
6930 		}
6931 	}
6932 
6933 	clear_bit(vport->vport_id, hdev->vport_config_block);
6934 
6935 	return 0;
6936 }
6937 
6938 void hclge_vport_stop(struct hclge_vport *vport)
6939 {
6940 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6941 }
6942 
6943 static int hclge_client_start(struct hnae3_handle *handle)
6944 {
6945 	struct hclge_vport *vport = hclge_get_vport(handle);
6946 
6947 	return hclge_vport_start(vport);
6948 }
6949 
6950 static void hclge_client_stop(struct hnae3_handle *handle)
6951 {
6952 	struct hclge_vport *vport = hclge_get_vport(handle);
6953 
6954 	hclge_vport_stop(vport);
6955 }
6956 
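/* convert the mac vlan table command response code into an errno based on
 * the operation type
 */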
6957 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6958 					 u16 cmdq_resp, u8  resp_code,
6959 					 enum hclge_mac_vlan_tbl_opcode op)
6960 {
6961 	struct hclge_dev *hdev = vport->back;
6962 
6963 	if (cmdq_resp) {
6964 		dev_err(&hdev->pdev->dev,
6965 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6966 			cmdq_resp);
6967 		return -EIO;
6968 	}
6969 
6970 	if (op == HCLGE_MAC_VLAN_ADD) {
6971 		if (!resp_code || resp_code == 1)
6972 			return 0;
6973 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6974 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
6975 			return -ENOSPC;
6976 
6977 		dev_err(&hdev->pdev->dev,
6978 			"add mac addr failed for undefined, code=%u.\n",
6979 			resp_code);
6980 		return -EIO;
6981 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6982 		if (!resp_code) {
6983 			return 0;
6984 		} else if (resp_code == 1) {
6985 			dev_dbg(&hdev->pdev->dev,
6986 				"remove mac addr failed for miss.\n");
6987 			return -ENOENT;
6988 		}
6989 
6990 		dev_err(&hdev->pdev->dev,
6991 			"remove mac addr failed for undefined, code=%u.\n",
6992 			resp_code);
6993 		return -EIO;
6994 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6995 		if (!resp_code) {
6996 			return 0;
6997 		} else if (resp_code == 1) {
6998 			dev_dbg(&hdev->pdev->dev,
6999 				"lookup mac addr failed for miss.\n");
7000 			return -ENOENT;
7001 		}
7002 
7003 		dev_err(&hdev->pdev->dev,
7004 			"lookup mac addr failed for undefined, code=%u.\n",
7005 			resp_code);
7006 		return -EIO;
7007 	}
7008 
7009 	dev_err(&hdev->pdev->dev,
7010 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7011 
7012 	return -EINVAL;
7013 }
7014 
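/* set or clear the bit of the given function id in the mac vlan table
 * entry descriptors
 */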
7015 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7016 {
7017 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7018 
7019 	unsigned int word_num;
7020 	unsigned int bit_num;
7021 
7022 	if (vfid > 255 || vfid < 0)
7023 		return -EIO;
7024 
7025 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7026 		word_num = vfid / 32;
7027 		bit_num  = vfid % 32;
7028 		if (clr)
7029 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7030 		else
7031 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7032 	} else {
7033 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7034 		bit_num  = vfid % 32;
7035 		if (clr)
7036 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7037 		else
7038 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7039 	}
7040 
7041 	return 0;
7042 }
7043 
7044 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7045 {
7046 #define HCLGE_DESC_NUMBER 3
7047 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7048 	int i, j;
7049 
7050 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7051 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7052 			if (desc[i].data[j])
7053 				return false;
7054 
7055 	return true;
7056 }
7057 
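/* fill the mac address into a mac vlan table entry; mark the entry as a
 * multicast entry when is_mc is true
 */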
7058 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7059 				   const u8 *addr, bool is_mc)
7060 {
7061 	const unsigned char *mac_addr = addr;
7062 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7063 		       (mac_addr[0]) | (mac_addr[1] << 8);
7064 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7065 
7066 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7067 	if (is_mc) {
7068 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7069 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7070 	}
7071 
7072 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7073 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7074 }
7075 
7076 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7077 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7078 {
7079 	struct hclge_dev *hdev = vport->back;
7080 	struct hclge_desc desc;
7081 	u8 resp_code;
7082 	u16 retval;
7083 	int ret;
7084 
7085 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7086 
7087 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7088 
7089 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7090 	if (ret) {
7091 		dev_err(&hdev->pdev->dev,
7092 			"del mac addr failed for cmd_send, ret = %d.\n",
7093 			ret);
7094 		return ret;
7095 	}
7096 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7097 	retval = le16_to_cpu(desc.retval);
7098 
7099 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7100 					     HCLGE_MAC_VLAN_REMOVE);
7101 }
7102 
7103 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7104 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7105 				     struct hclge_desc *desc,
7106 				     bool is_mc)
7107 {
7108 	struct hclge_dev *hdev = vport->back;
7109 	u8 resp_code;
7110 	u16 retval;
7111 	int ret;
7112 
7113 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7114 	if (is_mc) {
7115 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7116 		memcpy(desc[0].data,
7117 		       req,
7118 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7119 		hclge_cmd_setup_basic_desc(&desc[1],
7120 					   HCLGE_OPC_MAC_VLAN_ADD,
7121 					   true);
7122 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7123 		hclge_cmd_setup_basic_desc(&desc[2],
7124 					   HCLGE_OPC_MAC_VLAN_ADD,
7125 					   true);
7126 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7127 	} else {
7128 		memcpy(desc[0].data,
7129 		       req,
7130 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7131 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7132 	}
7133 	if (ret) {
7134 		dev_err(&hdev->pdev->dev,
7135 			"lookup mac addr failed for cmd_send, ret = %d.\n",
7136 			ret);
7137 		return ret;
7138 	}
7139 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7140 	retval = le16_to_cpu(desc[0].retval);
7141 
7142 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7143 					     HCLGE_MAC_VLAN_LKUP);
7144 }
7145 
7146 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7147 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7148 				  struct hclge_desc *mc_desc)
7149 {
7150 	struct hclge_dev *hdev = vport->back;
7151 	int cfg_status;
7152 	u8 resp_code;
7153 	u16 retval;
7154 	int ret;
7155 
7156 	if (!mc_desc) {
7157 		struct hclge_desc desc;
7158 
7159 		hclge_cmd_setup_basic_desc(&desc,
7160 					   HCLGE_OPC_MAC_VLAN_ADD,
7161 					   false);
7162 		memcpy(desc.data, req,
7163 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7164 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7165 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7166 		retval = le16_to_cpu(desc.retval);
7167 
7168 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7169 							   resp_code,
7170 							   HCLGE_MAC_VLAN_ADD);
7171 	} else {
7172 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7173 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7174 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7175 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7176 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7177 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7178 		memcpy(mc_desc[0].data, req,
7179 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7180 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7181 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7182 		retval = le16_to_cpu(mc_desc[0].retval);
7183 
7184 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7185 							   resp_code,
7186 							   HCLGE_MAC_VLAN_ADD);
7187 	}
7188 
7189 	if (ret) {
7190 		dev_err(&hdev->pdev->dev,
7191 			"add mac addr failed for cmd_send, ret =%d.\n",
7192 			ret);
7193 		return ret;
7194 	}
7195 
7196 	return cfg_status;
7197 }
7198 
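/* Ask the firmware for @space_size unicast MAC (UMV) table entries. The size
 * actually granted is returned through @allocated_size and may be smaller
 * than requested.
 */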
7199 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7200 			       u16 *allocated_size)
7201 {
7202 	struct hclge_umv_spc_alc_cmd *req;
7203 	struct hclge_desc desc;
7204 	int ret;
7205 
7206 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7207 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7208 
7209 	req->space_size = cpu_to_le32(space_size);
7210 
7211 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7212 	if (ret) {
7213 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7214 			ret);
7215 		return ret;
7216 	}
7217 
7218 	*allocated_size = le32_to_cpu(desc.data[1]);
7219 
7220 	return 0;
7221 }
7222 
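/* Allocate the unicast MAC (UMV) table space from firmware and split it up:
 * the space is divided into (num_alloc_vport + 1) equal quotas, each vport
 * keeps one quota as its private space (priv_umv_size), and the extra quota
 * plus any remainder forms the shared pool (share_umv_size).
 */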
7223 static int hclge_init_umv_space(struct hclge_dev *hdev)
7224 {
7225 	u16 allocated_size = 0;
7226 	int ret;
7227 
7228 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7229 	if (ret)
7230 		return ret;
7231 
7232 	if (allocated_size < hdev->wanted_umv_size)
7233 		dev_warn(&hdev->pdev->dev,
7234 			 "failed to alloc umv space, want %u, get %u\n",
7235 			 hdev->wanted_umv_size, allocated_size);
7236 
7237 	hdev->max_umv_size = allocated_size;
7238 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7239 	hdev->share_umv_size = hdev->priv_umv_size +
7240 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7241 
7242 	return 0;
7243 }
7244 
7245 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7246 {
7247 	struct hclge_vport *vport;
7248 	int i;
7249 
7250 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7251 		vport = &hdev->vport[i];
7252 		vport->used_umv_num = 0;
7253 	}
7254 
7255 	mutex_lock(&hdev->vport_lock);
7256 	hdev->share_umv_size = hdev->priv_umv_size +
7257 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7258 	mutex_unlock(&hdev->vport_lock);
7259 }
7260 
7261 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7262 {
7263 	struct hclge_dev *hdev = vport->back;
7264 	bool is_full;
7265 
7266 	if (need_lock)
7267 		mutex_lock(&hdev->vport_lock);
7268 
7269 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7270 		   hdev->share_umv_size == 0);
7271 
7272 	if (need_lock)
7273 		mutex_unlock(&hdev->vport_lock);
7274 
7275 	return is_full;
7276 }
7277 
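/* Account for one UMV entry being used (@is_free == false) or released
 * (@is_free == true). Entries beyond the vport's private quota are charged
 * against the shared pool. Both callers hold hdev->vport_lock.
 */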
7278 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7279 {
7280 	struct hclge_dev *hdev = vport->back;
7281 
7282 	if (is_free) {
7283 		if (vport->used_umv_num > hdev->priv_umv_size)
7284 			hdev->share_umv_size++;
7285 
7286 		if (vport->used_umv_num > 0)
7287 			vport->used_umv_num--;
7288 	} else {
7289 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7290 		    hdev->share_umv_size > 0)
7291 			hdev->share_umv_size--;
7292 		vport->used_umv_num++;
7293 	}
7294 }
7295 
7296 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7297 						  const u8 *mac_addr)
7298 {
7299 	struct hclge_mac_node *mac_node, *tmp;
7300 
7301 	list_for_each_entry_safe(mac_node, tmp, list, node)
7302 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7303 			return mac_node;
7304 
7305 	return NULL;
7306 }
7307 
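/* Resolve a new request against the node's current state:
 * - TO_ADD over a pending TO_DEL cancels the delete (node becomes ACTIVE).
 * - TO_DEL over a pending TO_ADD cancels the add (node is freed); otherwise
 *   the node is marked TO_DEL.
 * - ACTIVE over a pending TO_ADD marks the hardware write as completed.
 */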
7308 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7309 				  enum HCLGE_MAC_NODE_STATE state)
7310 {
7311 	switch (state) {
7312 	/* from set_rx_mode or tmp_add_list */
7313 	case HCLGE_MAC_TO_ADD:
7314 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7315 			mac_node->state = HCLGE_MAC_ACTIVE;
7316 		break;
7317 	/* only from set_rx_mode */
7318 	case HCLGE_MAC_TO_DEL:
7319 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7320 			list_del(&mac_node->node);
7321 			kfree(mac_node);
7322 		} else {
7323 			mac_node->state = HCLGE_MAC_TO_DEL;
7324 		}
7325 		break;
7326 	/* only from tmp_add_list, the mac_node->state won't be
7327 	 * ACTIVE.
7328 	 */
7329 	case HCLGE_MAC_ACTIVE:
7330 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7331 			mac_node->state = HCLGE_MAC_ACTIVE;
7332 
7333 		break;
7334 	}
7335 }
7336 
7337 int hclge_update_mac_list(struct hclge_vport *vport,
7338 			  enum HCLGE_MAC_NODE_STATE state,
7339 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7340 			  const unsigned char *addr)
7341 {
7342 	struct hclge_dev *hdev = vport->back;
7343 	struct hclge_mac_node *mac_node;
7344 	struct list_head *list;
7345 
7346 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7347 		&vport->uc_mac_list : &vport->mc_mac_list;
7348 
7349 	spin_lock_bh(&vport->mac_list_lock);
7350 
	/* If the mac addr is already in the mac list, there is no need to add
	 * a new node for it; just resolve the state of the existing node:
	 * convert it to a new state, remove it, or leave it unchanged.
	 */
7355 	mac_node = hclge_find_mac_node(list, addr);
7356 	if (mac_node) {
7357 		hclge_update_mac_node(mac_node, state);
7358 		spin_unlock_bh(&vport->mac_list_lock);
7359 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7360 		return 0;
7361 	}
7362 
	/* if this address was never added, there is no need to delete it */
7364 	if (state == HCLGE_MAC_TO_DEL) {
7365 		spin_unlock_bh(&vport->mac_list_lock);
7366 		dev_err(&hdev->pdev->dev,
7367 			"failed to delete address %pM from mac list\n",
7368 			addr);
7369 		return -ENOENT;
7370 	}
7371 
7372 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7373 	if (!mac_node) {
7374 		spin_unlock_bh(&vport->mac_list_lock);
7375 		return -ENOMEM;
7376 	}
7377 
7378 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7379 
7380 	mac_node->state = state;
7381 	ether_addr_copy(mac_node->mac_addr, addr);
7382 	list_add_tail(&mac_node->node, list);
7383 
7384 	spin_unlock_bh(&vport->mac_list_lock);
7385 
7386 	return 0;
7387 }
7388 
7389 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7390 			     const unsigned char *addr)
7391 {
7392 	struct hclge_vport *vport = hclge_get_vport(handle);
7393 
7394 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7395 				     addr);
7396 }
7397 
7398 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7399 			     const unsigned char *addr)
7400 {
7401 	struct hclge_dev *hdev = vport->back;
7402 	struct hclge_mac_vlan_tbl_entry_cmd req;
7403 	struct hclge_desc desc;
7404 	u16 egress_port = 0;
7405 	int ret;
7406 
7407 	/* mac addr check */
7408 	if (is_zero_ether_addr(addr) ||
7409 	    is_broadcast_ether_addr(addr) ||
7410 	    is_multicast_ether_addr(addr)) {
7411 		dev_err(&hdev->pdev->dev,
7412 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7413 			 addr, is_zero_ether_addr(addr),
7414 			 is_broadcast_ether_addr(addr),
7415 			 is_multicast_ether_addr(addr));
7416 		return -EINVAL;
7417 	}
7418 
7419 	memset(&req, 0, sizeof(req));
7420 
7421 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7422 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7423 
7424 	req.egress_port = cpu_to_le16(egress_port);
7425 
7426 	hclge_prepare_mac_addr(&req, addr, false);
7427 
	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
7432 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7433 	if (ret == -ENOENT) {
7434 		mutex_lock(&hdev->vport_lock);
7435 		if (!hclge_is_umv_space_full(vport, false)) {
7436 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7437 			if (!ret)
7438 				hclge_update_umv_space(vport, false);
7439 			mutex_unlock(&hdev->vport_lock);
7440 			return ret;
7441 		}
7442 		mutex_unlock(&hdev->vport_lock);
7443 
7444 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7445 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7446 				hdev->priv_umv_size);
7447 
7448 		return -ENOSPC;
7449 	}
7450 
7451 	/* check if we just hit the duplicate */
7452 	if (!ret) {
7453 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7454 			 vport->vport_id, addr);
7455 		return 0;
7456 	}
7457 
7458 	dev_err(&hdev->pdev->dev,
7459 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7460 		addr);
7461 
7462 	return ret;
7463 }
7464 
7465 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7466 			    const unsigned char *addr)
7467 {
7468 	struct hclge_vport *vport = hclge_get_vport(handle);
7469 
7470 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7471 				     addr);
7472 }
7473 
7474 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7475 			    const unsigned char *addr)
7476 {
7477 	struct hclge_dev *hdev = vport->back;
7478 	struct hclge_mac_vlan_tbl_entry_cmd req;
7479 	int ret;
7480 
7481 	/* mac addr check */
7482 	if (is_zero_ether_addr(addr) ||
7483 	    is_broadcast_ether_addr(addr) ||
7484 	    is_multicast_ether_addr(addr)) {
7485 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7486 			addr);
7487 		return -EINVAL;
7488 	}
7489 
7490 	memset(&req, 0, sizeof(req));
7491 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7492 	hclge_prepare_mac_addr(&req, addr, false);
7493 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7494 	if (!ret) {
7495 		mutex_lock(&hdev->vport_lock);
7496 		hclge_update_umv_space(vport, true);
7497 		mutex_unlock(&hdev->vport_lock);
7498 	} else if (ret == -ENOENT) {
7499 		ret = 0;
7500 	}
7501 
7502 	return ret;
7503 }
7504 
7505 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7506 			     const unsigned char *addr)
7507 {
7508 	struct hclge_vport *vport = hclge_get_vport(handle);
7509 
7510 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7511 				     addr);
7512 }
7513 
7514 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7515 			     const unsigned char *addr)
7516 {
7517 	struct hclge_dev *hdev = vport->back;
7518 	struct hclge_mac_vlan_tbl_entry_cmd req;
7519 	struct hclge_desc desc[3];
7520 	int status;
7521 
7522 	/* mac addr check */
7523 	if (!is_multicast_ether_addr(addr)) {
7524 		dev_err(&hdev->pdev->dev,
7525 			"Add mc mac err! invalid mac:%pM.\n",
7526 			 addr);
7527 		return -EINVAL;
7528 	}
7529 	memset(&req, 0, sizeof(req));
7530 	hclge_prepare_mac_addr(&req, addr, true);
7531 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7532 	if (status) {
		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[1].data));
		memset(desc[2].data, 0, sizeof(desc[2].data));
7537 	}
7538 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7539 	if (status)
7540 		return status;
7541 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7542 
	/* if the table has already overflowed, do not print each time */
7544 	if (status == -ENOSPC &&
7545 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7546 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7547 
7548 	return status;
7549 }
7550 
7551 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7552 			    const unsigned char *addr)
7553 {
7554 	struct hclge_vport *vport = hclge_get_vport(handle);
7555 
7556 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7557 				     addr);
7558 }
7559 
7560 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7561 			    const unsigned char *addr)
7562 {
7563 	struct hclge_dev *hdev = vport->back;
7564 	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	int status;
7567 
7568 	/* mac addr check */
7569 	if (!is_multicast_ether_addr(addr)) {
7570 		dev_dbg(&hdev->pdev->dev,
7571 			"Remove mc mac err! invalid mac:%pM.\n",
7572 			 addr);
7573 		return -EINVAL;
7574 	}
7575 
7576 	memset(&req, 0, sizeof(req));
7577 	hclge_prepare_mac_addr(&req, addr, true);
7578 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7579 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
7581 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7582 		if (status)
7583 			return status;
7584 
7585 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
7587 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7588 		else
			/* Not all the vfids are zero, just update the bitmap */
7590 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7591 
7592 	} else if (status == -ENOENT) {
7593 		status = 0;
7594 	}
7595 
7596 	return status;
7597 }
7598 
7599 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7600 				      struct list_head *list,
7601 				      int (*sync)(struct hclge_vport *,
7602 						  const unsigned char *))
7603 {
7604 	struct hclge_mac_node *mac_node, *tmp;
7605 	int ret;
7606 
7607 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7608 		ret = sync(vport, mac_node->mac_addr);
7609 		if (!ret) {
7610 			mac_node->state = HCLGE_MAC_ACTIVE;
7611 		} else {
7612 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7613 				&vport->state);
7614 			break;
7615 		}
7616 	}
7617 }
7618 
7619 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7620 					struct list_head *list,
7621 					int (*unsync)(struct hclge_vport *,
7622 						      const unsigned char *))
7623 {
7624 	struct hclge_mac_node *mac_node, *tmp;
7625 	int ret;
7626 
7627 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7628 		ret = unsync(vport, mac_node->mac_addr);
7629 		if (!ret || ret == -ENOENT) {
7630 			list_del(&mac_node->node);
7631 			kfree(mac_node);
7632 		} else {
7633 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7634 				&vport->state);
7635 			break;
7636 		}
7637 	}
7638 }
7639 
7640 static bool hclge_sync_from_add_list(struct list_head *add_list,
7641 				     struct list_head *mac_list)
7642 {
7643 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7644 	bool all_added = true;
7645 
7646 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7647 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7648 			all_added = false;
7649 
		/* If the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * while this address was being added to the mac table. If
		 * the mac_node state is ACTIVE, change it to TO_DEL so that
		 * it is removed next time. Otherwise it must be TO_ADD,
		 * meaning the address was never added to the mac table, so
		 * just free the mac node.
		 */
7658 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7659 		if (new_node) {
7660 			hclge_update_mac_node(new_node, mac_node->state);
7661 			list_del(&mac_node->node);
7662 			kfree(mac_node);
7663 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7664 			mac_node->state = HCLGE_MAC_TO_DEL;
7665 			list_del(&mac_node->node);
7666 			list_add_tail(&mac_node->node, mac_list);
7667 		} else {
7668 			list_del(&mac_node->node);
7669 			kfree(mac_node);
7670 		}
7671 	}
7672 
7673 	return all_added;
7674 }
7675 
7676 static void hclge_sync_from_del_list(struct list_head *del_list,
7677 				     struct list_head *mac_list)
7678 {
7679 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7680 
7681 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7682 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7683 		if (new_node) {
			/* If the mac addr exists in the mac list, it means a
			 * new TO_ADD request was received while this address
			 * was being configured. The mac node state is TO_ADD,
			 * but the address is still in the hardware (because
			 * the delete failed), so just change the mac node
			 * state to ACTIVE.
			 */
7691 			new_node->state = HCLGE_MAC_ACTIVE;
7692 			list_del(&mac_node->node);
7693 			kfree(mac_node);
7694 		} else {
7695 			list_del(&mac_node->node);
7696 			list_add_tail(&mac_node->node, mac_list);
7697 		}
7698 	}
7699 }
7700 
7701 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7702 					enum HCLGE_MAC_ADDR_TYPE mac_type,
7703 					bool is_all_added)
7704 {
7705 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7706 		if (is_all_added)
7707 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7708 		else
7709 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7710 	} else {
7711 		if (is_all_added)
7712 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7713 		else
7714 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7715 	}
7716 }
7717 
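/* Push the pending changes of one vport's mac list to hardware: the list is
 * snapshotted into tmp_add_list/tmp_del_list under mac_list_lock, the
 * add/remove commands are issued without the lock held, and the results are
 * then merged back into the list (failed entries are kept for a later retry).
 */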
7718 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7719 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
7720 {
7721 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7722 	struct list_head tmp_add_list, tmp_del_list;
7723 	struct list_head *list;
7724 	bool all_added;
7725 
7726 	INIT_LIST_HEAD(&tmp_add_list);
7727 	INIT_LIST_HEAD(&tmp_del_list);
7728 
	/* Move the mac addresses to tmp_add_list and tmp_del_list, so that
	 * they can be added/deleted outside the spin lock.
	 */
7732 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7733 		&vport->uc_mac_list : &vport->mc_mac_list;
7734 
7735 	spin_lock_bh(&vport->mac_list_lock);
7736 
7737 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7738 		switch (mac_node->state) {
7739 		case HCLGE_MAC_TO_DEL:
7740 			list_del(&mac_node->node);
7741 			list_add_tail(&mac_node->node, &tmp_del_list);
7742 			break;
7743 		case HCLGE_MAC_TO_ADD:
7744 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7745 			if (!new_node)
7746 				goto stop_traverse;
7747 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7748 			new_node->state = mac_node->state;
7749 			list_add_tail(&new_node->node, &tmp_add_list);
7750 			break;
7751 		default:
7752 			break;
7753 		}
7754 	}
7755 
7756 stop_traverse:
7757 	spin_unlock_bh(&vport->mac_list_lock);
7758 
	/* delete first, to free up mac table space for the additions */
7760 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7761 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7762 					    hclge_rm_uc_addr_common);
7763 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7764 					  hclge_add_uc_addr_common);
7765 	} else {
7766 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7767 					    hclge_rm_mc_addr_common);
7768 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7769 					  hclge_add_mc_addr_common);
7770 	}
7771 
	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry next time.
	 */
7775 	spin_lock_bh(&vport->mac_list_lock);
7776 
7777 	hclge_sync_from_del_list(&tmp_del_list, list);
7778 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7779 
7780 	spin_unlock_bh(&vport->mac_list_lock);
7781 
7782 	hclge_update_overflow_flags(vport, mac_type, all_added);
7783 }
7784 
7785 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7786 {
7787 	struct hclge_dev *hdev = vport->back;
7788 
7789 	if (test_bit(vport->vport_id, hdev->vport_config_block))
7790 		return false;
7791 
7792 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7793 		return true;
7794 
7795 	return false;
7796 }
7797 
7798 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7799 {
7800 	int i;
7801 
7802 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7803 		struct hclge_vport *vport = &hdev->vport[i];
7804 
7805 		if (!hclge_need_sync_mac_table(vport))
7806 			continue;
7807 
7808 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7809 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7810 	}
7811 }
7812 
7813 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7814 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7815 {
7816 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7817 	struct hclge_mac_node *mac_cfg, *tmp;
7818 	struct hclge_dev *hdev = vport->back;
7819 	struct list_head tmp_del_list, *list;
7820 	int ret;
7821 
7822 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7823 		list = &vport->uc_mac_list;
7824 		unsync = hclge_rm_uc_addr_common;
7825 	} else {
7826 		list = &vport->mc_mac_list;
7827 		unsync = hclge_rm_mc_addr_common;
7828 	}
7829 
7830 	INIT_LIST_HEAD(&tmp_del_list);
7831 
7832 	if (!is_del_list)
7833 		set_bit(vport->vport_id, hdev->vport_config_block);
7834 
7835 	spin_lock_bh(&vport->mac_list_lock);
7836 
7837 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7838 		switch (mac_cfg->state) {
7839 		case HCLGE_MAC_TO_DEL:
7840 		case HCLGE_MAC_ACTIVE:
7841 			list_del(&mac_cfg->node);
7842 			list_add_tail(&mac_cfg->node, &tmp_del_list);
7843 			break;
7844 		case HCLGE_MAC_TO_ADD:
7845 			if (is_del_list) {
7846 				list_del(&mac_cfg->node);
7847 				kfree(mac_cfg);
7848 			}
7849 			break;
7850 		}
7851 	}
7852 
7853 	spin_unlock_bh(&vport->mac_list_lock);
7854 
7855 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7856 		ret = unsync(vport, mac_cfg->mac_addr);
7857 		if (!ret || ret == -ENOENT) {
			/* clear all mac addresses from hardware, but keep
			 * them in the mac list so they can be restored after
			 * the vf reset finishes.
			 */
7862 			if (!is_del_list &&
7863 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
7864 				mac_cfg->state = HCLGE_MAC_TO_ADD;
7865 			} else {
7866 				list_del(&mac_cfg->node);
7867 				kfree(mac_cfg);
7868 			}
7869 		} else if (is_del_list) {
7870 			mac_cfg->state = HCLGE_MAC_TO_DEL;
7871 		}
7872 	}
7873 
7874 	spin_lock_bh(&vport->mac_list_lock);
7875 
7876 	hclge_sync_from_del_list(&tmp_del_list, list);
7877 
7878 	spin_unlock_bh(&vport->mac_list_lock);
7879 }
7880 
/* remove all mac addresses when uninitializing */
7882 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7883 					enum HCLGE_MAC_ADDR_TYPE mac_type)
7884 {
7885 	struct hclge_mac_node *mac_node, *tmp;
7886 	struct hclge_dev *hdev = vport->back;
7887 	struct list_head tmp_del_list, *list;
7888 
7889 	INIT_LIST_HEAD(&tmp_del_list);
7890 
7891 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7892 		&vport->uc_mac_list : &vport->mc_mac_list;
7893 
7894 	spin_lock_bh(&vport->mac_list_lock);
7895 
7896 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7897 		switch (mac_node->state) {
7898 		case HCLGE_MAC_TO_DEL:
7899 		case HCLGE_MAC_ACTIVE:
7900 			list_del(&mac_node->node);
7901 			list_add_tail(&mac_node->node, &tmp_del_list);
7902 			break;
7903 		case HCLGE_MAC_TO_ADD:
7904 			list_del(&mac_node->node);
7905 			kfree(mac_node);
7906 			break;
7907 		}
7908 	}
7909 
7910 	spin_unlock_bh(&vport->mac_list_lock);
7911 
7912 	if (mac_type == HCLGE_MAC_ADDR_UC)
7913 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7914 					    hclge_rm_uc_addr_common);
7915 	else
7916 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7917 					    hclge_rm_mc_addr_common);
7918 
7919 	if (!list_empty(&tmp_del_list))
7920 		dev_warn(&hdev->pdev->dev,
7921 			 "uninit %s mac list for vport %u not completely.\n",
7922 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7923 			 vport->vport_id);
7924 
7925 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7926 		list_del(&mac_node->node);
7927 		kfree(mac_node);
7928 	}
7929 }
7930 
7931 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
7932 {
7933 	struct hclge_vport *vport;
7934 	int i;
7935 
7936 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7937 		vport = &hdev->vport[i];
7938 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7939 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
7940 	}
7941 }
7942 
7943 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7944 					      u16 cmdq_resp, u8 resp_code)
7945 {
7946 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7947 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7948 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7949 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7950 
7951 	int return_status;
7952 
7953 	if (cmdq_resp) {
7954 		dev_err(&hdev->pdev->dev,
7955 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7956 			cmdq_resp);
7957 		return -EIO;
7958 	}
7959 
7960 	switch (resp_code) {
7961 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7962 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7963 		return_status = 0;
7964 		break;
7965 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7966 		dev_err(&hdev->pdev->dev,
7967 			"add mac ethertype failed for manager table overflow.\n");
7968 		return_status = -EIO;
7969 		break;
7970 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7971 		dev_err(&hdev->pdev->dev,
7972 			"add mac ethertype failed for key conflict.\n");
7973 		return_status = -EIO;
7974 		break;
7975 	default:
7976 		dev_err(&hdev->pdev->dev,
7977 			"add mac ethertype failed for undefined, code=%u.\n",
7978 			resp_code);
7979 		return_status = -EIO;
7980 	}
7981 
7982 	return return_status;
7983 }
7984 
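/* A VF MAC address is considered in use if it is already present in the
 * hardware mac_vlan table or is assigned to another VF in vf_info.mac.
 */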
7985 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7986 				     u8 *mac_addr)
7987 {
7988 	struct hclge_mac_vlan_tbl_entry_cmd req;
7989 	struct hclge_dev *hdev = vport->back;
7990 	struct hclge_desc desc;
7991 	u16 egress_port = 0;
7992 	int i;
7993 
7994 	if (is_zero_ether_addr(mac_addr))
7995 		return false;
7996 
7997 	memset(&req, 0, sizeof(req));
7998 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7999 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8000 	req.egress_port = cpu_to_le16(egress_port);
8001 	hclge_prepare_mac_addr(&req, mac_addr, false);
8002 
8003 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8004 		return true;
8005 
8006 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8007 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8008 		if (i != vf_idx &&
8009 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8010 			return true;
8011 
8012 	return false;
8013 }
8014 
8015 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8016 			    u8 *mac_addr)
8017 {
8018 	struct hclge_vport *vport = hclge_get_vport(handle);
8019 	struct hclge_dev *hdev = vport->back;
8020 
8021 	vport = hclge_get_vf_vport(hdev, vf);
8022 	if (!vport)
8023 		return -EINVAL;
8024 
8025 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8026 		dev_info(&hdev->pdev->dev,
8027 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8028 			 mac_addr);
8029 		return 0;
8030 	}
8031 
8032 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8033 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8034 			mac_addr);
8035 		return -EEXIST;
8036 	}
8037 
8038 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8039 
8040 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8041 		dev_info(&hdev->pdev->dev,
8042 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8043 			 vf, mac_addr);
8044 		return hclge_inform_reset_assert_to_vf(vport);
8045 	}
8046 
8047 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8048 		 vf, mac_addr);
8049 	return 0;
8050 }
8051 
8052 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8053 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8054 {
8055 	struct hclge_desc desc;
8056 	u8 resp_code;
8057 	u16 retval;
8058 	int ret;
8059 
8060 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8061 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8062 
8063 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8064 	if (ret) {
8065 		dev_err(&hdev->pdev->dev,
8066 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8067 			ret);
8068 		return ret;
8069 	}
8070 
8071 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8072 	retval = le16_to_cpu(desc.retval);
8073 
8074 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8075 }
8076 
8077 static int init_mgr_tbl(struct hclge_dev *hdev)
8078 {
8079 	int ret;
8080 	int i;
8081 
8082 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8083 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8084 		if (ret) {
8085 			dev_err(&hdev->pdev->dev,
8086 				"add mac ethertype failed, ret =%d.\n",
8087 				ret);
8088 			return ret;
8089 		}
8090 	}
8091 
8092 	return 0;
8093 }
8094 
8095 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8096 {
8097 	struct hclge_vport *vport = hclge_get_vport(handle);
8098 	struct hclge_dev *hdev = vport->back;
8099 
8100 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8101 }
8102 
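/* Replace the device (primary) unicast address in the vport's uc_mac_list:
 * queue @new_addr for addition and move it to the list head, and, if it
 * differs from @old_addr, queue the old address for deletion. The caller
 * holds vport->mac_list_lock.
 */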
8103 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8104 				       const u8 *old_addr, const u8 *new_addr)
8105 {
8106 	struct list_head *list = &vport->uc_mac_list;
8107 	struct hclge_mac_node *old_node, *new_node;
8108 
8109 	new_node = hclge_find_mac_node(list, new_addr);
8110 	if (!new_node) {
8111 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8112 		if (!new_node)
8113 			return -ENOMEM;
8114 
8115 		new_node->state = HCLGE_MAC_TO_ADD;
8116 		ether_addr_copy(new_node->mac_addr, new_addr);
8117 		list_add(&new_node->node, list);
8118 	} else {
8119 		if (new_node->state == HCLGE_MAC_TO_DEL)
8120 			new_node->state = HCLGE_MAC_ACTIVE;
8121 
		/* Make sure the new addr is at the head of the list.
		 * Otherwise, after a global/imp reset (which clears the mac
		 * table in hardware), the dev addr might not be re-added to
		 * the mac table because of the umv space limitation.
		 */
8127 		list_move(&new_node->node, list);
8128 	}
8129 
8130 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8131 		old_node = hclge_find_mac_node(list, old_addr);
8132 		if (old_node) {
8133 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8134 				list_del(&old_node->node);
8135 				kfree(old_node);
8136 			} else {
8137 				old_node->state = HCLGE_MAC_TO_DEL;
8138 			}
8139 		}
8140 	}
8141 
8142 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8143 
8144 	return 0;
8145 }
8146 
8147 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8148 			      bool is_first)
8149 {
8150 	const unsigned char *new_addr = (const unsigned char *)p;
8151 	struct hclge_vport *vport = hclge_get_vport(handle);
8152 	struct hclge_dev *hdev = vport->back;
8153 	unsigned char *old_addr = NULL;
8154 	int ret;
8155 
8156 	/* mac addr check */
8157 	if (is_zero_ether_addr(new_addr) ||
8158 	    is_broadcast_ether_addr(new_addr) ||
8159 	    is_multicast_ether_addr(new_addr)) {
8160 		dev_err(&hdev->pdev->dev,
8161 			"change uc mac err! invalid mac: %pM.\n",
8162 			 new_addr);
8163 		return -EINVAL;
8164 	}
8165 
8166 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8167 	if (ret) {
8168 		dev_err(&hdev->pdev->dev,
8169 			"failed to configure mac pause address, ret = %d\n",
8170 			ret);
8171 		return ret;
8172 	}
8173 
8174 	if (!is_first)
8175 		old_addr = hdev->hw.mac.mac_addr;
8176 
8177 	spin_lock_bh(&vport->mac_list_lock);
8178 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8179 	if (ret) {
8180 		dev_err(&hdev->pdev->dev,
8181 			"failed to change the mac addr:%pM, ret = %d\n",
8182 			new_addr, ret);
8183 		spin_unlock_bh(&vport->mac_list_lock);
8184 
8185 		if (!is_first)
8186 			hclge_pause_addr_cfg(hdev, old_addr);
8187 
8188 		return ret;
8189 	}
	/* The dev addr must be updated with the spin lock held, to prevent it
	 * from being removed by the set_rx_mode path.
	 */
8193 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8194 	spin_unlock_bh(&vport->mac_list_lock);
8195 
8196 	hclge_task_schedule(hdev, 0);
8197 
8198 	return 0;
8199 }
8200 
8201 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8202 			  int cmd)
8203 {
8204 	struct hclge_vport *vport = hclge_get_vport(handle);
8205 	struct hclge_dev *hdev = vport->back;
8206 
8207 	if (!hdev->hw.mac.phydev)
8208 		return -EOPNOTSUPP;
8209 
8210 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8211 }
8212 
8213 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8214 				      u8 fe_type, bool filter_en, u8 vf_id)
8215 {
8216 	struct hclge_vlan_filter_ctrl_cmd *req;
8217 	struct hclge_desc desc;
8218 	int ret;
8219 
8220 	/* read current vlan filter parameter */
8221 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8222 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8223 	req->vlan_type = vlan_type;
8224 	req->vf_id = vf_id;
8225 
8226 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8227 	if (ret) {
8228 		dev_err(&hdev->pdev->dev,
8229 			"failed to get vlan filter config, ret = %d.\n", ret);
8230 		return ret;
8231 	}
8232 
8233 	/* modify and write new config parameter */
8234 	hclge_cmd_reuse_desc(&desc, false);
8235 	req->vlan_fe = filter_en ?
8236 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8237 
8238 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8239 	if (ret)
8240 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8241 			ret);
8242 
8243 	return ret;
8244 }
8245 
8246 #define HCLGE_FILTER_TYPE_VF		0
8247 #define HCLGE_FILTER_TYPE_PORT		1
8248 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8249 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8250 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8251 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8252 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8253 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8254 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8255 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8256 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8257 
8258 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8259 {
8260 	struct hclge_vport *vport = hclge_get_vport(handle);
8261 	struct hclge_dev *hdev = vport->back;
8262 
8263 	if (hdev->pdev->revision >= 0x21) {
8264 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8265 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8266 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8267 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8268 	} else {
8269 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8270 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8271 					   0);
8272 	}
8273 	if (enable)
8274 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8275 	else
8276 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8277 }
8278 
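/* Add or remove (@is_kill) a vlan id in the per-VF vlan filter. The VF
 * bitmap spans the two descriptors: byte (vfid / 8) holds this function's
 * bit, BIT(vfid % 8), and falls into the second descriptor when the byte
 * offset reaches HCLGE_MAX_VF_BYTES.
 */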
8279 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8280 				    bool is_kill, u16 vlan,
8281 				    __be16 proto)
8282 {
8283 	struct hclge_vport *vport = &hdev->vport[vfid];
8284 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8285 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8286 	struct hclge_desc desc[2];
8287 	u8 vf_byte_val;
8288 	u8 vf_byte_off;
8289 	int ret;
8290 
	/* If the vf vlan table is full, firmware disables the vf vlan filter,
	 * so it is neither possible nor necessary to add a new vlan id to it.
	 * However, if spoof check is enabled and the vf vlan table is full,
	 * adding a new vlan must fail, because tx packets with that vlan id
	 * would be dropped.
	 */
8296 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8297 		if (vport->vf_info.spoofchk && vlan) {
8298 			dev_err(&hdev->pdev->dev,
8299 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8300 			return -EPERM;
8301 		}
8302 		return 0;
8303 	}
8304 
8305 	hclge_cmd_setup_basic_desc(&desc[0],
8306 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8307 	hclge_cmd_setup_basic_desc(&desc[1],
8308 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8309 
8310 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8311 
8312 	vf_byte_off = vfid / 8;
8313 	vf_byte_val = 1 << (vfid % 8);
8314 
8315 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8316 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8317 
8318 	req0->vlan_id  = cpu_to_le16(vlan);
8319 	req0->vlan_cfg = is_kill;
8320 
8321 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8322 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8323 	else
8324 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8325 
8326 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8327 	if (ret) {
8328 		dev_err(&hdev->pdev->dev,
8329 			"Send vf vlan command fail, ret =%d.\n",
8330 			ret);
8331 		return ret;
8332 	}
8333 
8334 	if (!is_kill) {
8335 #define HCLGE_VF_VLAN_NO_ENTRY	2
8336 		if (!req0->resp_code || req0->resp_code == 1)
8337 			return 0;
8338 
8339 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8340 			set_bit(vfid, hdev->vf_vlan_full);
8341 			dev_warn(&hdev->pdev->dev,
8342 				 "vf vlan table is full, vf vlan filter is disabled\n");
8343 			return 0;
8344 		}
8345 
8346 		dev_err(&hdev->pdev->dev,
8347 			"Add vf vlan filter fail, ret =%u.\n",
8348 			req0->resp_code);
8349 	} else {
8350 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8351 		if (!req0->resp_code)
8352 			return 0;
8353 
		/* The vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are never added to it. Just return 0
		 * without a warning, to avoid a flood of log messages at
		 * unload time.
		 */
8359 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8360 			return 0;
8361 
8362 		dev_err(&hdev->pdev->dev,
8363 			"Kill vf vlan filter fail, ret =%u.\n",
8364 			req0->resp_code);
8365 	}
8366 
8367 	return -EIO;
8368 }
8369 
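/* Program the port (PF) vlan filter. The vlan id is mapped to a block of
 * HCLGE_VLAN_ID_OFFSET_STEP vlans (vlan_offset), a byte within that block
 * and a bit within that byte.
 */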
8370 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8371 				      u16 vlan_id, bool is_kill)
8372 {
8373 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8374 	struct hclge_desc desc;
8375 	u8 vlan_offset_byte_val;
8376 	u8 vlan_offset_byte;
8377 	u8 vlan_offset_160;
8378 	int ret;
8379 
8380 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8381 
8382 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8383 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8384 			   HCLGE_VLAN_BYTE_SIZE;
8385 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8386 
8387 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8388 	req->vlan_offset = vlan_offset_160;
8389 	req->vlan_cfg = is_kill;
8390 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8391 
8392 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8393 	if (ret)
8394 		dev_err(&hdev->pdev->dev,
8395 			"port vlan command, send fail, ret =%d.\n", ret);
8396 	return ret;
8397 }
8398 
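/* Update the vlan filters for a vport. The per-port filter is only written
 * when the first vport joins a vlan or the last vport leaves it, based on
 * the per-vlan membership bitmap kept in hdev->vlan_table.
 */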
8399 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8400 				    u16 vport_id, u16 vlan_id,
8401 				    bool is_kill)
8402 {
8403 	u16 vport_idx, vport_num = 0;
8404 	int ret;
8405 
8406 	if (is_kill && !vlan_id)
8407 		return 0;
8408 
8409 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8410 				       proto);
8411 	if (ret) {
8412 		dev_err(&hdev->pdev->dev,
8413 			"Set %u vport vlan filter config fail, ret =%d.\n",
8414 			vport_id, ret);
8415 		return ret;
8416 	}
8417 
8418 	/* vlan 0 may be added twice when 8021q module is enabled */
8419 	if (!is_kill && !vlan_id &&
8420 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8421 		return 0;
8422 
8423 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8424 		dev_err(&hdev->pdev->dev,
8425 			"Add port vlan failed, vport %u is already in vlan %u\n",
8426 			vport_id, vlan_id);
8427 		return -EINVAL;
8428 	}
8429 
8430 	if (is_kill &&
8431 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8432 		dev_err(&hdev->pdev->dev,
8433 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8434 			vport_id, vlan_id);
8435 		return -EINVAL;
8436 	}
8437 
8438 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8439 		vport_num++;
8440 
8441 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8442 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8443 						 is_kill);
8444 
8445 	return ret;
8446 }
8447 
8448 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8449 {
8450 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8451 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8452 	struct hclge_dev *hdev = vport->back;
8453 	struct hclge_desc desc;
8454 	u16 bmap_index;
8455 	int status;
8456 
8457 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8458 
8459 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8460 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8461 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8462 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8463 		      vcfg->accept_tag1 ? 1 : 0);
8464 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8465 		      vcfg->accept_untag1 ? 1 : 0);
8466 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8467 		      vcfg->accept_tag2 ? 1 : 0);
8468 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8469 		      vcfg->accept_untag2 ? 1 : 0);
8470 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8471 		      vcfg->insert_tag1_en ? 1 : 0);
8472 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8473 		      vcfg->insert_tag2_en ? 1 : 0);
8474 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8475 
8476 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8477 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8478 			HCLGE_VF_NUM_PER_BYTE;
8479 	req->vf_bitmap[bmap_index] =
8480 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8481 
8482 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8483 	if (status)
8484 		dev_err(&hdev->pdev->dev,
8485 			"Send port txvlan cfg command fail, ret =%d\n",
8486 			status);
8487 
8488 	return status;
8489 }
8490 
8491 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8492 {
8493 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8494 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8495 	struct hclge_dev *hdev = vport->back;
8496 	struct hclge_desc desc;
8497 	u16 bmap_index;
8498 	int status;
8499 
8500 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8501 
8502 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8503 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8504 		      vcfg->strip_tag1_en ? 1 : 0);
8505 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8506 		      vcfg->strip_tag2_en ? 1 : 0);
8507 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8508 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8509 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8510 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8511 
8512 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8513 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8514 			HCLGE_VF_NUM_PER_BYTE;
8515 	req->vf_bitmap[bmap_index] =
8516 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8517 
8518 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8519 	if (status)
8520 		dev_err(&hdev->pdev->dev,
8521 			"Send port rxvlan cfg command fail, ret =%d\n",
8522 			status);
8523 
8524 	return status;
8525 }
8526 
8527 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8528 				  u16 port_base_vlan_state,
8529 				  u16 vlan_tag)
8530 {
8531 	int ret;
8532 
8533 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8534 		vport->txvlan_cfg.accept_tag1 = true;
8535 		vport->txvlan_cfg.insert_tag1_en = false;
8536 		vport->txvlan_cfg.default_tag1 = 0;
8537 	} else {
8538 		vport->txvlan_cfg.accept_tag1 = false;
8539 		vport->txvlan_cfg.insert_tag1_en = true;
8540 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8541 	}
8542 
8543 	vport->txvlan_cfg.accept_untag1 = true;
8544 
	/* accept_tag2 and accept_untag2 are not supported on pdev
	 * revision 0x20; newer revisions support them, but these two
	 * fields cannot be configured by the user.
	 */
8549 	vport->txvlan_cfg.accept_tag2 = true;
8550 	vport->txvlan_cfg.accept_untag2 = true;
8551 	vport->txvlan_cfg.insert_tag2_en = false;
8552 	vport->txvlan_cfg.default_tag2 = 0;
8553 
8554 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8555 		vport->rxvlan_cfg.strip_tag1_en = false;
8556 		vport->rxvlan_cfg.strip_tag2_en =
8557 				vport->rxvlan_cfg.rx_vlan_offload_en;
8558 	} else {
8559 		vport->rxvlan_cfg.strip_tag1_en =
8560 				vport->rxvlan_cfg.rx_vlan_offload_en;
8561 		vport->rxvlan_cfg.strip_tag2_en = true;
8562 	}
8563 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8564 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8565 
8566 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8567 	if (ret)
8568 		return ret;
8569 
8570 	return hclge_set_vlan_rx_offload_cfg(vport);
8571 }
8572 
8573 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8574 {
8575 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8576 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8577 	struct hclge_desc desc;
8578 	int status;
8579 
8580 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8581 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8582 	rx_req->ot_fst_vlan_type =
8583 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8584 	rx_req->ot_sec_vlan_type =
8585 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8586 	rx_req->in_fst_vlan_type =
8587 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8588 	rx_req->in_sec_vlan_type =
8589 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8590 
8591 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8592 	if (status) {
8593 		dev_err(&hdev->pdev->dev,
8594 			"Send rxvlan protocol type command fail, ret =%d\n",
8595 			status);
8596 		return status;
8597 	}
8598 
8599 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8600 
8601 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8602 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8603 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8604 
8605 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8606 	if (status)
8607 		dev_err(&hdev->pdev->dev,
8608 			"Send txvlan protocol type command fail, ret =%d\n",
8609 			status);
8610 
8611 	return status;
8612 }
8613 
8614 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8615 {
8616 #define HCLGE_DEF_VLAN_TYPE		0x8100
8617 
8618 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8619 	struct hclge_vport *vport;
8620 	int ret;
8621 	int i;
8622 
8623 	if (hdev->pdev->revision >= 0x21) {
8624 		/* for revision 0x21, vf vlan filter is per function */
8625 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8626 			vport = &hdev->vport[i];
8627 			ret = hclge_set_vlan_filter_ctrl(hdev,
8628 							 HCLGE_FILTER_TYPE_VF,
8629 							 HCLGE_FILTER_FE_EGRESS,
8630 							 true,
8631 							 vport->vport_id);
8632 			if (ret)
8633 				return ret;
8634 		}
8635 
8636 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8637 						 HCLGE_FILTER_FE_INGRESS, true,
8638 						 0);
8639 		if (ret)
8640 			return ret;
8641 	} else {
8642 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8643 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8644 						 true, 0);
8645 		if (ret)
8646 			return ret;
8647 	}
8648 
8649 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8650 
8651 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8652 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8653 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8654 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8655 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8656 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8657 
8658 	ret = hclge_set_vlan_protocol_type(hdev);
8659 	if (ret)
8660 		return ret;
8661 
8662 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8663 		u16 vlan_tag;
8664 
8665 		vport = &hdev->vport[i];
8666 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8667 
8668 		ret = hclge_vlan_offload_cfg(vport,
8669 					     vport->port_base_vlan_cfg.state,
8670 					     vlan_tag);
8671 		if (ret)
8672 			return ret;
8673 	}
8674 
8675 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8676 }
8677 
8678 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8679 				       bool writen_to_tbl)
8680 {
8681 	struct hclge_vport_vlan_cfg *vlan;
8682 
8683 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8684 	if (!vlan)
8685 		return;
8686 
8687 	vlan->hd_tbl_status = writen_to_tbl;
8688 	vlan->vlan_id = vlan_id;
8689 
8690 	list_add_tail(&vlan->node, &vport->vlan_list);
8691 }
8692 
8693 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8694 {
8695 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8696 	struct hclge_dev *hdev = vport->back;
8697 	int ret;
8698 
8699 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8700 		if (!vlan->hd_tbl_status) {
8701 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8702 						       vport->vport_id,
8703 						       vlan->vlan_id, false);
8704 			if (ret) {
8705 				dev_err(&hdev->pdev->dev,
8706 					"restore vport vlan list failed, ret=%d\n",
8707 					ret);
8708 				return ret;
8709 			}
8710 		}
8711 		vlan->hd_tbl_status = true;
8712 	}
8713 
8714 	return 0;
8715 }
8716 
8717 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8718 				      bool is_write_tbl)
8719 {
8720 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8721 	struct hclge_dev *hdev = vport->back;
8722 
8723 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8724 		if (vlan->vlan_id == vlan_id) {
8725 			if (is_write_tbl && vlan->hd_tbl_status)
8726 				hclge_set_vlan_filter_hw(hdev,
8727 							 htons(ETH_P_8021Q),
8728 							 vport->vport_id,
8729 							 vlan_id,
8730 							 true);
8731 
8732 			list_del(&vlan->node);
8733 			kfree(vlan);
8734 			break;
8735 		}
8736 	}
8737 }
8738 
8739 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8740 {
8741 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8742 	struct hclge_dev *hdev = vport->back;
8743 
8744 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8745 		if (vlan->hd_tbl_status)
8746 			hclge_set_vlan_filter_hw(hdev,
8747 						 htons(ETH_P_8021Q),
8748 						 vport->vport_id,
8749 						 vlan->vlan_id,
8750 						 true);
8751 
8752 		vlan->hd_tbl_status = false;
8753 		if (is_del_list) {
8754 			list_del(&vlan->node);
8755 			kfree(vlan);
8756 		}
8757 	}
8758 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
8759 }
8760 
8761 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8762 {
8763 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8764 	struct hclge_vport *vport;
8765 	int i;
8766 
8767 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8768 		vport = &hdev->vport[i];
8769 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8770 			list_del(&vlan->node);
8771 			kfree(vlan);
8772 		}
8773 	}
8774 }
8775 
8776 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8777 {
8778 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8779 	struct hclge_dev *hdev = vport->back;
8780 	u16 vlan_proto;
8781 	u16 vlan_id;
8782 	u16 state;
8783 	int ret;
8784 
8785 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8786 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8787 	state = vport->port_base_vlan_cfg.state;
8788 
8789 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8790 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8791 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8792 					 vport->vport_id, vlan_id,
8793 					 false);
8794 		return;
8795 	}
8796 
8797 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8798 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8799 					       vport->vport_id,
8800 					       vlan->vlan_id, false);
8801 		if (ret)
8802 			break;
8803 		vlan->hd_tbl_status = true;
8804 	}
8805 }
8806 
/* For global reset and imp reset, hardware clears the mac table, so we
 * change the mac address state from ACTIVE to TO_ADD; the addresses can then
 * be restored by the service task after the reset completes. Furthermore,
 * mac addresses in the TO_DEL or DEL_FAIL state do not need to be restored
 * after reset, so just remove these mac nodes from mac_list.
 */
8813 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8814 {
8815 	struct hclge_mac_node *mac_node, *tmp;
8816 
8817 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8818 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
8819 			mac_node->state = HCLGE_MAC_TO_ADD;
8820 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8821 			list_del(&mac_node->node);
8822 			kfree(mac_node);
8823 		}
8824 	}
8825 }
8826 
8827 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8828 {
8829 	spin_lock_bh(&vport->mac_list_lock);
8830 
8831 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8832 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8833 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8834 
8835 	spin_unlock_bh(&vport->mac_list_lock);
8836 }
8837 
8838 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8839 {
8840 	struct hclge_vport *vport = &hdev->vport[0];
8841 	struct hnae3_handle *handle = &vport->nic;
8842 
8843 	hclge_restore_mac_table_common(vport);
8844 	hclge_restore_vport_vlan_table(vport);
8845 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8846 
8847 	hclge_restore_fd_entries(handle);
8848 }
8849 
8850 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8851 {
8852 	struct hclge_vport *vport = hclge_get_vport(handle);
8853 
8854 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8855 		vport->rxvlan_cfg.strip_tag1_en = false;
8856 		vport->rxvlan_cfg.strip_tag2_en = enable;
8857 	} else {
8858 		vport->rxvlan_cfg.strip_tag1_en = enable;
8859 		vport->rxvlan_cfg.strip_tag2_en = true;
8860 	}
8861 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8862 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8863 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8864 
8865 	return hclge_set_vlan_rx_offload_cfg(vport);
8866 }
8867 
8868 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8869 					    u16 port_base_vlan_state,
8870 					    struct hclge_vlan_info *new_info,
8871 					    struct hclge_vlan_info *old_info)
8872 {
8873 	struct hclge_dev *hdev = vport->back;
8874 	int ret;
8875 
8876 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8877 		hclge_rm_vport_all_vlan_table(vport, false);
8878 		return hclge_set_vlan_filter_hw(hdev,
8879 						 htons(new_info->vlan_proto),
8880 						 vport->vport_id,
8881 						 new_info->vlan_tag,
8882 						 false);
8883 	}
8884 
8885 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8886 				       vport->vport_id, old_info->vlan_tag,
8887 				       true);
8888 	if (ret)
8889 		return ret;
8890 
8891 	return hclge_add_vport_all_vlan_table(vport);
8892 }
8893 
8894 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8895 				    struct hclge_vlan_info *vlan_info)
8896 {
8897 	struct hnae3_handle *nic = &vport->nic;
8898 	struct hclge_vlan_info *old_vlan_info;
8899 	struct hclge_dev *hdev = vport->back;
8900 	int ret;
8901 
8902 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8903 
8904 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8905 	if (ret)
8906 		return ret;
8907 
8908 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8909 		/* add new VLAN tag */
8910 		ret = hclge_set_vlan_filter_hw(hdev,
8911 					       htons(vlan_info->vlan_proto),
8912 					       vport->vport_id,
8913 					       vlan_info->vlan_tag,
8914 					       false);
8915 		if (ret)
8916 			return ret;
8917 
8918 		/* remove old VLAN tag */
8919 		ret = hclge_set_vlan_filter_hw(hdev,
8920 					       htons(old_vlan_info->vlan_proto),
8921 					       vport->vport_id,
8922 					       old_vlan_info->vlan_tag,
8923 					       true);
8924 		if (ret)
8925 			return ret;
8926 
8927 		goto update;
8928 	}
8929 
8930 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8931 					       old_vlan_info);
8932 	if (ret)
8933 		return ret;
8934 
	/* update state only when disabling/enabling port based VLAN */
8936 	vport->port_base_vlan_cfg.state = state;
8937 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8938 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8939 	else
8940 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8941 
8942 update:
8943 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8944 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8945 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8946 
8947 	return 0;
8948 }
8949 
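/* Decide how the port based VLAN configuration changes for the requested
 * vlan tag: a non-zero tag enables it when it is currently disabled; when
 * it is enabled, a zero tag disables it, the same tag is a no-op and a
 * different tag modifies it.
 */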
8950 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8951 					  enum hnae3_port_base_vlan_state state,
8952 					  u16 vlan)
8953 {
8954 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8955 		if (!vlan)
8956 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8957 		else
8958 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8959 	} else {
8960 		if (!vlan)
8961 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8962 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8963 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8964 		else
8965 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8966 	}
8967 }
8968 
8969 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8970 				    u16 vlan, u8 qos, __be16 proto)
8971 {
8972 	struct hclge_vport *vport = hclge_get_vport(handle);
8973 	struct hclge_dev *hdev = vport->back;
8974 	struct hclge_vlan_info vlan_info;
8975 	u16 state;
8976 	int ret;
8977 
8978 	if (hdev->pdev->revision == 0x20)
8979 		return -EOPNOTSUPP;
8980 
8981 	vport = hclge_get_vf_vport(hdev, vfid);
8982 	if (!vport)
8983 		return -EINVAL;
8984 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8986 	if (vlan > VLAN_N_VID - 1 || qos > 7)
8987 		return -EINVAL;
8988 	if (proto != htons(ETH_P_8021Q))
8989 		return -EPROTONOSUPPORT;
8990 
8991 	state = hclge_get_port_base_vlan_state(vport,
8992 					       vport->port_base_vlan_cfg.state,
8993 					       vlan);
8994 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8995 		return 0;
8996 
8997 	vlan_info.vlan_tag = vlan;
8998 	vlan_info.qos = qos;
8999 	vlan_info.vlan_proto = ntohs(proto);
9000 
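	/* apply the change directly when the VF is not alive; otherwise push
	 * the new port based vlan info to the VF
	 */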
9001 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9002 		return hclge_update_port_base_vlan_cfg(vport, state,
9003 						       &vlan_info);
9004 	} else {
9005 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9006 							vport->vport_id, state,
9007 							vlan, qos,
9008 							ntohs(proto));
9009 		return ret;
9010 	}
9011 }
9012 
9013 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9014 {
9015 	struct hclge_vlan_info *vlan_info;
9016 	struct hclge_vport *vport;
9017 	int ret;
9018 	int vf;
9019 
	/* clear the port based vlan for all VFs */
9021 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9022 		vport = &hdev->vport[vf];
9023 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9024 
9025 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9026 					       vport->vport_id,
9027 					       vlan_info->vlan_tag, true);
9028 		if (ret)
9029 			dev_err(&hdev->pdev->dev,
9030 				"failed to clear vf vlan for vf%d, ret = %d\n",
9031 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9032 	}
9033 }
9034 
9035 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9036 			  u16 vlan_id, bool is_kill)
9037 {
9038 	struct hclge_vport *vport = hclge_get_vport(handle);
9039 	struct hclge_dev *hdev = vport->back;
9040 	bool writen_to_tbl = false;
9041 	int ret = 0;
9042 
	/* When the device is resetting, the firmware is unable to handle the
	 * mailbox. Just record the vlan id, and remove it after the reset
	 * has finished.
	 */
9047 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9048 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9049 		return -EBUSY;
9050 	}
9051 
	/* When port based vlan is enabled, the port based vlan tag is used as
	 * the vlan filter entry. In this case the vlan filter table is not
	 * updated when the user adds or removes a vlan; only the vport vlan
	 * list is updated. The vlan ids in the list are written to the vlan
	 * filter table once port based vlan is disabled.
	 */
9058 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9059 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9060 					       vlan_id, is_kill);
9061 		writen_to_tbl = true;
9062 	}
9063 
9064 	if (!ret) {
9065 		if (is_kill)
9066 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9067 		else
9068 			hclge_add_vport_vlan_table(vport, vlan_id,
9069 						   writen_to_tbl);
9070 	} else if (is_kill) {
		/* When removing the hw vlan filter failed, record the vlan id
		 * and try to remove it from hw later, to stay consistent with
		 * the stack.
		 */
9075 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9076 	}
9077 	return ret;
9078 }
9079 
9080 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9081 {
9082 #define HCLGE_MAX_SYNC_COUNT	60
9083 
9084 	int i, ret, sync_cnt = 0;
9085 	u16 vlan_id;
9086 
	/* retry the vlan ids that failed to be removed from hw for each
	 * vport; at most HCLGE_MAX_SYNC_COUNT entries are handled per call
	 */
9088 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9089 		struct hclge_vport *vport = &hdev->vport[i];
9090 
9091 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9092 					 VLAN_N_VID);
9093 		while (vlan_id != VLAN_N_VID) {
9094 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9095 						       vport->vport_id, vlan_id,
9096 						       true);
9097 			if (ret && ret != -EINVAL)
9098 				return;
9099 
9100 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9101 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9102 
9103 			sync_cnt++;
9104 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9105 				return;
9106 
9107 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9108 						 VLAN_N_VID);
9109 		}
9110 	}
9111 }
9112 
9113 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9114 {
9115 	struct hclge_config_max_frm_size_cmd *req;
9116 	struct hclge_desc desc;
9117 
9118 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9119 
9120 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9121 	req->max_frm_size = cpu_to_le16(new_mps);
9122 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9123 
9124 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9125 }
9126 
9127 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9128 {
9129 	struct hclge_vport *vport = hclge_get_vport(handle);
9130 
9131 	return hclge_set_vport_mtu(vport, new_mtu);
9132 }
9133 
9134 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9135 {
9136 	struct hclge_dev *hdev = vport->back;
9137 	int i, max_frm_size, ret;
9138 
	/* HW supports 2 layers of vlan */
9140 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9141 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9142 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
9143 		return -EINVAL;
9144 
9145 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9146 	mutex_lock(&hdev->vport_lock);
9147 	/* VF's mps must fit within hdev->mps */
9148 	if (vport->vport_id && max_frm_size > hdev->mps) {
9149 		mutex_unlock(&hdev->vport_lock);
9150 		return -EINVAL;
9151 	} else if (vport->vport_id) {
9152 		vport->mps = max_frm_size;
9153 		mutex_unlock(&hdev->vport_lock);
9154 		return 0;
9155 	}
9156 
	/* PF's mps must be greater than VF's mps */
9158 	for (i = 1; i < hdev->num_alloc_vport; i++)
9159 		if (max_frm_size < hdev->vport[i].mps) {
9160 			mutex_unlock(&hdev->vport_lock);
9161 			return -EINVAL;
9162 		}
9163 
9164 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9165 
9166 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9167 	if (ret) {
9168 		dev_err(&hdev->pdev->dev,
9169 			"Change mtu fail, ret =%d\n", ret);
9170 		goto out;
9171 	}
9172 
9173 	hdev->mps = max_frm_size;
9174 	vport->mps = max_frm_size;
9175 
9176 	ret = hclge_buffer_alloc(hdev);
9177 	if (ret)
9178 		dev_err(&hdev->pdev->dev,
9179 			"Allocate buffer fail, ret =%d\n", ret);
9180 
9181 out:
9182 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9183 	mutex_unlock(&hdev->vport_lock);
9184 	return ret;
9185 }
9186 
9187 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9188 				    bool enable)
9189 {
9190 	struct hclge_reset_tqp_queue_cmd *req;
9191 	struct hclge_desc desc;
9192 	int ret;
9193 
9194 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9195 
9196 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9197 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9198 	if (enable)
9199 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9200 
9201 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9202 	if (ret) {
9203 		dev_err(&hdev->pdev->dev,
9204 			"Send tqp reset cmd error, status =%d\n", ret);
9205 		return ret;
9206 	}
9207 
9208 	return 0;
9209 }
9210 
9211 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9212 {
9213 	struct hclge_reset_tqp_queue_cmd *req;
9214 	struct hclge_desc desc;
9215 	int ret;
9216 
9217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9218 
9219 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9220 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9221 
9222 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9223 	if (ret) {
9224 		dev_err(&hdev->pdev->dev,
9225 			"Get reset status error, status =%d\n", ret);
9226 		return ret;
9227 	}
9228 
9229 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9230 }
9231 
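/* convert a queue id local to the handle into the device-global TQP index */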
9232 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9233 {
9234 	struct hnae3_queue *queue;
9235 	struct hclge_tqp *tqp;
9236 
9237 	queue = handle->kinfo.tqp[queue_id];
9238 	tqp = container_of(queue, struct hclge_tqp, q);
9239 
9240 	return tqp->index;
9241 }
9242 
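/* Reset one TQP: disable the queue, assert the TQP reset through the
 * command queue, poll the reset status until the hardware is ready,
 * then deassert the reset again.
 */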
9243 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9244 {
9245 	struct hclge_vport *vport = hclge_get_vport(handle);
9246 	struct hclge_dev *hdev = vport->back;
9247 	int reset_try_times = 0;
9248 	int reset_status;
9249 	u16 queue_gid;
9250 	int ret;
9251 
9252 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9253 
9254 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9255 	if (ret) {
9256 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9257 		return ret;
9258 	}
9259 
9260 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9261 	if (ret) {
9262 		dev_err(&hdev->pdev->dev,
9263 			"Send reset tqp cmd fail, ret = %d\n", ret);
9264 		return ret;
9265 	}
9266 
9267 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9268 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9269 		if (reset_status)
9270 			break;
9271 
9272 		/* Wait for tqp hw reset */
9273 		usleep_range(1000, 1200);
9274 	}
9275 
	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
		return -ETIME;
	}
9280 
9281 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9282 	if (ret)
9283 		dev_err(&hdev->pdev->dev,
9284 			"Deassert the soft reset fail, ret = %d\n", ret);
9285 
9286 	return ret;
9287 }
9288 
9289 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9290 {
9291 	struct hclge_dev *hdev = vport->back;
9292 	int reset_try_times = 0;
9293 	int reset_status;
9294 	u16 queue_gid;
9295 	int ret;
9296 
9297 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9298 
9299 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9300 	if (ret) {
9301 		dev_warn(&hdev->pdev->dev,
9302 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9303 		return;
9304 	}
9305 
9306 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9307 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9308 		if (reset_status)
9309 			break;
9310 
9311 		/* Wait for tqp hw reset */
9312 		usleep_range(1000, 1200);
9313 	}
9314 
9315 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9316 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9317 		return;
9318 	}
9319 
9320 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9321 	if (ret)
9322 		dev_warn(&hdev->pdev->dev,
9323 			 "Deassert the soft reset fail, ret = %d\n", ret);
9324 }
9325 
9326 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9327 {
9328 	struct hclge_vport *vport = hclge_get_vport(handle);
9329 	struct hclge_dev *hdev = vport->back;
9330 
9331 	return hdev->fw_version;
9332 }
9333 
9334 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9335 {
9336 	struct phy_device *phydev = hdev->hw.mac.phydev;
9337 
9338 	if (!phydev)
9339 		return;
9340 
9341 	phy_set_asym_pause(phydev, rx_en, tx_en);
9342 }
9343 
9344 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9345 {
9346 	int ret;
9347 
9348 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9349 		return 0;
9350 
9351 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9352 	if (ret)
9353 		dev_err(&hdev->pdev->dev,
9354 			"configure pauseparam error, ret = %d.\n", ret);
9355 
9356 	return ret;
9357 }
9358 
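/* Resolve the pause configuration from the local and link partner
 * advertisements with mii_resolve_flowctrl_fdx() and apply it; pause is
 * disabled on half duplex links.
 */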
9359 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9360 {
9361 	struct phy_device *phydev = hdev->hw.mac.phydev;
9362 	u16 remote_advertising = 0;
9363 	u16 local_advertising;
9364 	u32 rx_pause, tx_pause;
9365 	u8 flowctl;
9366 
9367 	if (!phydev->link || !phydev->autoneg)
9368 		return 0;
9369 
9370 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9371 
9372 	if (phydev->pause)
9373 		remote_advertising = LPA_PAUSE_CAP;
9374 
9375 	if (phydev->asym_pause)
9376 		remote_advertising |= LPA_PAUSE_ASYM;
9377 
9378 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9379 					   remote_advertising);
9380 	tx_pause = flowctl & FLOW_CTRL_TX;
9381 	rx_pause = flowctl & FLOW_CTRL_RX;
9382 
9383 	if (phydev->duplex == HCLGE_MAC_HALF) {
9384 		tx_pause = 0;
9385 		rx_pause = 0;
9386 	}
9387 
9388 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9389 }
9390 
9391 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9392 				 u32 *rx_en, u32 *tx_en)
9393 {
9394 	struct hclge_vport *vport = hclge_get_vport(handle);
9395 	struct hclge_dev *hdev = vport->back;
9396 	struct phy_device *phydev = hdev->hw.mac.phydev;
9397 
9398 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9399 
9400 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9401 		*rx_en = 0;
9402 		*tx_en = 0;
9403 		return;
9404 	}
9405 
9406 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9407 		*rx_en = 1;
9408 		*tx_en = 0;
9409 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9410 		*tx_en = 1;
9411 		*rx_en = 0;
9412 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9413 		*rx_en = 1;
9414 		*tx_en = 1;
9415 	} else {
9416 		*rx_en = 0;
9417 		*tx_en = 0;
9418 	}
9419 }
9420 
9421 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9422 					 u32 rx_en, u32 tx_en)
9423 {
9424 	if (rx_en && tx_en)
9425 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9426 	else if (rx_en && !tx_en)
9427 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9428 	else if (!rx_en && tx_en)
9429 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9430 	else
9431 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9432 
9433 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9434 }
9435 
9436 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9437 				u32 rx_en, u32 tx_en)
9438 {
9439 	struct hclge_vport *vport = hclge_get_vport(handle);
9440 	struct hclge_dev *hdev = vport->back;
9441 	struct phy_device *phydev = hdev->hw.mac.phydev;
9442 	u32 fc_autoneg;
9443 
9444 	if (phydev) {
9445 		fc_autoneg = hclge_get_autoneg(handle);
9446 		if (auto_neg != fc_autoneg) {
9447 			dev_info(&hdev->pdev->dev,
9448 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9449 			return -EOPNOTSUPP;
9450 		}
9451 	}
9452 
9453 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9454 		dev_info(&hdev->pdev->dev,
9455 			 "Priority flow control enabled. Cannot set link flow control.\n");
9456 		return -EOPNOTSUPP;
9457 	}
9458 
9459 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9460 
9461 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9462 
9463 	if (!auto_neg)
9464 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9465 
9466 	if (phydev)
9467 		return phy_start_aneg(phydev);
9468 
9469 	return -EOPNOTSUPP;
9470 }
9471 
9472 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9473 					  u8 *auto_neg, u32 *speed, u8 *duplex)
9474 {
9475 	struct hclge_vport *vport = hclge_get_vport(handle);
9476 	struct hclge_dev *hdev = vport->back;
9477 
9478 	if (speed)
9479 		*speed = hdev->hw.mac.speed;
9480 	if (duplex)
9481 		*duplex = hdev->hw.mac.duplex;
9482 	if (auto_neg)
9483 		*auto_neg = hdev->hw.mac.autoneg;
9484 }
9485 
9486 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9487 				 u8 *module_type)
9488 {
9489 	struct hclge_vport *vport = hclge_get_vport(handle);
9490 	struct hclge_dev *hdev = vport->back;
9491 
	/* When the nic is down, the service task is not running and does not
	 * update the port information periodically. Query the port
	 * information before returning the media type to make sure it is
	 * up to date.
	 */
9496 	hclge_update_port_info(hdev);
9497 
9498 	if (media_type)
9499 		*media_type = hdev->hw.mac.media_type;
9500 
9501 	if (module_type)
9502 		*module_type = hdev->hw.mac.module_type;
9503 }
9504 
9505 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9506 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
9507 {
9508 	struct hclge_vport *vport = hclge_get_vport(handle);
9509 	struct hclge_dev *hdev = vport->back;
9510 	struct phy_device *phydev = hdev->hw.mac.phydev;
9511 	int mdix_ctrl, mdix, is_resolved;
9512 	unsigned int retval;
9513 
9514 	if (!phydev) {
9515 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9516 		*tp_mdix = ETH_TP_MDI_INVALID;
9517 		return;
9518 	}
9519 
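	/* the MDI-X control/status registers live in a separate PHY page:
	 * switch to the MDIX page, read them, then restore the copper page
	 */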
9520 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9521 
9522 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9523 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9524 				    HCLGE_PHY_MDIX_CTRL_S);
9525 
9526 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9527 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9528 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9529 
9530 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9531 
9532 	switch (mdix_ctrl) {
9533 	case 0x0:
9534 		*tp_mdix_ctrl = ETH_TP_MDI;
9535 		break;
9536 	case 0x1:
9537 		*tp_mdix_ctrl = ETH_TP_MDI_X;
9538 		break;
9539 	case 0x3:
9540 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9541 		break;
9542 	default:
9543 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9544 		break;
9545 	}
9546 
9547 	if (!is_resolved)
9548 		*tp_mdix = ETH_TP_MDI_INVALID;
9549 	else if (mdix)
9550 		*tp_mdix = ETH_TP_MDI_X;
9551 	else
9552 		*tp_mdix = ETH_TP_MDI;
9553 }
9554 
9555 static void hclge_info_show(struct hclge_dev *hdev)
9556 {
9557 	struct device *dev = &hdev->pdev->dev;
9558 
9559 	dev_info(dev, "PF info begin:\n");
9560 
	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Number of vmdq vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9567 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9568 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9569 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9570 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9571 	dev_info(dev, "This is %s PF\n",
9572 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9573 	dev_info(dev, "DCB %s\n",
9574 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9575 	dev_info(dev, "MQPRIO %s\n",
9576 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9577 
9578 	dev_info(dev, "PF info end.\n");
9579 }
9580 
9581 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9582 					  struct hclge_vport *vport)
9583 {
9584 	struct hnae3_client *client = vport->nic.client;
9585 	struct hclge_dev *hdev = ae_dev->priv;
9586 	int rst_cnt = hdev->rst_stats.reset_cnt;
9587 	int ret;
9588 
9589 	ret = client->ops->init_instance(&vport->nic);
9590 	if (ret)
9591 		return ret;
9592 
9593 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9594 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9595 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9596 		ret = -EBUSY;
9597 		goto init_nic_err;
9598 	}
9599 
9600 	/* Enable nic hw error interrupts */
9601 	ret = hclge_config_nic_hw_error(hdev, true);
9602 	if (ret) {
9603 		dev_err(&ae_dev->pdev->dev,
9604 			"fail(%d) to enable hw error interrupts\n", ret);
9605 		goto init_nic_err;
9606 	}
9607 
9608 	hnae3_set_client_init_flag(client, ae_dev, 1);
9609 
9610 	if (netif_msg_drv(&hdev->vport->nic))
9611 		hclge_info_show(hdev);
9612 
9613 	return ret;
9614 
9615 init_nic_err:
9616 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9617 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9618 		msleep(HCLGE_WAIT_RESET_DONE);
9619 
9620 	client->ops->uninit_instance(&vport->nic, 0);
9621 
9622 	return ret;
9623 }
9624 
9625 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9626 					   struct hclge_vport *vport)
9627 {
9628 	struct hclge_dev *hdev = ae_dev->priv;
9629 	struct hnae3_client *client;
9630 	int rst_cnt;
9631 	int ret;
9632 
9633 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9634 	    !hdev->nic_client)
9635 		return 0;
9636 
9637 	client = hdev->roce_client;
9638 	ret = hclge_init_roce_base_info(vport);
9639 	if (ret)
9640 		return ret;
9641 
9642 	rst_cnt = hdev->rst_stats.reset_cnt;
9643 	ret = client->ops->init_instance(&vport->roce);
9644 	if (ret)
9645 		return ret;
9646 
9647 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9648 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9649 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9650 		ret = -EBUSY;
9651 		goto init_roce_err;
9652 	}
9653 
9654 	/* Enable roce ras interrupts */
9655 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9656 	if (ret) {
9657 		dev_err(&ae_dev->pdev->dev,
9658 			"fail(%d) to enable roce ras interrupts\n", ret);
9659 		goto init_roce_err;
9660 	}
9661 
9662 	hnae3_set_client_init_flag(client, ae_dev, 1);
9663 
9664 	return 0;
9665 
9666 init_roce_err:
9667 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9668 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9669 		msleep(HCLGE_WAIT_RESET_DONE);
9670 
9671 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9672 
9673 	return ret;
9674 }
9675 
9676 static int hclge_init_client_instance(struct hnae3_client *client,
9677 				      struct hnae3_ae_dev *ae_dev)
9678 {
9679 	struct hclge_dev *hdev = ae_dev->priv;
9680 	struct hclge_vport *vport;
9681 	int i, ret;
9682 
	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9684 		vport = &hdev->vport[i];
9685 
9686 		switch (client->type) {
9687 		case HNAE3_CLIENT_KNIC:
9688 			hdev->nic_client = client;
9689 			vport->nic.client = client;
9690 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9691 			if (ret)
9692 				goto clear_nic;
9693 
9694 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9695 			if (ret)
9696 				goto clear_roce;
9697 
9698 			break;
9699 		case HNAE3_CLIENT_ROCE:
9700 			if (hnae3_dev_roce_supported(hdev)) {
9701 				hdev->roce_client = client;
9702 				vport->roce.client = client;
9703 			}
9704 
9705 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9706 			if (ret)
9707 				goto clear_roce;
9708 
9709 			break;
9710 		default:
9711 			return -EINVAL;
9712 		}
9713 	}
9714 
9715 	return 0;
9716 
9717 clear_nic:
9718 	hdev->nic_client = NULL;
9719 	vport->nic.client = NULL;
9720 	return ret;
9721 clear_roce:
9722 	hdev->roce_client = NULL;
9723 	vport->roce.client = NULL;
9724 	return ret;
9725 }
9726 
9727 static void hclge_uninit_client_instance(struct hnae3_client *client,
9728 					 struct hnae3_ae_dev *ae_dev)
9729 {
9730 	struct hclge_dev *hdev = ae_dev->priv;
9731 	struct hclge_vport *vport;
9732 	int i;
9733 
9734 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9735 		vport = &hdev->vport[i];
9736 		if (hdev->roce_client) {
9737 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9738 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9739 				msleep(HCLGE_WAIT_RESET_DONE);
9740 
9741 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9742 								0);
9743 			hdev->roce_client = NULL;
9744 			vport->roce.client = NULL;
9745 		}
9746 		if (client->type == HNAE3_CLIENT_ROCE)
9747 			return;
9748 		if (hdev->nic_client && client->ops->uninit_instance) {
9749 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9750 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9751 				msleep(HCLGE_WAIT_RESET_DONE);
9752 
9753 			client->ops->uninit_instance(&vport->nic, 0);
9754 			hdev->nic_client = NULL;
9755 			vport->nic.client = NULL;
9756 		}
9757 	}
9758 }
9759 
9760 static int hclge_pci_init(struct hclge_dev *hdev)
9761 {
9762 	struct pci_dev *pdev = hdev->pdev;
9763 	struct hclge_hw *hw;
9764 	int ret;
9765 
9766 	ret = pci_enable_device(pdev);
9767 	if (ret) {
9768 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9769 		return ret;
9770 	}
9771 
9772 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9773 	if (ret) {
9774 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9775 		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA\n");
9778 			goto err_disable_device;
9779 		}
9780 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9781 	}
9782 
9783 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9784 	if (ret) {
9785 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9786 		goto err_disable_device;
9787 	}
9788 
9789 	pci_set_master(pdev);
9790 	hw = &hdev->hw;
9791 	hw->io_base = pcim_iomap(pdev, 2, 0);
9792 	if (!hw->io_base) {
9793 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9794 		ret = -ENOMEM;
9795 		goto err_clr_master;
9796 	}
9797 
9798 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9799 
9800 	return 0;
9801 err_clr_master:
9802 	pci_clear_master(pdev);
9803 	pci_release_regions(pdev);
9804 err_disable_device:
9805 	pci_disable_device(pdev);
9806 
9807 	return ret;
9808 }
9809 
9810 static void hclge_pci_uninit(struct hclge_dev *hdev)
9811 {
9812 	struct pci_dev *pdev = hdev->pdev;
9813 
9814 	pcim_iounmap(pdev, hdev->hw.io_base);
9815 	pci_free_irq_vectors(pdev);
9816 	pci_clear_master(pdev);
9817 	pci_release_mem_regions(pdev);
9818 	pci_disable_device(pdev);
9819 }
9820 
9821 static void hclge_state_init(struct hclge_dev *hdev)
9822 {
9823 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9824 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9825 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9826 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9827 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9828 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9829 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9830 }
9831 
9832 static void hclge_state_uninit(struct hclge_dev *hdev)
9833 {
9834 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9835 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9836 
9837 	if (hdev->reset_timer.function)
9838 		del_timer_sync(&hdev->reset_timer);
9839 	if (hdev->service_task.work.func)
9840 		cancel_delayed_work_sync(&hdev->service_task);
9841 }
9842 
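/* Prepare the device for an FLR: take the reset semaphore and get the
 * hardware ready, retrying when the preparation fails or another reset is
 * still pending, then mask the misc vector until the FLR is done.
 */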
9843 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9844 {
9845 #define HCLGE_FLR_RETRY_WAIT_MS	500
9846 #define HCLGE_FLR_RETRY_CNT	5
9847 
9848 	struct hclge_dev *hdev = ae_dev->priv;
9849 	int retry_cnt = 0;
9850 	int ret;
9851 
9852 retry:
9853 	down(&hdev->reset_sem);
9854 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9855 	hdev->reset_type = HNAE3_FLR_RESET;
9856 	ret = hclge_reset_prepare(hdev);
9857 	if (ret || hdev->reset_pending) {
9858 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9859 			ret);
9860 		if (hdev->reset_pending ||
9861 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9862 			dev_err(&hdev->pdev->dev,
9863 				"reset_pending:0x%lx, retry_cnt:%d\n",
9864 				hdev->reset_pending, retry_cnt);
9865 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9866 			up(&hdev->reset_sem);
9867 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
9868 			goto retry;
9869 		}
9870 	}
9871 
	/* disable the misc vector before the FLR is done */
9873 	hclge_enable_vector(&hdev->misc_vector, false);
9874 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9875 	hdev->rst_stats.flr_rst_cnt++;
9876 }
9877 
9878 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9879 {
9880 	struct hclge_dev *hdev = ae_dev->priv;
9881 	int ret;
9882 
9883 	hclge_enable_vector(&hdev->misc_vector, true);
9884 
9885 	ret = hclge_reset_rebuild(hdev);
9886 	if (ret)
9887 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9888 
9889 	hdev->reset_type = HNAE3_NONE_RESET;
9890 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9891 	up(&hdev->reset_sem);
9892 }
9893 
9894 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9895 {
9896 	u16 i;
9897 
9898 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9899 		struct hclge_vport *vport = &hdev->vport[i];
9900 		int ret;
9901 
		/* Send cmd to clear VF's FUNC_RST_ING */
9903 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9904 		if (ret)
9905 			dev_warn(&hdev->pdev->dev,
9906 				 "clear vf(%u) rst failed %d!\n",
9907 				 vport->vport_id, ret);
9908 	}
9909 }
9910 
9911 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9912 {
9913 	struct pci_dev *pdev = ae_dev->pdev;
9914 	struct hclge_dev *hdev;
9915 	int ret;
9916 
9917 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9918 	if (!hdev)
9919 		return -ENOMEM;
9920 
9921 	hdev->pdev = pdev;
9922 	hdev->ae_dev = ae_dev;
9923 	hdev->reset_type = HNAE3_NONE_RESET;
9924 	hdev->reset_level = HNAE3_FUNC_RESET;
9925 	ae_dev->priv = hdev;
9926 
	/* HW supports 2 layers of vlan */
9928 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9929 
9930 	mutex_init(&hdev->vport_lock);
9931 	spin_lock_init(&hdev->fd_rule_lock);
9932 	sema_init(&hdev->reset_sem, 1);
9933 
9934 	ret = hclge_pci_init(hdev);
9935 	if (ret)
9936 		goto out;
9937 
9938 	/* Firmware command queue initialize */
9939 	ret = hclge_cmd_queue_init(hdev);
9940 	if (ret)
9941 		goto err_pci_uninit;
9942 
9943 	/* Firmware command initialize */
9944 	ret = hclge_cmd_init(hdev);
9945 	if (ret)
9946 		goto err_cmd_uninit;
9947 
9948 	ret = hclge_get_cap(hdev);
9949 	if (ret)
9950 		goto err_cmd_uninit;
9951 
9952 	ret = hclge_configure(hdev);
9953 	if (ret) {
9954 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9955 		goto err_cmd_uninit;
9956 	}
9957 
9958 	ret = hclge_init_msi(hdev);
9959 	if (ret) {
9960 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9961 		goto err_cmd_uninit;
9962 	}
9963 
9964 	ret = hclge_misc_irq_init(hdev);
9965 	if (ret)
9966 		goto err_msi_uninit;
9967 
9968 	ret = hclge_alloc_tqps(hdev);
9969 	if (ret) {
9970 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9971 		goto err_msi_irq_uninit;
9972 	}
9973 
9974 	ret = hclge_alloc_vport(hdev);
9975 	if (ret)
9976 		goto err_msi_irq_uninit;
9977 
9978 	ret = hclge_map_tqp(hdev);
9979 	if (ret)
9980 		goto err_msi_irq_uninit;
9981 
9982 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9983 		ret = hclge_mac_mdio_config(hdev);
9984 		if (ret)
9985 			goto err_msi_irq_uninit;
9986 	}
9987 
9988 	ret = hclge_init_umv_space(hdev);
9989 	if (ret)
9990 		goto err_mdiobus_unreg;
9991 
9992 	ret = hclge_mac_init(hdev);
9993 	if (ret) {
9994 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9995 		goto err_mdiobus_unreg;
9996 	}
9997 
9998 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9999 	if (ret) {
10000 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10001 		goto err_mdiobus_unreg;
10002 	}
10003 
10004 	ret = hclge_config_gro(hdev, true);
10005 	if (ret)
10006 		goto err_mdiobus_unreg;
10007 
10008 	ret = hclge_init_vlan_config(hdev);
10009 	if (ret) {
10010 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10011 		goto err_mdiobus_unreg;
10012 	}
10013 
10014 	ret = hclge_tm_schd_init(hdev);
10015 	if (ret) {
10016 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10017 		goto err_mdiobus_unreg;
10018 	}
10019 
10020 	hclge_rss_init_cfg(hdev);
10021 	ret = hclge_rss_init_hw(hdev);
10022 	if (ret) {
10023 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10024 		goto err_mdiobus_unreg;
10025 	}
10026 
10027 	ret = init_mgr_tbl(hdev);
10028 	if (ret) {
10029 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10030 		goto err_mdiobus_unreg;
10031 	}
10032 
10033 	ret = hclge_init_fd_config(hdev);
10034 	if (ret) {
10035 		dev_err(&pdev->dev,
10036 			"fd table init fail, ret=%d\n", ret);
10037 		goto err_mdiobus_unreg;
10038 	}
10039 
10040 	INIT_KFIFO(hdev->mac_tnl_log);
10041 
10042 	hclge_dcb_ops_set(hdev);
10043 
10044 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10045 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10046 
	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify callback.
	 */
10050 	hclge_misc_affinity_setup(hdev);
10051 
10052 	hclge_clear_all_event_cause(hdev);
10053 	hclge_clear_resetting_state(hdev);
10054 
	/* Log and clear the hw errors that have already occurred */
10056 	hclge_handle_all_hns_hw_errors(ae_dev);
10057 
	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on a PF could affect the pending initialization of
	 * other PFs.
	 */
10061 	if (ae_dev->hw_err_reset_req) {
10062 		enum hnae3_reset_type reset_level;
10063 
10064 		reset_level = hclge_get_reset_level(ae_dev,
10065 						    &ae_dev->hw_err_reset_req);
10066 		hclge_set_def_reset_request(ae_dev, reset_level);
10067 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10068 	}
10069 
10070 	/* Enable MISC vector(vector0) */
10071 	hclge_enable_vector(&hdev->misc_vector, true);
10072 
10073 	hclge_state_init(hdev);
10074 	hdev->last_reset_time = jiffies;
10075 
10076 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10077 		 HCLGE_DRIVER_NAME);
10078 
10079 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10080 
10081 	return 0;
10082 
10083 err_mdiobus_unreg:
10084 	if (hdev->hw.mac.phydev)
10085 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10086 err_msi_irq_uninit:
10087 	hclge_misc_irq_uninit(hdev);
10088 err_msi_uninit:
10089 	pci_free_irq_vectors(pdev);
10090 err_cmd_uninit:
10091 	hclge_cmd_uninit(hdev);
10092 err_pci_uninit:
10093 	pcim_iounmap(pdev, hdev->hw.io_base);
10094 	pci_clear_master(pdev);
10095 	pci_release_regions(pdev);
10096 	pci_disable_device(pdev);
10097 out:
10098 	mutex_destroy(&hdev->vport_lock);
10099 	return ret;
10100 }
10101 
10102 static void hclge_stats_clear(struct hclge_dev *hdev)
10103 {
10104 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10105 }
10106 
10107 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10108 {
10109 	return hclge_config_switch_param(hdev, vf, enable,
10110 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10111 }
10112 
10113 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10114 {
10115 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10116 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10117 					  enable, vf);
10118 }
10119 
10120 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10121 {
10122 	int ret;
10123 
10124 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10125 	if (ret) {
10126 		dev_err(&hdev->pdev->dev,
10127 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10128 			vf, enable ? "on" : "off", ret);
10129 		return ret;
10130 	}
10131 
10132 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10133 	if (ret)
10134 		dev_err(&hdev->pdev->dev,
10135 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10136 			vf, enable ? "on" : "off", ret);
10137 
10138 	return ret;
10139 }
10140 
10141 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10142 				 bool enable)
10143 {
10144 	struct hclge_vport *vport = hclge_get_vport(handle);
10145 	struct hclge_dev *hdev = vport->back;
10146 	u32 new_spoofchk = enable ? 1 : 0;
10147 	int ret;
10148 
10149 	if (hdev->pdev->revision == 0x20)
10150 		return -EOPNOTSUPP;
10151 
10152 	vport = hclge_get_vf_vport(hdev, vf);
10153 	if (!vport)
10154 		return -EINVAL;
10155 
10156 	if (vport->vf_info.spoofchk == new_spoofchk)
10157 		return 0;
10158 
10159 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10160 		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enabling spoof check may cause its packets to fail to send\n",
10162 			 vf);
10163 	else if (enable && hclge_is_umv_space_full(vport, true))
10164 		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enabling spoof check may cause its packets to fail to send\n",
10166 			 vf);
10167 
10168 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10169 	if (ret)
10170 		return ret;
10171 
10172 	vport->vf_info.spoofchk = new_spoofchk;
10173 	return 0;
10174 }
10175 
10176 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10177 {
10178 	struct hclge_vport *vport = hdev->vport;
10179 	int ret;
10180 	int i;
10181 
10182 	if (hdev->pdev->revision == 0x20)
10183 		return 0;
10184 
10185 	/* resume the vf spoof check state after reset */
10186 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10187 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10188 					       vport->vf_info.spoofchk);
10189 		if (ret)
10190 			return ret;
10191 
10192 		vport++;
10193 	}
10194 
10195 	return 0;
10196 }
10197 
10198 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10199 {
10200 	struct hclge_vport *vport = hclge_get_vport(handle);
10201 	struct hclge_dev *hdev = vport->back;
10202 	u32 new_trusted = enable ? 1 : 0;
10203 	bool en_bc_pmc;
10204 	int ret;
10205 
10206 	vport = hclge_get_vf_vport(hdev, vf);
10207 	if (!vport)
10208 		return -EINVAL;
10209 
10210 	if (vport->vf_info.trusted == new_trusted)
10211 		return 0;
10212 
10213 	/* Disable promisc mode for VF if it is not trusted any more. */
10214 	if (!enable && vport->vf_info.promisc_enable) {
10215 		en_bc_pmc = hdev->pdev->revision != 0x20;
10216 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10217 						   en_bc_pmc);
10218 		if (ret)
10219 			return ret;
10220 		vport->vf_info.promisc_enable = 0;
10221 		hclge_inform_vf_promisc_info(vport);
10222 	}
10223 
10224 	vport->vf_info.trusted = new_trusted;
10225 
10226 	return 0;
10227 }
10228 
10229 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10230 {
10231 	int ret;
10232 	int vf;
10233 
10234 	/* reset vf rate to default value */
10235 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10236 		struct hclge_vport *vport = &hdev->vport[vf];
10237 
10238 		vport->vf_info.max_tx_rate = 0;
10239 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10240 		if (ret)
10241 			dev_err(&hdev->pdev->dev,
10242 				"vf%d failed to reset to default, ret=%d\n",
10243 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10244 	}
10245 }
10246 
10247 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10248 				     int min_tx_rate, int max_tx_rate)
10249 {
10250 	if (min_tx_rate != 0 ||
10251 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10252 		dev_err(&hdev->pdev->dev,
10253 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10254 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10255 		return -EINVAL;
10256 	}
10257 
10258 	return 0;
10259 }
10260 
10261 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10262 			     int min_tx_rate, int max_tx_rate, bool force)
10263 {
10264 	struct hclge_vport *vport = hclge_get_vport(handle);
10265 	struct hclge_dev *hdev = vport->back;
10266 	int ret;
10267 
10268 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10269 	if (ret)
10270 		return ret;
10271 
10272 	vport = hclge_get_vf_vport(hdev, vf);
10273 	if (!vport)
10274 		return -EINVAL;
10275 
10276 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10277 		return 0;
10278 
10279 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10280 	if (ret)
10281 		return ret;
10282 
10283 	vport->vf_info.max_tx_rate = max_tx_rate;
10284 
10285 	return 0;
10286 }
10287 
10288 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10289 {
10290 	struct hnae3_handle *handle = &hdev->vport->nic;
10291 	struct hclge_vport *vport;
10292 	int ret;
10293 	int vf;
10294 
10295 	/* resume the vf max_tx_rate after reset */
10296 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10297 		vport = hclge_get_vf_vport(hdev, vf);
10298 		if (!vport)
10299 			return -EINVAL;
10300 
		/* Zero means the maximum rate; after a reset the firmware has
		 * already set it to the maximum rate, so just continue.
		 */
10304 		if (!vport->vf_info.max_tx_rate)
10305 			continue;
10306 
10307 		ret = hclge_set_vf_rate(handle, vf, 0,
10308 					vport->vf_info.max_tx_rate, true);
10309 		if (ret) {
10310 			dev_err(&hdev->pdev->dev,
10311 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10312 				vf, vport->vf_info.max_tx_rate, ret);
10313 			return ret;
10314 		}
10315 	}
10316 
10317 	return 0;
10318 }
10319 
10320 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10321 {
10322 	struct hclge_vport *vport = hdev->vport;
10323 	int i;
10324 
10325 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10326 		hclge_vport_stop(vport);
10327 		vport++;
10328 	}
10329 }
10330 
10331 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10332 {
10333 	struct hclge_dev *hdev = ae_dev->priv;
10334 	struct pci_dev *pdev = ae_dev->pdev;
10335 	int ret;
10336 
10337 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10338 
10339 	hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not need to clear or restore the pf and vf
	 * table entries, so the tables in memory should not be cleaned here.
	 */
10343 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10344 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10345 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10346 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10347 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10348 		hclge_reset_umv_space(hdev);
10349 	}
10350 
10351 	ret = hclge_cmd_init(hdev);
10352 	if (ret) {
10353 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10354 		return ret;
10355 	}
10356 
10357 	ret = hclge_map_tqp(hdev);
10358 	if (ret) {
10359 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10360 		return ret;
10361 	}
10362 
10363 	ret = hclge_mac_init(hdev);
10364 	if (ret) {
10365 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10366 		return ret;
10367 	}
10368 
10369 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10370 	if (ret) {
10371 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10372 		return ret;
10373 	}
10374 
10375 	ret = hclge_config_gro(hdev, true);
10376 	if (ret)
10377 		return ret;
10378 
10379 	ret = hclge_init_vlan_config(hdev);
10380 	if (ret) {
10381 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10382 		return ret;
10383 	}
10384 
10385 	ret = hclge_tm_init_hw(hdev, true);
10386 	if (ret) {
10387 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10388 		return ret;
10389 	}
10390 
10391 	ret = hclge_rss_init_hw(hdev);
10392 	if (ret) {
10393 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10394 		return ret;
10395 	}
10396 
10397 	ret = init_mgr_tbl(hdev);
10398 	if (ret) {
10399 		dev_err(&pdev->dev,
10400 			"failed to reinit manager table, ret = %d\n", ret);
10401 		return ret;
10402 	}
10403 
10404 	ret = hclge_init_fd_config(hdev);
10405 	if (ret) {
10406 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10407 		return ret;
10408 	}
10409 
	/* Log and clear the hw errors that have already occurred */
10411 	hclge_handle_all_hns_hw_errors(ae_dev);
10412 
10413 	/* Re-enable the hw error interrupts because
10414 	 * the interrupts get disabled on global reset.
10415 	 */
10416 	ret = hclge_config_nic_hw_error(hdev, true);
10417 	if (ret) {
10418 		dev_err(&pdev->dev,
10419 			"fail(%d) to re-enable NIC hw error interrupts\n",
10420 			ret);
10421 		return ret;
10422 	}
10423 
10424 	if (hdev->roce_client) {
10425 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
10426 		if (ret) {
10427 			dev_err(&pdev->dev,
10428 				"fail(%d) to re-enable roce ras interrupts\n",
10429 				ret);
10430 			return ret;
10431 		}
10432 	}
10433 
10434 	hclge_reset_vport_state(hdev);
10435 	ret = hclge_reset_vport_spoofchk(hdev);
10436 	if (ret)
10437 		return ret;
10438 
10439 	ret = hclge_resume_vf_rate(hdev);
10440 	if (ret)
10441 		return ret;
10442 
10443 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10444 		 HCLGE_DRIVER_NAME);
10445 
10446 	return 0;
10447 }
10448 
10449 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10450 {
10451 	struct hclge_dev *hdev = ae_dev->priv;
10452 	struct hclge_mac *mac = &hdev->hw.mac;
10453 
10454 	hclge_reset_vf_rate(hdev);
10455 	hclge_clear_vf_vlan(hdev);
10456 	hclge_misc_affinity_teardown(hdev);
10457 	hclge_state_uninit(hdev);
10458 	hclge_uninit_mac_table(hdev);
10459 
10460 	if (mac->phydev)
10461 		mdiobus_unregister(mac->mdio_bus);
10462 
10463 	/* Disable MISC vector(vector0) */
10464 	hclge_enable_vector(&hdev->misc_vector, false);
10465 	synchronize_irq(hdev->misc_vector.vector_irq);
10466 
10467 	/* Disable all hw interrupts */
10468 	hclge_config_mac_tnl_int(hdev, false);
10469 	hclge_config_nic_hw_error(hdev, false);
10470 	hclge_config_rocee_ras_interrupt(hdev, false);
10471 
10472 	hclge_cmd_uninit(hdev);
10473 	hclge_misc_irq_uninit(hdev);
10474 	hclge_pci_uninit(hdev);
10475 	mutex_destroy(&hdev->vport_lock);
10476 	hclge_uninit_vport_vlan_table(hdev);
10477 	ae_dev->priv = NULL;
10478 }
10479 
10480 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10481 {
10482 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10483 	struct hclge_vport *vport = hclge_get_vport(handle);
10484 	struct hclge_dev *hdev = vport->back;
10485 
10486 	return min_t(u32, hdev->rss_size_max,
10487 		     vport->alloc_tqps / kinfo->num_tc);
10488 }
10489 
10490 static void hclge_get_channels(struct hnae3_handle *handle,
10491 			       struct ethtool_channels *ch)
10492 {
10493 	ch->max_combined = hclge_get_max_channels(handle);
10494 	ch->other_count = 1;
10495 	ch->max_other = 1;
10496 	ch->combined_count = handle->kinfo.rss_size;
10497 }
10498 
10499 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10500 					u16 *alloc_tqps, u16 *max_rss_size)
10501 {
10502 	struct hclge_vport *vport = hclge_get_vport(handle);
10503 	struct hclge_dev *hdev = vport->back;
10504 
10505 	*alloc_tqps = vport->alloc_tqps;
10506 	*max_rss_size = hdev->rss_size_max;
10507 }
10508 
10509 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10510 			      bool rxfh_configured)
10511 {
10512 	struct hclge_vport *vport = hclge_get_vport(handle);
10513 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10514 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10515 	struct hclge_dev *hdev = vport->back;
10516 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10517 	u16 cur_rss_size = kinfo->rss_size;
10518 	u16 cur_tqps = kinfo->num_tqps;
10519 	u16 tc_valid[HCLGE_MAX_TC_NUM];
10520 	u16 roundup_size;
10521 	u32 *rss_indir;
10522 	unsigned int i;
10523 	int ret;
10524 
10525 	kinfo->req_rss_size = new_tqps_num;
10526 
10527 	ret = hclge_tm_vport_map_update(hdev);
10528 	if (ret) {
10529 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10530 		return ret;
10531 	}
10532 
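	/* round rss_size up to a power of two and use its log2 as the
	 * per-TC size programmed into the RSS TC mode
	 */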
10533 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
10534 	roundup_size = ilog2(roundup_size);
10535 	/* Set the RSS TC mode according to the new RSS size */
10536 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10537 		tc_valid[i] = 0;
10538 
10539 		if (!(hdev->hw_tc_map & BIT(i)))
10540 			continue;
10541 
10542 		tc_valid[i] = 1;
10543 		tc_size[i] = roundup_size;
10544 		tc_offset[i] = kinfo->rss_size * i;
10545 	}
10546 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10547 	if (ret)
10548 		return ret;
10549 
	/* the RSS indirection table has been configured by the user */
10551 	if (rxfh_configured)
10552 		goto out;
10553 
	/* Reinitialize the rss indirection table according to the new RSS size */
10555 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10556 	if (!rss_indir)
10557 		return -ENOMEM;
10558 
10559 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10560 		rss_indir[i] = i % kinfo->rss_size;
10561 
10562 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10563 	if (ret)
10564 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10565 			ret);
10566 
10567 	kfree(rss_indir);
10568 
10569 out:
10570 	if (!ret)
10571 		dev_info(&hdev->pdev->dev,
10572 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10573 			 cur_rss_size, kinfo->rss_size,
10574 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10575 
10576 	return ret;
10577 }
10578 
10579 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10580 			      u32 *regs_num_64_bit)
10581 {
10582 	struct hclge_desc desc;
10583 	u32 total_num;
10584 	int ret;
10585 
10586 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10587 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10588 	if (ret) {
10589 		dev_err(&hdev->pdev->dev,
10590 			"Query register number cmd failed, ret = %d.\n", ret);
10591 		return ret;
10592 	}
10593 
10594 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10595 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10596 
10597 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10598 	if (!total_num)
10599 		return -EINVAL;
10600 
10601 	return 0;
10602 }
10603 
10604 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10605 				 void *data)
10606 {
10607 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10608 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10609 
10610 	struct hclge_desc *desc;
10611 	u32 *reg_val = data;
10612 	__le32 *desc_data;
10613 	int nodata_num;
10614 	int cmd_num;
10615 	int i, k, n;
10616 	int ret;
10617 
10618 	if (regs_num == 0)
10619 		return 0;
10620 
10621 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10622 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10623 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10624 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10625 	if (!desc)
10626 		return -ENOMEM;
10627 
10628 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10629 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10630 	if (ret) {
10631 		dev_err(&hdev->pdev->dev,
10632 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10633 		kfree(desc);
10634 		return ret;
10635 	}
10636 
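	/* the first descriptor carries HCLGE_32_BIT_DESC_NODATA_LEN fewer
	 * register values than the following descriptors
	 */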
10637 	for (i = 0; i < cmd_num; i++) {
10638 		if (i == 0) {
10639 			desc_data = (__le32 *)(&desc[i].data[0]);
10640 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10641 		} else {
10642 			desc_data = (__le32 *)(&desc[i]);
10643 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10644 		}
10645 		for (k = 0; k < n; k++) {
10646 			*reg_val++ = le32_to_cpu(*desc_data++);
10647 
10648 			regs_num--;
10649 			if (!regs_num)
10650 				break;
10651 		}
10652 	}
10653 
10654 	kfree(desc);
10655 	return 0;
10656 }
10657 
10658 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10659 				 void *data)
10660 {
10661 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10662 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10663 
10664 	struct hclge_desc *desc;
10665 	u64 *reg_val = data;
10666 	__le64 *desc_data;
10667 	int nodata_len;
10668 	int cmd_num;
10669 	int i, k, n;
10670 	int ret;
10671 
10672 	if (regs_num == 0)
10673 		return 0;
10674 
10675 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10676 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10677 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10678 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10679 	if (!desc)
10680 		return -ENOMEM;
10681 
10682 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10683 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10684 	if (ret) {
10685 		dev_err(&hdev->pdev->dev,
10686 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10687 		kfree(desc);
10688 		return ret;
10689 	}
10690 
10691 	for (i = 0; i < cmd_num; i++) {
10692 		if (i == 0) {
10693 			desc_data = (__le64 *)(&desc[i].data[0]);
10694 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10695 		} else {
10696 			desc_data = (__le64 *)(&desc[i]);
10697 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10698 		}
10699 		for (k = 0; k < n; k++) {
10700 			*reg_val++ = le64_to_cpu(*desc_data++);
10701 
10702 			regs_num--;
10703 			if (!regs_num)
10704 				break;
10705 		}
10706 	}
10707 
10708 	kfree(desc);
10709 	return 0;
10710 }
10711 
10712 #define MAX_SEPARATE_NUM	4
10713 #define SEPARATOR_VALUE		0xFDFCFBFA
10714 #define REG_NUM_PER_LINE	4
10715 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10716 #define REG_SEPARATOR_LINE	1
10717 #define REG_NUM_REMAIN_MASK	3
10718 #define BD_LIST_MAX_NUM		30
10719 
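/* Query the number of command BDs used by each DFX register type. The
 * descriptors are chained: every BD except the last one carries the
 * NEXT flag.
 */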
10720 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10721 {
10722 	int i;
10723 
	/* initialize the command BDs except the last one */
10725 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10726 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10727 					   true);
10728 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10729 	}
10730 
10731 	/* initialize the last command BD */
10732 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10733 
10734 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10735 }
10736 
10737 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10738 				    int *bd_num_list,
10739 				    u32 type_num)
10740 {
10741 	u32 entries_per_desc, desc_index, index, offset, i;
10742 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10743 	int ret;
10744 
10745 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10746 	if (ret) {
10747 		dev_err(&hdev->pdev->dev,
10748 			"Get dfx bd num fail, status is %d.\n", ret);
10749 		return ret;
10750 	}
10751 
10752 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10753 	for (i = 0; i < type_num; i++) {
10754 		offset = hclge_dfx_bd_offset_list[i];
10755 		index = offset % entries_per_desc;
10756 		desc_index = offset / entries_per_desc;
10757 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10758 	}
10759 
10760 	return ret;
10761 }
10762 
10763 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10764 				  struct hclge_desc *desc_src, int bd_num,
10765 				  enum hclge_opcode_type cmd)
10766 {
10767 	struct hclge_desc *desc = desc_src;
10768 	int i, ret;
10769 
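	/* chain bd_num descriptors: all but the last one set the NEXT flag */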
10770 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10771 	for (i = 0; i < bd_num - 1; i++) {
10772 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10773 		desc++;
10774 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10775 	}
10776 
10777 	desc = desc_src;
10778 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10779 	if (ret)
10780 		dev_err(&hdev->pdev->dev,
10781 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10782 			cmd, ret);
10783 
10784 	return ret;
10785 }
10786 
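/* Copy the register values from the descriptors into the output buffer and
 * append SEPARATOR_VALUE padding words; returns the total number of u32
 * words written.
 */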
10787 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10788 				    void *data)
10789 {
10790 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10791 	struct hclge_desc *desc = desc_src;
10792 	u32 *reg = data;
10793 
10794 	entries_per_desc = ARRAY_SIZE(desc->data);
10795 	reg_num = entries_per_desc * bd_num;
10796 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10797 	for (i = 0; i < reg_num; i++) {
10798 		index = i % entries_per_desc;
10799 		desc_index = i / entries_per_desc;
10800 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10801 	}
10802 	for (i = 0; i < separator_num; i++)
10803 		*reg++ = SEPARATOR_VALUE;
10804 
10805 	return reg_num + separator_num;
10806 }
10807 
10808 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10809 {
10810 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10811 	int data_len_per_desc, bd_num, i;
10812 	int bd_num_list[BD_LIST_MAX_NUM];
10813 	u32 data_len;
10814 	int ret;
10815 
10816 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10817 	if (ret) {
10818 		dev_err(&hdev->pdev->dev,
10819 			"Get dfx reg bd num fail, status is %d.\n", ret);
10820 		return ret;
10821 	}
10822 
10823 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
10824 	*len = 0;
10825 	for (i = 0; i < dfx_reg_type_num; i++) {
10826 		bd_num = bd_num_list[i];
10827 		data_len = data_len_per_desc * bd_num;
10828 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10829 	}
10830 
10831 	return ret;
10832 }
10833 
10834 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10835 {
10836 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10837 	int bd_num, bd_num_max, buf_len, i;
10838 	int bd_num_list[BD_LIST_MAX_NUM];
10839 	struct hclge_desc *desc_src;
10840 	u32 *reg = data;
10841 	int ret;
10842 
10843 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10844 	if (ret) {
10845 		dev_err(&hdev->pdev->dev,
10846 			"Get dfx reg bd num fail, status is %d.\n", ret);
10847 		return ret;
10848 	}
10849 
10850 	bd_num_max = bd_num_list[0];
10851 	for (i = 1; i < dfx_reg_type_num; i++)
10852 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10853 
10854 	buf_len = sizeof(*desc_src) * bd_num_max;
10855 	desc_src = kzalloc(buf_len, GFP_KERNEL);
10856 	if (!desc_src)
10857 		return -ENOMEM;
10858 
10859 	for (i = 0; i < dfx_reg_type_num; i++) {
10860 		bd_num = bd_num_list[i];
10861 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10862 					     hclge_dfx_reg_opcode_list[i]);
10863 		if (ret) {
10864 			dev_err(&hdev->pdev->dev,
10865 				"Get dfx reg fail, status is %d.\n", ret);
10866 			break;
10867 		}
10868 
10869 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10870 	}
10871 
10872 	kfree(desc_src);
10873 	return ret;
10874 }
10875 
10876 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10877 			      struct hnae3_knic_private_info *kinfo)
10878 {
10879 #define HCLGE_RING_REG_OFFSET		0x200
10880 #define HCLGE_RING_INT_REG_OFFSET	0x4
10881 
10882 	int i, j, reg_num, separator_num;
10883 	int data_num_sum;
10884 	u32 *reg = data;
10885 
	/* fetch per-PF register values from the PF PCIe register space */
10887 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10888 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10889 	for (i = 0; i < reg_num; i++)
10890 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10891 	for (i = 0; i < separator_num; i++)
10892 		*reg++ = SEPARATOR_VALUE;
10893 	data_num_sum = reg_num + separator_num;
10894 
10895 	reg_num = ARRAY_SIZE(common_reg_addr_list);
10896 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10897 	for (i = 0; i < reg_num; i++)
10898 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10899 	for (i = 0; i < separator_num; i++)
10900 		*reg++ = SEPARATOR_VALUE;
10901 	data_num_sum += reg_num + separator_num;
10902 
10903 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
10904 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10905 	for (j = 0; j < kinfo->num_tqps; j++) {
10906 		for (i = 0; i < reg_num; i++)
10907 			*reg++ = hclge_read_dev(&hdev->hw,
10908 						ring_reg_addr_list[i] +
10909 						HCLGE_RING_REG_OFFSET * j);
10910 		for (i = 0; i < separator_num; i++)
10911 			*reg++ = SEPARATOR_VALUE;
10912 	}
10913 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10914 
10915 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10916 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10917 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
10918 		for (i = 0; i < reg_num; i++)
10919 			*reg++ = hclge_read_dev(&hdev->hw,
10920 						tqp_intr_reg_addr_list[i] +
10921 						HCLGE_RING_INT_REG_OFFSET * j);
10922 		for (i = 0; i < separator_num; i++)
10923 			*reg++ = SEPARATOR_VALUE;
10924 	}
10925 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10926 
10927 	return data_num_sum;
10928 }
10929 
10930 static int hclge_get_regs_len(struct hnae3_handle *handle)
10931 {
10932 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10933 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10934 	struct hclge_vport *vport = hclge_get_vport(handle);
10935 	struct hclge_dev *hdev = vport->back;
10936 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10937 	int regs_lines_32_bit, regs_lines_64_bit;
10938 	int ret;
10939 
10940 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10941 	if (ret) {
10942 		dev_err(&hdev->pdev->dev,
10943 			"Get register number failed, ret = %d.\n", ret);
10944 		return ret;
10945 	}
10946 
10947 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10948 	if (ret) {
10949 		dev_err(&hdev->pdev->dev,
10950 			"Get dfx reg len failed, ret = %d.\n", ret);
10951 		return ret;
10952 	}
10953 
10954 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10955 		REG_SEPARATOR_LINE;
10956 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10957 		REG_SEPARATOR_LINE;
10958 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10959 		REG_SEPARATOR_LINE;
10960 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10961 		REG_SEPARATOR_LINE;
10962 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10963 		REG_SEPARATOR_LINE;
10964 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10965 		REG_SEPARATOR_LINE;
10966 
10967 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10968 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10969 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10970 }
10971 
10972 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10973 			   void *data)
10974 {
10975 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10976 	struct hclge_vport *vport = hclge_get_vport(handle);
10977 	struct hclge_dev *hdev = vport->back;
10978 	u32 regs_num_32_bit, regs_num_64_bit;
10979 	int i, reg_num, separator_num, ret;
10980 	u32 *reg = data;
10981 
10982 	*version = hdev->fw_version;
10983 
10984 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10985 	if (ret) {
10986 		dev_err(&hdev->pdev->dev,
10987 			"Get register number failed, ret = %d.\n", ret);
10988 		return;
10989 	}
10990 
10991 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10992 
10993 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10994 	if (ret) {
10995 		dev_err(&hdev->pdev->dev,
10996 			"Get 32 bit register failed, ret = %d.\n", ret);
10997 		return;
10998 	}
10999 	reg_num = regs_num_32_bit;
11000 	reg += reg_num;
11001 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11002 	for (i = 0; i < separator_num; i++)
11003 		*reg++ = SEPARATOR_VALUE;
11004 
11005 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11006 	if (ret) {
11007 		dev_err(&hdev->pdev->dev,
11008 			"Get 64 bit register failed, ret = %d.\n", ret);
11009 		return;
11010 	}
11011 	reg_num = regs_num_64_bit * 2;
11012 	reg += reg_num;
11013 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11014 	for (i = 0; i < separator_num; i++)
11015 		*reg++ = SEPARATOR_VALUE;
11016 
11017 	ret = hclge_get_dfx_reg(hdev, reg);
11018 	if (ret)
11019 		dev_err(&hdev->pdev->dev,
11020 			"Get dfx register failed, ret = %d.\n", ret);
11021 }
11022 
11023 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11024 {
11025 	struct hclge_set_led_state_cmd *req;
11026 	struct hclge_desc desc;
11027 	int ret;
11028 
11029 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11030 
11031 	req = (struct hclge_set_led_state_cmd *)desc.data;
11032 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11033 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11034 
11035 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret = %d\n", ret);
11039 
11040 	return ret;
11041 }
11042 
11043 enum hclge_led_status {
11044 	HCLGE_LED_OFF,
11045 	HCLGE_LED_ON,
11046 	HCLGE_LED_NO_CHANGE = 0xFF,
11047 };
11048 
11049 static int hclge_set_led_id(struct hnae3_handle *handle,
11050 			    enum ethtool_phys_id_state status)
11051 {
11052 	struct hclge_vport *vport = hclge_get_vport(handle);
11053 	struct hclge_dev *hdev = vport->back;
11054 
11055 	switch (status) {
11056 	case ETHTOOL_ID_ACTIVE:
11057 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11058 	case ETHTOOL_ID_INACTIVE:
11059 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11060 	default:
11061 		return -EINVAL;
11062 	}
11063 }
11064 
11065 static void hclge_get_link_mode(struct hnae3_handle *handle,
11066 				unsigned long *supported,
11067 				unsigned long *advertising)
11068 {
11069 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11070 	struct hclge_vport *vport = hclge_get_vport(handle);
11071 	struct hclge_dev *hdev = vport->back;
11072 	unsigned int idx = 0;
11073 
11074 	for (; idx < size; idx++) {
11075 		supported[idx] = hdev->hw.mac.supported[idx];
11076 		advertising[idx] = hdev->hw.mac.advertising[idx];
11077 	}
11078 }
11079 
11080 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11081 {
11082 	struct hclge_vport *vport = hclge_get_vport(handle);
11083 	struct hclge_dev *hdev = vport->back;
11084 
11085 	return hclge_config_gro(hdev, enable);
11086 }
11087 
11088 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11089 {
11090 	struct hclge_vport *vport = &hdev->vport[0];
11091 	struct hnae3_handle *handle = &vport->nic;
11092 	u8 tmp_flags = 0;
11093 	int ret;
11094 
11095 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11096 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11097 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11098 	}
11099 
11100 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11101 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11102 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11103 					     tmp_flags & HNAE3_MPE);
11104 		if (!ret) {
11105 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11106 			hclge_enable_vlan_filter(handle,
11107 						 tmp_flags & HNAE3_VLAN_FLTR);
11108 		}
11109 	}
11110 }
11111 
11112 static bool hclge_module_existed(struct hclge_dev *hdev)
11113 {
11114 	struct hclge_desc desc;
11115 	u32 existed;
11116 	int ret;
11117 
11118 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11119 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11120 	if (ret) {
11121 		dev_err(&hdev->pdev->dev,
11122 			"failed to get SFP exist state, ret = %d\n", ret);
11123 		return false;
11124 	}
11125 
11126 	existed = le32_to_cpu(desc.data[0]);
11127 
11128 	return existed != 0;
11129 }
11130 
/* a single read needs 6 BDs (140 bytes in total).
 * Returns the number of bytes actually read; 0 means the read failed.
 */
11134 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11135 				     u32 len, u8 *data)
11136 {
11137 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11138 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11139 	u16 read_len;
11140 	u16 copy_len;
11141 	int ret;
11142 	int i;
11143 
	/* set up all 6 BDs to read the module eeprom info. */
11145 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11146 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11147 					   true);
11148 
11149 		/* bd0~bd4 need next flag */
11150 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11151 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11152 	}
11153 
	/* set up BD0, which contains the offset and read length. */
11155 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11156 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11157 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11158 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11159 
11160 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11161 	if (ret) {
11162 		dev_err(&hdev->pdev->dev,
11163 			"failed to get SFP eeprom info, ret = %d\n", ret);
11164 		return 0;
11165 	}
11166 
11167 	/* copy sfp info from bd0 to out buffer. */
11168 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11169 	memcpy(data, sfp_info_bd0->data, copy_len);
11170 	read_len = copy_len;
11171 
11172 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11173 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11174 		if (read_len >= len)
11175 			return read_len;
11176 
11177 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11178 		memcpy(data + read_len, desc[i].data, copy_len);
11179 		read_len += copy_len;
11180 	}
11181 
11182 	return read_len;
11183 }
11184 
11185 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11186 				   u32 len, u8 *data)
11187 {
11188 	struct hclge_vport *vport = hclge_get_vport(handle);
11189 	struct hclge_dev *hdev = vport->back;
11190 	u32 read_len = 0;
11191 	u16 data_len;
11192 
11193 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11194 		return -EOPNOTSUPP;
11195 
11196 	if (!hclge_module_existed(hdev))
11197 		return -ENXIO;
11198 
11199 	while (read_len < len) {
11200 		data_len = hclge_get_sfp_eeprom_info(hdev,
11201 						     offset + read_len,
11202 						     len - read_len,
11203 						     data + read_len);
11204 		if (!data_len)
11205 			return -EIO;
11206 
11207 		read_len += data_len;
11208 	}
11209 
11210 	return 0;
11211 }
11212 
11213 static const struct hnae3_ae_ops hclge_ops = {
11214 	.init_ae_dev = hclge_init_ae_dev,
11215 	.uninit_ae_dev = hclge_uninit_ae_dev,
11216 	.flr_prepare = hclge_flr_prepare,
11217 	.flr_done = hclge_flr_done,
11218 	.init_client_instance = hclge_init_client_instance,
11219 	.uninit_client_instance = hclge_uninit_client_instance,
11220 	.map_ring_to_vector = hclge_map_ring_to_vector,
11221 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11222 	.get_vector = hclge_get_vector,
11223 	.put_vector = hclge_put_vector,
11224 	.set_promisc_mode = hclge_set_promisc_mode,
11225 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11226 	.set_loopback = hclge_set_loopback,
11227 	.start = hclge_ae_start,
11228 	.stop = hclge_ae_stop,
11229 	.client_start = hclge_client_start,
11230 	.client_stop = hclge_client_stop,
11231 	.get_status = hclge_get_status,
11232 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11233 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11234 	.get_media_type = hclge_get_media_type,
11235 	.check_port_speed = hclge_check_port_speed,
11236 	.get_fec = hclge_get_fec,
11237 	.set_fec = hclge_set_fec,
11238 	.get_rss_key_size = hclge_get_rss_key_size,
11239 	.get_rss_indir_size = hclge_get_rss_indir_size,
11240 	.get_rss = hclge_get_rss,
11241 	.set_rss = hclge_set_rss,
11242 	.set_rss_tuple = hclge_set_rss_tuple,
11243 	.get_rss_tuple = hclge_get_rss_tuple,
11244 	.get_tc_size = hclge_get_tc_size,
11245 	.get_mac_addr = hclge_get_mac_addr,
11246 	.set_mac_addr = hclge_set_mac_addr,
11247 	.do_ioctl = hclge_do_ioctl,
11248 	.add_uc_addr = hclge_add_uc_addr,
11249 	.rm_uc_addr = hclge_rm_uc_addr,
11250 	.add_mc_addr = hclge_add_mc_addr,
11251 	.rm_mc_addr = hclge_rm_mc_addr,
11252 	.set_autoneg = hclge_set_autoneg,
11253 	.get_autoneg = hclge_get_autoneg,
11254 	.restart_autoneg = hclge_restart_autoneg,
11255 	.halt_autoneg = hclge_halt_autoneg,
11256 	.get_pauseparam = hclge_get_pauseparam,
11257 	.set_pauseparam = hclge_set_pauseparam,
11258 	.set_mtu = hclge_set_mtu,
11259 	.reset_queue = hclge_reset_tqp,
11260 	.get_stats = hclge_get_stats,
11261 	.get_mac_stats = hclge_get_mac_stat,
11262 	.update_stats = hclge_update_stats,
11263 	.get_strings = hclge_get_strings,
11264 	.get_sset_count = hclge_get_sset_count,
11265 	.get_fw_version = hclge_get_fw_version,
11266 	.get_mdix_mode = hclge_get_mdix_mode,
11267 	.enable_vlan_filter = hclge_enable_vlan_filter,
11268 	.set_vlan_filter = hclge_set_vlan_filter,
11269 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11270 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11271 	.reset_event = hclge_reset_event,
11272 	.get_reset_level = hclge_get_reset_level,
11273 	.set_default_reset_request = hclge_set_def_reset_request,
11274 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11275 	.set_channels = hclge_set_channels,
11276 	.get_channels = hclge_get_channels,
11277 	.get_regs_len = hclge_get_regs_len,
11278 	.get_regs = hclge_get_regs,
11279 	.set_led_id = hclge_set_led_id,
11280 	.get_link_mode = hclge_get_link_mode,
11281 	.add_fd_entry = hclge_add_fd_entry,
11282 	.del_fd_entry = hclge_del_fd_entry,
11283 	.del_all_fd_entries = hclge_del_all_fd_entries,
11284 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11285 	.get_fd_rule_info = hclge_get_fd_rule_info,
11286 	.get_fd_all_rules = hclge_get_all_rules,
11287 	.enable_fd = hclge_enable_fd,
11288 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11289 	.dbg_run_cmd = hclge_dbg_run_cmd,
11290 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11291 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11292 	.ae_dev_resetting = hclge_ae_dev_resetting,
11293 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11294 	.set_gro_en = hclge_gro_en,
11295 	.get_global_queue_id = hclge_covert_handle_qid_global,
11296 	.set_timer_task = hclge_set_timer_task,
11297 	.mac_connect_phy = hclge_mac_connect_phy,
11298 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11299 	.get_vf_config = hclge_get_vf_config,
11300 	.set_vf_link_state = hclge_set_vf_link_state,
11301 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11302 	.set_vf_trust = hclge_set_vf_trust,
11303 	.set_vf_rate = hclge_set_vf_rate,
11304 	.set_vf_mac = hclge_set_vf_mac,
11305 	.get_module_eeprom = hclge_get_module_eeprom,
11306 	.get_cmdq_stat = hclge_get_cmdq_stat,
11307 };
11308 
11309 static struct hnae3_ae_algo ae_algo = {
11310 	.ops = &hclge_ops,
11311 	.pdev_id_table = ae_algo_pci_tbl,
11312 };
11313 
11314 static int hclge_init(void)
11315 {
11316 	pr_info("%s is initializing\n", HCLGE_NAME);
11317 
11318 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11319 	if (!hclge_wq) {
11320 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11321 		return -ENOMEM;
11322 	}
11323 
11324 	hnae3_register_ae_algo(&ae_algo);
11325 
11326 	return 0;
11327 }
11328 
11329 static void hclge_exit(void)
11330 {
11331 	hnae3_unregister_ae_algo(&ae_algo);
11332 	destroy_workqueue(hclge_wq);
11333 }
11334 module_init(hclge_init);
11335 module_exit(hclge_exit);
11336 
11337 MODULE_LICENSE("GPL");
11338 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11339 MODULE_DESCRIPTION("HCLGE Driver");
11340 MODULE_VERSION(HCLGE_MOD_VERSION);
11341