1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27 
28 #define HCLGE_NAME			"hclge"
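/* HCLGE_STATS_READ() and HCLGE_MAC_STATS_FIELD_OFF() below are used
 * together (see hclge_comm_get_stats()) to read a u64 counter from a
 * stats structure at the byte offset recorded for the named field.
 */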
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 
32 #define HCLGE_BUF_SIZE_UNIT	256U
33 #define HCLGE_BUF_MUL_BY	2
34 #define HCLGE_BUF_DIV_BY	2
35 #define NEED_RESERVE_TC_NUM	2
36 #define BUF_MAX_PERCENT		100
37 #define BUF_RESERVE_PERCENT	90
38 
39 #define HCLGE_RESET_MAX_FAIL_CNT	5
40 #define HCLGE_RESET_SYNC_TIME		100
41 #define HCLGE_PF_RESET_SYNC_TIME	20
42 #define HCLGE_PF_RESET_SYNC_CNT		1500
43 
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET        1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
48 #define HCLGE_DFX_IGU_BD_OFFSET         4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
51 #define HCLGE_DFX_NCSI_BD_OFFSET        7
52 #define HCLGE_DFX_RTC_BD_OFFSET         8
53 #define HCLGE_DFX_PPP_BD_OFFSET         9
54 #define HCLGE_DFX_RCB_BD_OFFSET         10
55 #define HCLGE_DFX_TQP_BD_OFFSET         11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
57 
58 #define HCLGE_LINK_STATUS_MS	10
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75 
76 static struct hnae3_ae_algo ae_algo;
77 
78 static struct workqueue_struct *hclge_wq;
79 
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89 	/* required last entry */
90 	{0, }
91 };
92 
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94 
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96 					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
97 					 HCLGE_NIC_CSQ_DEPTH_REG,
98 					 HCLGE_NIC_CSQ_TAIL_REG,
99 					 HCLGE_NIC_CSQ_HEAD_REG,
100 					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
101 					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
102 					 HCLGE_NIC_CRQ_DEPTH_REG,
103 					 HCLGE_NIC_CRQ_TAIL_REG,
104 					 HCLGE_NIC_CRQ_HEAD_REG,
105 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
106 					 HCLGE_CMDQ_INTR_STS_REG,
107 					 HCLGE_CMDQ_INTR_EN_REG,
108 					 HCLGE_CMDQ_INTR_GEN_REG};
109 
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111 					   HCLGE_PF_OTHER_INT_REG,
112 					   HCLGE_MISC_RESET_STS_REG,
113 					   HCLGE_MISC_VECTOR_INT_STS,
114 					   HCLGE_GLOBAL_RESET_REG,
115 					   HCLGE_FUN_RST_ING,
116 					   HCLGE_GRO_EN_REG};
117 
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119 					 HCLGE_RING_RX_ADDR_H_REG,
120 					 HCLGE_RING_RX_BD_NUM_REG,
121 					 HCLGE_RING_RX_BD_LENGTH_REG,
122 					 HCLGE_RING_RX_MERGE_EN_REG,
123 					 HCLGE_RING_RX_TAIL_REG,
124 					 HCLGE_RING_RX_HEAD_REG,
125 					 HCLGE_RING_RX_FBD_NUM_REG,
126 					 HCLGE_RING_RX_OFFSET_REG,
127 					 HCLGE_RING_RX_FBD_OFFSET_REG,
128 					 HCLGE_RING_RX_STASH_REG,
129 					 HCLGE_RING_RX_BD_ERR_REG,
130 					 HCLGE_RING_TX_ADDR_L_REG,
131 					 HCLGE_RING_TX_ADDR_H_REG,
132 					 HCLGE_RING_TX_BD_NUM_REG,
133 					 HCLGE_RING_TX_PRIORITY_REG,
134 					 HCLGE_RING_TX_TC_REG,
135 					 HCLGE_RING_TX_MERGE_EN_REG,
136 					 HCLGE_RING_TX_TAIL_REG,
137 					 HCLGE_RING_TX_HEAD_REG,
138 					 HCLGE_RING_TX_FBD_NUM_REG,
139 					 HCLGE_RING_TX_OFFSET_REG,
140 					 HCLGE_RING_TX_EBD_NUM_REG,
141 					 HCLGE_RING_TX_EBD_OFFSET_REG,
142 					 HCLGE_RING_TX_BD_ERR_REG,
143 					 HCLGE_RING_EN_REG};
144 
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146 					     HCLGE_TQP_INTR_GL0_REG,
147 					     HCLGE_TQP_INTR_GL1_REG,
148 					     HCLGE_TQP_INTR_GL2_REG,
149 					     HCLGE_TQP_INTR_RL_REG};
150 
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 	"App    Loopback test",
153 	"Serdes serial Loopback test",
154 	"Serdes parallel Loopback test",
155 	"Phy    Loopback test"
156 };
157 
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159 	{"mac_tx_mac_pause_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161 	{"mac_rx_mac_pause_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163 	{"mac_tx_control_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
165 	{"mac_rx_control_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
167 	{"mac_tx_pfc_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
169 	{"mac_tx_pfc_pri0_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
171 	{"mac_tx_pfc_pri1_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
173 	{"mac_tx_pfc_pri2_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
175 	{"mac_tx_pfc_pri3_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
177 	{"mac_tx_pfc_pri4_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
179 	{"mac_tx_pfc_pri5_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
181 	{"mac_tx_pfc_pri6_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
183 	{"mac_tx_pfc_pri7_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
185 	{"mac_rx_pfc_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
187 	{"mac_rx_pfc_pri0_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
189 	{"mac_rx_pfc_pri1_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
191 	{"mac_rx_pfc_pri2_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
193 	{"mac_rx_pfc_pri3_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
195 	{"mac_rx_pfc_pri4_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
197 	{"mac_rx_pfc_pri5_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
199 	{"mac_rx_pfc_pri6_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
201 	{"mac_rx_pfc_pri7_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
203 	{"mac_tx_total_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
205 	{"mac_tx_total_oct_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
207 	{"mac_tx_good_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
209 	{"mac_tx_bad_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
211 	{"mac_tx_good_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
213 	{"mac_tx_bad_oct_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
215 	{"mac_tx_uni_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
217 	{"mac_tx_multi_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
219 	{"mac_tx_broad_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
221 	{"mac_tx_undersize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
223 	{"mac_tx_oversize_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
225 	{"mac_tx_64_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
227 	{"mac_tx_65_127_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
229 	{"mac_tx_128_255_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
231 	{"mac_tx_256_511_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
233 	{"mac_tx_512_1023_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
235 	{"mac_tx_1024_1518_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
237 	{"mac_tx_1519_2047_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
239 	{"mac_tx_2048_4095_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
241 	{"mac_tx_4096_8191_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
243 	{"mac_tx_8192_9216_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
245 	{"mac_tx_9217_12287_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
247 	{"mac_tx_12288_16383_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
249 	{"mac_tx_1519_max_good_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
251 	{"mac_tx_1519_max_bad_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
253 	{"mac_rx_total_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
255 	{"mac_rx_total_oct_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
257 	{"mac_rx_good_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
259 	{"mac_rx_bad_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
261 	{"mac_rx_good_oct_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
263 	{"mac_rx_bad_oct_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
265 	{"mac_rx_uni_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
267 	{"mac_rx_multi_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
269 	{"mac_rx_broad_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
271 	{"mac_rx_undersize_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
273 	{"mac_rx_oversize_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
275 	{"mac_rx_64_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
277 	{"mac_rx_65_127_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
279 	{"mac_rx_128_255_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
281 	{"mac_rx_256_511_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
283 	{"mac_rx_512_1023_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
285 	{"mac_rx_1024_1518_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
287 	{"mac_rx_1519_2047_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
289 	{"mac_rx_2048_4095_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
291 	{"mac_rx_4096_8191_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
293 	{"mac_rx_8192_9216_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
295 	{"mac_rx_9217_12287_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
297 	{"mac_rx_12288_16383_oct_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
299 	{"mac_rx_1519_max_good_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
301 	{"mac_rx_1519_max_bad_pkt_num",
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303 
304 	{"mac_tx_fragment_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
306 	{"mac_tx_undermin_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
308 	{"mac_tx_jabber_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
310 	{"mac_tx_err_all_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
312 	{"mac_tx_from_app_good_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
314 	{"mac_tx_from_app_bad_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
316 	{"mac_rx_fragment_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
318 	{"mac_rx_undermin_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
320 	{"mac_rx_jabber_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
322 	{"mac_rx_fcs_err_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
324 	{"mac_rx_send_app_good_pkt_num",
325 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
326 	{"mac_rx_send_app_bad_pkt_num",
327 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
328 };
329 
330 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331 	{
332 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
333 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
334 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
335 		.i_port_bitmap = 0x1,
336 	},
337 };
338 
339 static const u8 hclge_hash_key[] = {
340 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
341 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
342 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
343 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
344 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
345 };
346 
347 static const u32 hclge_dfx_bd_offset_list[] = {
348 	HCLGE_DFX_BIOS_BD_OFFSET,
349 	HCLGE_DFX_SSU_0_BD_OFFSET,
350 	HCLGE_DFX_SSU_1_BD_OFFSET,
351 	HCLGE_DFX_IGU_BD_OFFSET,
352 	HCLGE_DFX_RPU_0_BD_OFFSET,
353 	HCLGE_DFX_RPU_1_BD_OFFSET,
354 	HCLGE_DFX_NCSI_BD_OFFSET,
355 	HCLGE_DFX_RTC_BD_OFFSET,
356 	HCLGE_DFX_PPP_BD_OFFSET,
357 	HCLGE_DFX_RCB_BD_OFFSET,
358 	HCLGE_DFX_TQP_BD_OFFSET,
359 	HCLGE_DFX_SSU_2_BD_OFFSET
360 };
361 
362 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
363 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
364 	HCLGE_OPC_DFX_SSU_REG_0,
365 	HCLGE_OPC_DFX_SSU_REG_1,
366 	HCLGE_OPC_DFX_IGU_EGU_REG,
367 	HCLGE_OPC_DFX_RPU_REG_0,
368 	HCLGE_OPC_DFX_RPU_REG_1,
369 	HCLGE_OPC_DFX_NCSI_REG,
370 	HCLGE_OPC_DFX_RTC_REG,
371 	HCLGE_OPC_DFX_PPP_REG,
372 	HCLGE_OPC_DFX_RCB_REG,
373 	HCLGE_OPC_DFX_TQP_REG,
374 	HCLGE_OPC_DFX_SSU_REG_2
375 };
376 
377 static const struct key_info meta_data_key_info[] = {
378 	{ PACKET_TYPE_ID, 6 },
379 	{ IP_FRAGEMENT, 1 },
380 	{ ROCE_TYPE, 1 },
381 	{ NEXT_KEY, 5 },
382 	{ VLAN_NUMBER, 2 },
383 	{ SRC_VPORT, 12 },
384 	{ DST_VPORT, 12 },
385 	{ TUNNEL_PACKET, 1 },
386 };
387 
388 static const struct key_info tuple_key_info[] = {
389 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
391 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
395 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
397 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
399 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
402 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
403 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
404 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
405 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
406 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
407 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
408 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
409 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
410 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
411 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
412 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
413 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
414 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
415 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
416 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
417 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
418 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
419 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
420 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
421 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
422 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
423 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
424 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
425 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
426 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
427 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
428 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
429 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
430 	{ INNER_DST_IP, 32, KEY_OPT_IP,
431 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
432 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
433 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
434 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
435 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
436 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
437 	  offsetof(struct hclge_fd_rule, tuples.src_port),
438 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
439 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
440 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
441 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
442 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
443 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
444 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
445 };
446 
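/* Read the MAC statistics with the fixed-size HCLGE_OPC_STATS_MAC command
 * (21 descriptors) and accumulate them into hdev->mac_stats. Used when the
 * firmware does not support querying the MAC register number first.
 */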
447 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 {
449 #define HCLGE_MAC_CMD_NUM 21
450 
451 	u64 *data = (u64 *)(&hdev->mac_stats);
452 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
453 	__le64 *desc_data;
454 	int i, k, n;
455 	int ret;
456 
457 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
458 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459 	if (ret) {
460 		dev_err(&hdev->pdev->dev,
461 			"Get MAC pkt stats fail, status = %d.\n", ret);
462 
463 		return ret;
464 	}
465 
466 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
467 		/* for special opcode 0032, only the first desc has the head */
468 		if (unlikely(i == 0)) {
469 			desc_data = (__le64 *)(&desc[i].data[0]);
470 			n = HCLGE_RD_FIRST_STATS_NUM;
471 		} else {
472 			desc_data = (__le64 *)(&desc[i]);
473 			n = HCLGE_RD_OTHER_STATS_NUM;
474 		}
475 
476 		for (k = 0; k < n; k++) {
477 			*data += le64_to_cpu(*desc_data);
478 			data++;
479 			desc_data++;
480 		}
481 	}
482 
483 	return 0;
484 }
485 
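/* Read all MAC statistics with HCLGE_OPC_STATS_MAC_ALL, using the
 * descriptor count derived from the firmware-reported register number,
 * and accumulate them into hdev->mac_stats.
 */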
486 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 {
488 	u64 *data = (u64 *)(&hdev->mac_stats);
489 	struct hclge_desc *desc;
490 	__le64 *desc_data;
491 	u16 i, k, n;
492 	int ret;
493 
494 	/* This may be called inside atomic sections,
495 	 * so GFP_ATOMIC is more suitable here
	 * so GFP_ATOMIC is more suitable here
496 	 */
497 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
498 	if (!desc)
499 		return -ENOMEM;
500 
501 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
502 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
503 	if (ret) {
504 		kfree(desc);
505 		return ret;
506 	}
507 
508 	for (i = 0; i < desc_num; i++) {
509 		/* for special opcode 0034, only the first desc has the head */
510 		if (i == 0) {
511 			desc_data = (__le64 *)(&desc[i].data[0]);
512 			n = HCLGE_RD_FIRST_STATS_NUM;
513 		} else {
514 			desc_data = (__le64 *)(&desc[i]);
515 			n = HCLGE_RD_OTHER_STATS_NUM;
516 		}
517 
518 		for (k = 0; k < n; k++) {
519 			*data += le64_to_cpu(*desc_data);
520 			data++;
521 			desc_data++;
522 		}
523 	}
524 
525 	kfree(desc);
526 
527 	return 0;
528 }
529 
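/* Query how many MAC statistics registers the firmware exposes and
 * convert that into the number of command descriptors needed to read
 * them all.
 */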
530 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 {
532 	struct hclge_desc desc;
533 	__le32 *desc_data;
534 	u32 reg_num;
535 	int ret;
536 
537 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539 	if (ret)
540 		return ret;
541 
542 	desc_data = (__le32 *)(&desc.data[0]);
543 	reg_num = le32_to_cpu(*desc_data);
544 
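	/* One descriptor covers the first 3 registers; every further 4
	 * registers (rounded up) need one more descriptor.
	 */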
545 	*desc_num = 1 + ((reg_num - 3) >> 2) +
546 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
547 
548 	return 0;
549 }
550 
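/* Update hdev->mac_stats, preferring the complete method when the
 * firmware supports querying the MAC register number, and falling back
 * to the fixed-size defective method when it reports -EOPNOTSUPP.
 */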
551 static int hclge_mac_update_stats(struct hclge_dev *hdev)
552 {
553 	u32 desc_num;
554 	int ret;
555 
556 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
557 	/* The firmware supports the new statistics acquisition method */
558 	if (!ret)
559 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
560 	else if (ret == -EOPNOTSUPP)
561 		ret = hclge_mac_update_stats_defective(hdev);
562 	else
563 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
564 
565 	return ret;
566 }
567 
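/* Accumulate the per-queue RX and TX packet counters reported by
 * HCLGE_OPC_QUERY_RX_STATS/HCLGE_OPC_QUERY_TX_STATS into each tqp's
 * tqp_stats.
 */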
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 {
570 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571 	struct hclge_vport *vport = hclge_get_vport(handle);
572 	struct hclge_dev *hdev = vport->back;
573 	struct hnae3_queue *queue;
574 	struct hclge_desc desc[1];
575 	struct hclge_tqp *tqp;
576 	int ret, i;
577 
578 	for (i = 0; i < kinfo->num_tqps; i++) {
579 		queue = handle->kinfo.tqp[i];
580 		tqp = container_of(queue, struct hclge_tqp, q);
581 		/* command : HCLGE_OPC_QUERY_RX_STATS */
582 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
583 					   true);
584 
585 		desc[0].data[0] = cpu_to_le32(tqp->index);
586 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
587 		if (ret) {
588 			dev_err(&hdev->pdev->dev,
589 				"Query tqp stat fail, status = %d, queue = %d\n",
590 				ret, i);
591 			return ret;
592 		}
593 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594 			le32_to_cpu(desc[0].data[1]);
595 	}
596 
597 	for (i = 0; i < kinfo->num_tqps; i++) {
598 		queue = handle->kinfo.tqp[i];
599 		tqp = container_of(queue, struct hclge_tqp, q);
600 		/* command : HCLGE_OPC_QUERY_TX_STATS */
601 		hclge_cmd_setup_basic_desc(&desc[0],
602 					   HCLGE_OPC_QUERY_TX_STATS,
603 					   true);
604 
605 		desc[0].data[0] = cpu_to_le32(tqp->index);
606 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
607 		if (ret) {
608 			dev_err(&hdev->pdev->dev,
609 				"Query tqp stat fail, status = %d, queue = %d\n",
610 				ret, i);
611 			return ret;
612 		}
613 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614 			le32_to_cpu(desc[0].data[1]);
615 	}
616 
617 	return 0;
618 }
619 
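/* Copy the accumulated per-queue TX and RX packet counters into the
 * stats buffer and return the position after the last entry.
 */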
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 {
622 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 	struct hclge_tqp *tqp;
624 	u64 *buff = data;
625 	int i;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
630 	}
631 
632 	for (i = 0; i < kinfo->num_tqps; i++) {
633 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
635 	}
636 
637 	return buff;
638 }
639 
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 {
642 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643 
644 	/* each tqp has a TX queue and an RX queue */
645 	return kinfo->num_tqps * 2;
646 }
647 
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 {
650 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
651 	u8 *buff = data;
652 	int i;
653 
654 	for (i = 0; i < kinfo->num_tqps; i++) {
655 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656 			struct hclge_tqp, q);
657 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658 			 tqp->index);
659 		buff = buff + ETH_GSTRING_LEN;
660 	}
661 
662 	for (i = 0; i < kinfo->num_tqps; i++) {
663 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664 			struct hclge_tqp, q);
665 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666 			 tqp->index);
667 		buff = buff + ETH_GSTRING_LEN;
668 	}
669 
670 	return buff;
671 }
672 
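/* Copy each counter described by @strs from @comm_stats into @data
 * using the recorded field offsets, and return the position after the
 * last entry.
 */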
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674 				 const struct hclge_comm_stats_str strs[],
675 				 int size, u64 *data)
676 {
677 	u64 *buf = data;
678 	u32 i;
679 
680 	for (i = 0; i < size; i++)
681 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
682 
683 	return buf + size;
684 }
685 
686 static u8 *hclge_comm_get_strings(u32 stringset,
687 				  const struct hclge_comm_stats_str strs[],
688 				  int size, u8 *data)
689 {
690 	char *buff = (char *)data;
691 	u32 i;
692 
693 	if (stringset != ETH_SS_STATS)
694 		return buff;
695 
696 	for (i = 0; i < size; i++) {
697 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698 		buff = buff + ETH_GSTRING_LEN;
699 	}
700 
701 	return (u8 *)buff;
702 }
703 
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 {
706 	struct hnae3_handle *handle;
707 	int status;
708 
709 	handle = &hdev->vport[0].nic;
710 	if (handle->client) {
711 		status = hclge_tqps_update_stats(handle);
712 		if (status) {
713 			dev_err(&hdev->pdev->dev,
714 				"Update TQPS stats fail, status = %d.\n",
715 				status);
716 		}
717 	}
718 
719 	status = hclge_mac_update_stats(hdev);
720 	if (status)
721 		dev_err(&hdev->pdev->dev,
722 			"Update MAC stats fail, status = %d.\n", status);
723 }
724 
725 static void hclge_update_stats(struct hnae3_handle *handle,
726 			       struct net_device_stats *net_stats)
727 {
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int status;
731 
732 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
733 		return;
734 
735 	status = hclge_mac_update_stats(hdev);
736 	if (status)
737 		dev_err(&hdev->pdev->dev,
738 			"Update MAC stats fail, status = %d.\n",
739 			status);
740 
741 	status = hclge_tqps_update_stats(handle);
742 	if (status)
743 		dev_err(&hdev->pdev->dev,
744 			"Update TQPS stats fail, status = %d.\n",
745 			status);
746 
747 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
748 }
749 
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 {
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
753 		HNAE3_SUPPORT_PHY_LOOPBACK | \
754 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
755 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756 
757 	struct hclge_vport *vport = hclge_get_vport(handle);
758 	struct hclge_dev *hdev = vport->back;
759 	int count = 0;
760 
761 	/* Loopback test support rules:
762 	 * mac: only supported in GE mode
763 	 * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
764 	 * phy: only supported when a phy device exists on the board
765 	 */
766 	if (stringset == ETH_SS_TEST) {
767 		/* clear loopback bit flags at first */
768 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
769 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773 			count += 1;
774 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
775 		}
776 
777 		count += 2;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780 
781 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782 		     hdev->hw.mac.phydev->drv->set_loopback) ||
783 		    hnae3_dev_phy_imp_supported(hdev)) {
784 			count += 1;
785 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786 		}
787 	} else if (stringset == ETH_SS_STATS) {
788 		count = ARRAY_SIZE(g_mac_stats_string) +
789 			hclge_tqps_get_sset_count(handle, stringset);
790 	}
791 
792 	return count;
793 }
794 
795 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
796 			      u8 *data)
797 {
798 	u8 *p = data;
799 	int size;
800 
801 	if (stringset == ETH_SS_STATS) {
802 		size = ARRAY_SIZE(g_mac_stats_string);
803 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804 					   size, p);
805 		p = hclge_tqps_get_strings(handle, p);
806 	} else if (stringset == ETH_SS_TEST) {
807 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
808 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809 			       ETH_GSTRING_LEN);
810 			p += ETH_GSTRING_LEN;
811 		}
812 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
813 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814 			       ETH_GSTRING_LEN);
815 			p += ETH_GSTRING_LEN;
816 		}
817 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818 			memcpy(p,
819 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820 			       ETH_GSTRING_LEN);
821 			p += ETH_GSTRING_LEN;
822 		}
823 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
824 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825 			       ETH_GSTRING_LEN);
826 			p += ETH_GSTRING_LEN;
827 		}
828 	}
829 }
830 
831 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 {
833 	struct hclge_vport *vport = hclge_get_vport(handle);
834 	struct hclge_dev *hdev = vport->back;
835 	u64 *p;
836 
837 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
838 				 ARRAY_SIZE(g_mac_stats_string), data);
839 	p = hclge_tqps_get_stats(handle, p);
840 }
841 
842 static void hclge_get_mac_stat(struct hnae3_handle *handle,
843 			       struct hns3_mac_stats *mac_stats)
844 {
845 	struct hclge_vport *vport = hclge_get_vport(handle);
846 	struct hclge_dev *hdev = vport->back;
847 
848 	hclge_update_stats(handle, NULL);
849 
850 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
851 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
852 }
853 
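/* Parse the function status response: record whether this PF is the
 * main PF and extract the MAC id.
 */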
854 static int hclge_parse_func_status(struct hclge_dev *hdev,
855 				   struct hclge_func_status_cmd *status)
856 {
857 #define HCLGE_MAC_ID_MASK	0xF
858 
859 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
860 		return -EINVAL;
861 
862 	/* Set the pf to main pf */
863 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
864 		hdev->flag |= HCLGE_FLAG_MAIN;
865 	else
866 		hdev->flag &= ~HCLGE_FLAG_MAIN;
867 
868 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
869 	return 0;
870 }
871 
872 static int hclge_query_function_status(struct hclge_dev *hdev)
873 {
874 #define HCLGE_QUERY_MAX_CNT	5
875 
876 	struct hclge_func_status_cmd *req;
877 	struct hclge_desc desc;
878 	int timeout = 0;
879 	int ret;
880 
881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
882 	req = (struct hclge_func_status_cmd *)desc.data;
883 
884 	do {
885 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886 		if (ret) {
887 			dev_err(&hdev->pdev->dev,
888 				"query function status failed %d.\n", ret);
889 			return ret;
890 		}
891 
892 		/* Check if pf reset is done */
893 		if (req->pf_state)
894 			break;
895 		usleep_range(1000, 2000);
896 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
897 
898 	return hclge_parse_func_status(hdev, req);
899 }
900 
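/* Query the PF resources (TQP number, buffer sizes and MSI vector
 * counts) from firmware and store them in hdev.
 */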
901 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 {
903 	struct hclge_pf_res_cmd *req;
904 	struct hclge_desc desc;
905 	int ret;
906 
907 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
908 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909 	if (ret) {
910 		dev_err(&hdev->pdev->dev,
911 			"query pf resource failed %d.\n", ret);
912 		return ret;
913 	}
914 
915 	req = (struct hclge_pf_res_cmd *)desc.data;
916 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
917 			 le16_to_cpu(req->ext_tqp_num);
918 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919 
920 	if (req->tx_buf_size)
921 		hdev->tx_buf_size =
922 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923 	else
924 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925 
926 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927 
928 	if (req->dv_buf_size)
929 		hdev->dv_buf_size =
930 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931 	else
932 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933 
934 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935 
936 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
937 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
938 		dev_err(&hdev->pdev->dev,
939 			"only %u msi resources available, not enough for pf(min:2).\n",
940 			hdev->num_nic_msi);
941 		return -EINVAL;
942 	}
943 
944 	if (hnae3_dev_roce_supported(hdev)) {
945 		hdev->num_roce_msi =
946 			le16_to_cpu(req->pf_intr_vector_number_roce);
947 
948 		/* PF should have NIC vectors and RoCE vectors;
949 		 * NIC vectors are queued before RoCE vectors.
950 		 */
951 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952 	} else {
953 		hdev->num_msi = hdev->num_nic_msi;
954 	}
955 
956 	return 0;
957 }
958 
959 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
960 {
961 	switch (speed_cmd) {
962 	case HCLGE_FW_MAC_SPEED_10M:
963 		*speed = HCLGE_MAC_SPEED_10M;
964 		break;
965 	case HCLGE_FW_MAC_SPEED_100M:
966 		*speed = HCLGE_MAC_SPEED_100M;
967 		break;
968 	case HCLGE_FW_MAC_SPEED_1G:
969 		*speed = HCLGE_MAC_SPEED_1G;
970 		break;
971 	case HCLGE_FW_MAC_SPEED_10G:
972 		*speed = HCLGE_MAC_SPEED_10G;
973 		break;
974 	case HCLGE_FW_MAC_SPEED_25G:
975 		*speed = HCLGE_MAC_SPEED_25G;
976 		break;
977 	case HCLGE_FW_MAC_SPEED_40G:
978 		*speed = HCLGE_MAC_SPEED_40G;
979 		break;
980 	case HCLGE_FW_MAC_SPEED_50G:
981 		*speed = HCLGE_MAC_SPEED_50G;
982 		break;
983 	case HCLGE_FW_MAC_SPEED_100G:
984 		*speed = HCLGE_MAC_SPEED_100G;
985 		break;
986 	case HCLGE_FW_MAC_SPEED_200G:
987 		*speed = HCLGE_MAC_SPEED_200G;
988 		break;
989 	default:
990 		return -EINVAL;
991 	}
992 
993 	return 0;
994 }
995 
996 static const struct hclge_speed_bit_map speed_bit_map[] = {
997 	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
998 	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
999 	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1000 	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1001 	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1002 	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1003 	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1004 	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1005 	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1006 };
1007 
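/* Translate a MAC speed value into its HCLGE_SUPPORT_*_BIT flag using
 * speed_bit_map; returns -EINVAL for an unknown speed.
 */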
1008 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1009 {
1010 	u16 i;
1011 
1012 	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1013 		if (speed == speed_bit_map[i].speed) {
1014 			*speed_bit = speed_bit_map[i].speed_bit;
1015 			return 0;
1016 		}
1017 	}
1018 
1019 	return -EINVAL;
1020 }
1021 
1022 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1023 {
1024 	struct hclge_vport *vport = hclge_get_vport(handle);
1025 	struct hclge_dev *hdev = vport->back;
1026 	u32 speed_ability = hdev->hw.mac.speed_ability;
1027 	u32 speed_bit = 0;
1028 	int ret;
1029 
1030 	ret = hclge_get_speed_bit(speed, &speed_bit);
1031 	if (ret)
1032 		return ret;
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
1207 	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
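/* Extract the configuration fields (tc number, queue depth, MAC address,
 * speed ability, RSS sizes, etc.) from the two HCLGE_OPC_GET_CFG_PARAM
 * descriptors into @cfg.
 */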
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT		4096
1283 #define SPEED_ABILITY_EXT_SHIFT			8
1284 
1285 	struct hclge_cfg_param_cmd *req;
1286 	u64 mac_addr_tmp_high;
1287 	u16 speed_ability_ext;
1288 	u64 mac_addr_tmp;
1289 	unsigned int i;
1290 
1291 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292 
1293 	/* get the configuration */
1294 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297 					    HCLGE_CFG_TQP_DESC_N_M,
1298 					    HCLGE_CFG_TQP_DESC_N_S);
1299 
1300 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 					HCLGE_CFG_PHY_ADDR_M,
1302 					HCLGE_CFG_PHY_ADDR_S);
1303 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 					  HCLGE_CFG_MEDIA_TP_M,
1305 					  HCLGE_CFG_MEDIA_TP_S);
1306 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 					  HCLGE_CFG_RX_BUF_LEN_M,
1308 					  HCLGE_CFG_RX_BUF_LEN_S);
1309 	/* get mac_address */
1310 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312 					    HCLGE_CFG_MAC_ADDR_H_M,
1313 					    HCLGE_CFG_MAC_ADDR_H_S);
1314 
1315 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1316 
1317 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318 					     HCLGE_CFG_DEFAULT_SPEED_M,
1319 					     HCLGE_CFG_DEFAULT_SPEED_S);
1320 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321 					       HCLGE_CFG_RSS_SIZE_M,
1322 					       HCLGE_CFG_RSS_SIZE_S);
1323 
1324 	for (i = 0; i < ETH_ALEN; i++)
1325 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326 
1327 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329 
1330 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331 					     HCLGE_CFG_SPEED_ABILITY_M,
1332 					     HCLGE_CFG_SPEED_ABILITY_S);
1333 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337 
1338 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1340 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1341 
1342 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 	if (!cfg->umv_space)
1346 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1347 
1348 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349 					       HCLGE_CFG_PF_RSS_SIZE_M,
1350 					       HCLGE_CFG_PF_RSS_SIZE_S);
1351 
1352 	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power
1353 	 * of 2 exponent, rather than the value itself. This is more
1354 	 * flexible for future changes and expansions.
1355 	 * When the VF max rss size field is HCLGE_CFG_RSS_SIZE_S, a PF
1356 	 * field of 0 does not make sense. In this case, PF and VF share
1357 	 * the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
1358 	 */
1359 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360 			       1U << cfg->pf_rss_size_max :
1361 			       cfg->vf_rss_size_max;
1362 
1363 	/* The unit of the tx spare buffer size queried from configuration
1364 	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1365 	 * needed here.
1366 	 */
1367 	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370 	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1371 }
1372 
1373 /* hclge_get_cfg: query the static parameters from flash
1374  * @hdev: pointer to struct hclge_dev
1375  * @hcfg: the config structure to be filled
1376  */
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1378 {
1379 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380 	struct hclge_cfg_param_cmd *req;
1381 	unsigned int i;
1382 	int ret;
1383 
1384 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1385 		u32 offset = 0;
1386 
1387 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1389 					   true);
1390 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1392 		/* Len is expressed in units of 4 bytes when sent to hardware */
1393 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395 		req->offset = cpu_to_le32(offset);
1396 	}
1397 
1398 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1399 	if (ret) {
1400 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1401 		return ret;
1402 	}
1403 
1404 	hclge_parse_cfg(hcfg, desc);
1405 
1406 	return 0;
1407 }
1408 
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1410 {
1411 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1412 
1413 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414 
1415 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1422 }
1423 
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425 				  struct hclge_desc *desc)
1426 {
1427 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428 	struct hclge_dev_specs_0_cmd *req0;
1429 	struct hclge_dev_specs_1_cmd *req1;
1430 
1431 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1433 
1434 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435 	ae_dev->dev_specs.rss_ind_tbl_size =
1436 		le16_to_cpu(req0->rss_ind_tbl_size);
1437 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1443 }
1444 
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1446 {
1447 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1448 
1449 	if (!dev_specs->max_non_tso_bd_num)
1450 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451 	if (!dev_specs->rss_ind_tbl_size)
1452 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453 	if (!dev_specs->rss_key_size)
1454 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455 	if (!dev_specs->max_tm_rate)
1456 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457 	if (!dev_specs->max_qset_num)
1458 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459 	if (!dev_specs->max_int_gl)
1460 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461 	if (!dev_specs->max_frm_size)
1462 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1463 }
1464 
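/* Query device specifications from firmware (device version V3 and
 * later); older devices fall back to the default specifications.
 */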
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1466 {
1467 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1468 	int ret;
1469 	int i;
1470 
1471 	/* set default specifications as devices lower than version V3 do not
1472 	 * support querying specifications from firmware.
1473 	 */
1474 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475 		hclge_set_default_dev_specs(hdev);
1476 		return 0;
1477 	}
1478 
1479 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1481 					   true);
1482 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1483 	}
1484 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1485 
1486 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1487 	if (ret)
1488 		return ret;
1489 
1490 	hclge_parse_dev_specs(hdev, desc);
1491 	hclge_check_dev_specs(hdev);
1492 
1493 	return 0;
1494 }
1495 
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1497 {
1498 	int ret;
1499 
1500 	ret = hclge_query_function_status(hdev);
1501 	if (ret) {
1502 		dev_err(&hdev->pdev->dev,
1503 			"query function status error %d.\n", ret);
1504 		return ret;
1505 	}
1506 
1507 	/* get pf resource */
1508 	return hclge_query_pf_resource(hdev);
1509 }
1510 
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1512 {
1513 #define HCLGE_MIN_TX_DESC	64
1514 #define HCLGE_MIN_RX_DESC	64
1515 
1516 	if (!is_kdump_kernel())
1517 		return;
1518 
1519 	dev_info(&hdev->pdev->dev,
1520 		 "Running kdump kernel. Using minimal resources\n");
1521 
1522 	/* the minimal number of queue pairs equals the number of vports */
1523 	hdev->num_tqps = hdev->num_req_vfs + 1;
1524 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1526 }
1527 
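/* Read the static configuration and initialize the corresponding fields
 * in hdev (queue/RSS sizes, MAC, speed, TC and affinity settings).
 */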
1528 static int hclge_configure(struct hclge_dev *hdev)
1529 {
1530 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531 	const struct cpumask *cpumask = cpu_online_mask;
1532 	struct hclge_cfg cfg;
1533 	unsigned int i;
1534 	int node, ret;
1535 
1536 	ret = hclge_get_cfg(hdev, &cfg);
1537 	if (ret)
1538 		return ret;
1539 
1540 	hdev->base_tqp_pid = 0;
1541 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1542 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1543 	hdev->rx_buf_len = cfg.rx_buf_len;
1544 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1545 	hdev->hw.mac.media_type = cfg.media_type;
1546 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1547 	hdev->num_tx_desc = cfg.tqp_desc_num;
1548 	hdev->num_rx_desc = cfg.tqp_desc_num;
1549 	hdev->tm_info.num_pg = 1;
1550 	hdev->tc_max = cfg.tc_num;
1551 	hdev->tm_info.hw_pfc_map = 0;
1552 	hdev->wanted_umv_size = cfg.umv_space;
1553 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1554 	hdev->gro_en = true;
1555 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1556 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1557 
1558 	if (hnae3_dev_fd_supported(hdev)) {
1559 		hdev->fd_en = true;
1560 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1561 	}
1562 
1563 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1564 	if (ret) {
1565 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1566 			cfg.default_speed, ret);
1567 		return ret;
1568 	}
1569 
1570 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1571 
1572 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1573 
1574 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1575 	    (hdev->tc_max < 1)) {
1576 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1577 			 hdev->tc_max);
1578 		hdev->tc_max = 1;
1579 	}
1580 
1581 	/* Dev does not support DCB */
1582 	if (!hnae3_dev_dcb_supported(hdev)) {
1583 		hdev->tc_max = 1;
1584 		hdev->pfc_max = 0;
1585 	} else {
1586 		hdev->pfc_max = hdev->tc_max;
1587 	}
1588 
1589 	hdev->tm_info.num_tc = 1;
1590 
1591 	/* Non-contiguous tc is currently not supported */
1592 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1593 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1594 
1595 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1596 
1597 	hclge_init_kdump_kernel_config(hdev);
1598 
1599 	/* Set the affinity based on numa node */
1600 	node = dev_to_node(&hdev->pdev->dev);
1601 	if (node != NUMA_NO_NODE)
1602 		cpumask = cpumask_of_node(node);
1603 
1604 	cpumask_copy(&hdev->affinity_mask, cpumask);
1605 
1606 	return ret;
1607 }
1608 
1609 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1610 			    u16 tso_mss_max)
1611 {
1612 	struct hclge_cfg_tso_status_cmd *req;
1613 	struct hclge_desc desc;
1614 
1615 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1616 
1617 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1618 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1619 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1620 
1621 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1622 }
1623 
1624 static int hclge_config_gro(struct hclge_dev *hdev)
1625 {
1626 	struct hclge_cfg_gro_status_cmd *req;
1627 	struct hclge_desc desc;
1628 	int ret;
1629 
1630 	if (!hnae3_dev_gro_supported(hdev))
1631 		return 0;
1632 
1633 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1634 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1635 
1636 	req->gro_en = hdev->gro_en ? 1 : 0;
1637 
1638 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1639 	if (ret)
1640 		dev_err(&hdev->pdev->dev,
1641 			"GRO hardware config cmd failed, ret = %d\n", ret);
1642 
1643 	return ret;
1644 }
1645 
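/* Allocate the per-device TQP array and pre-compute each queue's register
 * base. Queues beyond HCLGE_TQP_MAX_SIZE_DEV_V2 live in an extended
 * register region and need the extra HCLGE_TQP_EXT_REG_OFFSET.
 */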
1646 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1647 {
1648 	struct hclge_tqp *tqp;
1649 	int i;
1650 
1651 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1652 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1653 	if (!hdev->htqp)
1654 		return -ENOMEM;
1655 
1656 	tqp = hdev->htqp;
1657 
1658 	for (i = 0; i < hdev->num_tqps; i++) {
1659 		tqp->dev = &hdev->pdev->dev;
1660 		tqp->index = i;
1661 
1662 		tqp->q.ae_algo = &ae_algo;
1663 		tqp->q.buf_size = hdev->rx_buf_len;
1664 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1665 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1666 
1667 		/* need an extended offset to configure queues >=
1668 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1669 		 */
1670 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1671 			tqp->q.io_base = hdev->hw.io_base +
1672 					 HCLGE_TQP_REG_OFFSET +
1673 					 i * HCLGE_TQP_REG_SIZE;
1674 		else
1675 			tqp->q.io_base = hdev->hw.io_base +
1676 					 HCLGE_TQP_REG_OFFSET +
1677 					 HCLGE_TQP_EXT_REG_OFFSET +
1678 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1679 					 HCLGE_TQP_REG_SIZE;
1680 
1681 		tqp++;
1682 	}
1683 
1684 	return 0;
1685 }
1686 
1687 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1688 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1689 {
1690 	struct hclge_tqp_map_cmd *req;
1691 	struct hclge_desc desc;
1692 	int ret;
1693 
1694 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1695 
1696 	req = (struct hclge_tqp_map_cmd *)desc.data;
1697 	req->tqp_id = cpu_to_le16(tqp_pid);
1698 	req->tqp_vf = func_id;
1699 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1700 	if (!is_pf)
1701 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1702 	req->tqp_vid = cpu_to_le16(tqp_vid);
1703 
1704 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1705 	if (ret)
1706 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1707 
1708 	return ret;
1709 }
1710 
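/* Hand out up to num_tqps unallocated queues from the device pool to the
 * vport. rss_size is capped by pf_rss_size_max and by the NIC MSI budget
 * so that every queue can keep its own interrupt vector.
 */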
1711 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1712 {
1713 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1714 	struct hclge_dev *hdev = vport->back;
1715 	int i, alloced;
1716 
1717 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1718 	     alloced < num_tqps; i++) {
1719 		if (!hdev->htqp[i].alloced) {
1720 			hdev->htqp[i].q.handle = &vport->nic;
1721 			hdev->htqp[i].q.tqp_index = alloced;
1722 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1723 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1724 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1725 			hdev->htqp[i].alloced = true;
1726 			alloced++;
1727 		}
1728 	}
1729 	vport->alloc_tqps = alloced;
1730 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1731 				vport->alloc_tqps / hdev->tm_info.num_tc);
1732 
1733 	/* ensure a one-to-one mapping between irq and queue by default */
1734 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1735 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1736 
1737 	return 0;
1738 }
1739 
1740 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1741 			    u16 num_tx_desc, u16 num_rx_desc)
1743 {
1744 	struct hnae3_handle *nic = &vport->nic;
1745 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1746 	struct hclge_dev *hdev = vport->back;
1747 	int ret;
1748 
1749 	kinfo->num_tx_desc = num_tx_desc;
1750 	kinfo->num_rx_desc = num_rx_desc;
1751 
1752 	kinfo->rx_buf_len = hdev->rx_buf_len;
1753 	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1754 
1755 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1756 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1757 	if (!kinfo->tqp)
1758 		return -ENOMEM;
1759 
1760 	ret = hclge_assign_tqp(vport, num_tqps);
1761 	if (ret)
1762 		dev_err(&hdev->pdev->dev, "failed to assign TQPs %d.\n", ret);
1763 
1764 	return ret;
1765 }
1766 
1767 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1768 				  struct hclge_vport *vport)
1769 {
1770 	struct hnae3_handle *nic = &vport->nic;
1771 	struct hnae3_knic_private_info *kinfo;
1772 	u16 i;
1773 
1774 	kinfo = &nic->kinfo;
1775 	for (i = 0; i < vport->alloc_tqps; i++) {
1776 		struct hclge_tqp *q =
1777 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1778 		bool is_pf;
1779 		int ret;
1780 
1781 		is_pf = !(vport->vport_id);
1782 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1783 					     i, is_pf);
1784 		if (ret)
1785 			return ret;
1786 	}
1787 
1788 	return 0;
1789 }
1790 
1791 static int hclge_map_tqp(struct hclge_dev *hdev)
1792 {
1793 	struct hclge_vport *vport = hdev->vport;
1794 	u16 i, num_vport;
1795 
1796 	num_vport = hdev->num_req_vfs + 1;
1797 	for (i = 0; i < num_vport; i++) {
1798 		int ret;
1799 
1800 		ret = hclge_map_tqp_to_vport(hdev, vport);
1801 		if (ret)
1802 			return ret;
1803 
1804 		vport++;
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1811 {
1812 	struct hnae3_handle *nic = &vport->nic;
1813 	struct hclge_dev *hdev = vport->back;
1814 	int ret;
1815 
1816 	nic->pdev = hdev->pdev;
1817 	nic->ae_algo = &ae_algo;
1818 	nic->numa_node_mask = hdev->numa_node_mask;
1819 	nic->kinfo.io_base = hdev->hw.io_base;
1820 
1821 	ret = hclge_knic_setup(vport, num_tqps,
1822 			       hdev->num_tx_desc, hdev->num_rx_desc);
1823 	if (ret)
1824 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1825 
1826 	return ret;
1827 }
1828 
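/* Create one vport for the PF itself plus one per requested VF. TQPs are
 * split evenly across the vports; the PF (vport 0) also takes the remainder.
 */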
1829 static int hclge_alloc_vport(struct hclge_dev *hdev)
1830 {
1831 	struct pci_dev *pdev = hdev->pdev;
1832 	struct hclge_vport *vport;
1833 	u32 tqp_main_vport;
1834 	u32 tqp_per_vport;
1835 	int num_vport, i;
1836 	int ret;
1837 
1838 	/* We need to alloc a vport for the main NIC of the PF */
1839 	num_vport = hdev->num_req_vfs + 1;
1840 
1841 	if (hdev->num_tqps < num_vport) {
1842 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
1843 			hdev->num_tqps, num_vport);
1844 		return -EINVAL;
1845 	}
1846 
1847 	/* Alloc the same number of TQPs for every vport */
1848 	tqp_per_vport = hdev->num_tqps / num_vport;
1849 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1850 
1851 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1852 			     GFP_KERNEL);
1853 	if (!vport)
1854 		return -ENOMEM;
1855 
1856 	hdev->vport = vport;
1857 	hdev->num_alloc_vport = num_vport;
1858 
1859 	if (IS_ENABLED(CONFIG_PCI_IOV))
1860 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1861 
1862 	for (i = 0; i < num_vport; i++) {
1863 		vport->back = hdev;
1864 		vport->vport_id = i;
1865 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1866 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1867 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1868 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1869 		vport->req_vlan_fltr_en = true;
1870 		INIT_LIST_HEAD(&vport->vlan_list);
1871 		INIT_LIST_HEAD(&vport->uc_mac_list);
1872 		INIT_LIST_HEAD(&vport->mc_mac_list);
1873 		spin_lock_init(&vport->mac_list_lock);
1874 
1875 		if (i == 0)
1876 			ret = hclge_vport_setup(vport, tqp_main_vport);
1877 		else
1878 			ret = hclge_vport_setup(vport, tqp_per_vport);
1879 		if (ret) {
1880 			dev_err(&pdev->dev,
1881 				"vport setup failed for vport %d, %d\n",
1882 				i, ret);
1883 			return ret;
1884 		}
1885 
1886 		vport++;
1887 	}
1888 
1889 	return 0;
1890 }
1891 
1892 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1893 				    struct hclge_pkt_buf_alloc *buf_alloc)
1894 {
1895 /* TX buffer size is specified in units of 128 bytes */
1896 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1897 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1898 	struct hclge_tx_buff_alloc_cmd *req;
1899 	struct hclge_desc desc;
1900 	int ret;
1901 	u8 i;
1902 
1903 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1904 
1905 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1906 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1907 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1908 
1909 		req->tx_pkt_buff[i] =
1910 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1911 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1912 	}
1913 
1914 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1915 	if (ret)
1916 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1917 			ret);
1918 
1919 	return ret;
1920 }
1921 
1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1923 				 struct hclge_pkt_buf_alloc *buf_alloc)
1924 {
1925 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1926 
1927 	if (ret)
1928 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1929 
1930 	return ret;
1931 }
1932 
1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1934 {
1935 	unsigned int i;
1936 	u32 cnt = 0;
1937 
1938 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1939 		if (hdev->hw_tc_map & BIT(i))
1940 			cnt++;
1941 	return cnt;
1942 }
1943 
1944 /* Get the number of PFC-enabled TCs that have a private buffer */
1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1946 				  struct hclge_pkt_buf_alloc *buf_alloc)
1947 {
1948 	struct hclge_priv_buf *priv;
1949 	unsigned int i;
1950 	int cnt = 0;
1951 
1952 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1953 		priv = &buf_alloc->priv_buf[i];
1954 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1955 		    priv->enable)
1956 			cnt++;
1957 	}
1958 
1959 	return cnt;
1960 }
1961 
1962 /* Get the number of PFC-disabled TCs that have a private buffer */
1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1964 				     struct hclge_pkt_buf_alloc *buf_alloc)
1965 {
1966 	struct hclge_priv_buf *priv;
1967 	unsigned int i;
1968 	int cnt = 0;
1969 
1970 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1971 		priv = &buf_alloc->priv_buf[i];
1972 		if (hdev->hw_tc_map & BIT(i) &&
1973 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1974 		    priv->enable)
1975 			cnt++;
1976 	}
1977 
1978 	return cnt;
1979 }
1980 
1981 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1982 {
1983 	struct hclge_priv_buf *priv;
1984 	u32 rx_priv = 0;
1985 	int i;
1986 
1987 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1988 		priv = &buf_alloc->priv_buf[i];
1989 		if (priv->enable)
1990 			rx_priv += priv->buf_size;
1991 	}
1992 	return rx_priv;
1993 }
1994 
1995 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1996 {
1997 	u32 i, total_tx_size = 0;
1998 
1999 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2000 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2001 
2002 	return total_tx_size;
2003 }
2004 
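/* Check whether the RX buffer budget (rx_all) can hold the private buffers
 * plus a large enough shared buffer. On success, fill in the shared buffer
 * size, its high/low waterlines and the per-TC thresholds.
 */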
2005 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2006 				struct hclge_pkt_buf_alloc *buf_alloc,
2007 				u32 rx_all)
2008 {
2009 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2010 	u32 tc_num = hclge_get_tc_num(hdev);
2011 	u32 shared_buf, aligned_mps;
2012 	u32 rx_priv;
2013 	int i;
2014 
2015 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2016 
2017 	if (hnae3_dev_dcb_supported(hdev))
2018 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2019 					hdev->dv_buf_size;
2020 	else
2021 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2022 					+ hdev->dv_buf_size;
2023 
2024 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2025 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2026 			     HCLGE_BUF_SIZE_UNIT);
2027 
2028 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2029 	if (rx_all < rx_priv + shared_std)
2030 		return false;
2031 
2032 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2033 	buf_alloc->s_buf.buf_size = shared_buf;
2034 	if (hnae3_dev_dcb_supported(hdev)) {
2035 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2036 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2037 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2038 				  HCLGE_BUF_SIZE_UNIT);
2039 	} else {
2040 		buf_alloc->s_buf.self.high = aligned_mps +
2041 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2042 		buf_alloc->s_buf.self.low = aligned_mps;
2043 	}
2044 
2045 	if (hnae3_dev_dcb_supported(hdev)) {
2046 		hi_thrd = shared_buf - hdev->dv_buf_size;
2047 
2048 		if (tc_num <= NEED_RESERVE_TC_NUM)
2049 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2050 					/ BUF_MAX_PERCENT;
2051 
2052 		if (tc_num)
2053 			hi_thrd = hi_thrd / tc_num;
2054 
2055 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2056 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2057 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2058 	} else {
2059 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2060 		lo_thrd = aligned_mps;
2061 	}
2062 
2063 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2065 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2066 	}
2067 
2068 	return true;
2069 }
2070 
2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2072 				struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074 	u32 i, total_size;
2075 
2076 	total_size = hdev->pkt_buf_size;
2077 
2078 	/* alloc tx buffer for all enabled tc */
2079 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2080 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2081 
2082 		if (hdev->hw_tc_map & BIT(i)) {
2083 			if (total_size < hdev->tx_buf_size)
2084 				return -ENOMEM;
2085 
2086 			priv->tx_buf_size = hdev->tx_buf_size;
2087 		} else {
2088 			priv->tx_buf_size = 0;
2089 		}
2090 
2091 		total_size -= priv->tx_buf_size;
2092 	}
2093 
2094 	return 0;
2095 }
2096 
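/* Assign a private RX buffer to every enabled TC. PFC-enabled TCs get a
 * non-zero low waterline; @max selects the larger or the reduced waterline
 * profile. Returns true if the leftover space still makes a valid shared
 * buffer.
 */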
2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2098 				  struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2101 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2102 	unsigned int i;
2103 
2104 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2105 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2106 
2107 		priv->enable = 0;
2108 		priv->wl.low = 0;
2109 		priv->wl.high = 0;
2110 		priv->buf_size = 0;
2111 
2112 		if (!(hdev->hw_tc_map & BIT(i)))
2113 			continue;
2114 
2115 		priv->enable = 1;
2116 
2117 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2118 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2119 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2120 						HCLGE_BUF_SIZE_UNIT);
2121 		} else {
2122 			priv->wl.low = 0;
2123 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2124 					aligned_mps;
2125 		}
2126 
2127 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2128 	}
2129 
2130 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2131 }
2132 
2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2134 					  struct hclge_pkt_buf_alloc *buf_alloc)
2135 {
2136 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2138 	int i;
2139 
2140 	/* clear TCs starting from the last one */
2141 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2142 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2143 		unsigned int mask = BIT((unsigned int)i);
2144 
2145 		if (hdev->hw_tc_map & mask &&
2146 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2147 			/* Clear the private buffer of the TC without PFC */
2148 			priv->wl.low = 0;
2149 			priv->wl.high = 0;
2150 			priv->buf_size = 0;
2151 			priv->enable = 0;
2152 			no_pfc_priv_num--;
2153 		}
2154 
2155 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2156 		    no_pfc_priv_num == 0)
2157 			break;
2158 	}
2159 
2160 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2161 }
2162 
2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2164 					struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2167 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2168 	int i;
2169 
2170 	/* clear TCs starting from the last one */
2171 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2172 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2173 		unsigned int mask = BIT((unsigned int)i);
2174 
2175 		if (hdev->hw_tc_map & mask &&
2176 		    hdev->tm_info.hw_pfc_map & mask) {
2177 			/* Reduce the number of PFC TCs with a private buffer */
2178 			priv->wl.low = 0;
2179 			priv->enable = 0;
2180 			priv->wl.high = 0;
2181 			priv->buf_size = 0;
2182 			pfc_priv_num--;
2183 		}
2184 
2185 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2186 		    pfc_priv_num == 0)
2187 			break;
2188 	}
2189 
2190 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2191 }
2192 
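/* Private-buffer-only strategy: split the whole RX budget evenly across the
 * enabled TCs, keep a reserve when the TC count is small, and use no shared
 * buffer at all. Fails (returns false) if the per-TC share would drop below
 * the required minimum.
 */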
2193 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2194 				       struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196 #define COMPENSATE_BUFFER	0x3C00
2197 #define COMPENSATE_HALF_MPS_NUM	5
2198 #define PRIV_WL_GAP		0x1800
2199 
2200 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2201 	u32 tc_num = hclge_get_tc_num(hdev);
2202 	u32 half_mps = hdev->mps >> 1;
2203 	u32 min_rx_priv;
2204 	unsigned int i;
2205 
2206 	if (tc_num)
2207 		rx_priv = rx_priv / tc_num;
2208 
2209 	if (tc_num <= NEED_RESERVE_TC_NUM)
2210 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2211 
2212 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2213 			COMPENSATE_HALF_MPS_NUM * half_mps;
2214 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2215 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2216 	if (rx_priv < min_rx_priv)
2217 		return false;
2218 
2219 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2220 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2221 
2222 		priv->enable = 0;
2223 		priv->wl.low = 0;
2224 		priv->wl.high = 0;
2225 		priv->buf_size = 0;
2226 
2227 		if (!(hdev->hw_tc_map & BIT(i)))
2228 			continue;
2229 
2230 		priv->enable = 1;
2231 		priv->buf_size = rx_priv;
2232 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2233 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2234 	}
2235 
2236 	buf_alloc->s_buf.buf_size = 0;
2237 
2238 	return true;
2239 }
2240 
2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2242  * @hdev: pointer to struct hclge_dev
2243  * @buf_alloc: pointer to buffer calculation data
2244  * @return: 0: calculation successful, negative: failure
2245  */
2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2247 				struct hclge_pkt_buf_alloc *buf_alloc)
2248 {
2249 	/* When DCB is not supported, rx private buffer is not allocated. */
2250 	if (!hnae3_dev_dcb_supported(hdev)) {
2251 		u32 rx_all = hdev->pkt_buf_size;
2252 
2253 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2254 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2255 			return -ENOMEM;
2256 
2257 		return 0;
2258 	}
2259 
2260 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2261 		return 0;
2262 
2263 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2264 		return 0;
2265 
2266 	/* try to decrease the buffer size */
2267 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2268 		return 0;
2269 
2270 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2271 		return 0;
2272 
2273 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2274 		return 0;
2275 
2276 	return -ENOMEM;
2277 }
2278 
2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2280 				   struct hclge_pkt_buf_alloc *buf_alloc)
2281 {
2282 	struct hclge_rx_priv_buff_cmd *req;
2283 	struct hclge_desc desc;
2284 	int ret;
2285 	int i;
2286 
2287 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2288 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2289 
2290 	/* Alloc private buffer for each TC */
2291 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2292 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2293 
2294 		req->buf_num[i] =
2295 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2296 		req->buf_num[i] |=
2297 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2298 	}
2299 
2300 	req->shared_buf =
2301 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2302 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2303 
2304 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2305 	if (ret)
2306 		dev_err(&hdev->pdev->dev,
2307 			"rx private buffer alloc cmd failed %d\n", ret);
2308 
2309 	return ret;
2310 }
2311 
2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2313 				   struct hclge_pkt_buf_alloc *buf_alloc)
2314 {
2315 	struct hclge_rx_priv_wl_buf *req;
2316 	struct hclge_priv_buf *priv;
2317 	struct hclge_desc desc[2];
2318 	int i, j;
2319 	int ret;
2320 
2321 	for (i = 0; i < 2; i++) {
2322 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2323 					   false);
2324 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2325 
2326 		/* The first descriptor sets the NEXT bit to 1 */
2327 		if (i == 0)
2328 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329 		else
2330 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331 
2332 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2334 
2335 			priv = &buf_alloc->priv_buf[idx];
2336 			req->tc_wl[j].high =
2337 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2338 			req->tc_wl[j].high |=
2339 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340 			req->tc_wl[j].low =
2341 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2342 			req->tc_wl[j].low |=
2343 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344 		}
2345 	}
2346 
2347 	/* Send 2 descriptors at one time */
2348 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349 	if (ret)
2350 		dev_err(&hdev->pdev->dev,
2351 			"rx private waterline config cmd failed %d\n",
2352 			ret);
2353 	return ret;
2354 }
2355 
2356 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2357 				    struct hclge_pkt_buf_alloc *buf_alloc)
2358 {
2359 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2360 	struct hclge_rx_com_thrd *req;
2361 	struct hclge_desc desc[2];
2362 	struct hclge_tc_thrd *tc;
2363 	int i, j;
2364 	int ret;
2365 
2366 	for (i = 0; i < 2; i++) {
2367 		hclge_cmd_setup_basic_desc(&desc[i],
2368 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2369 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2370 
2371 		/* The first descriptor sets the NEXT bit to 1 */
2372 		if (i == 0)
2373 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2374 		else
2375 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2376 
2377 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2378 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2379 
2380 			req->com_thrd[j].high =
2381 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2382 			req->com_thrd[j].high |=
2383 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2384 			req->com_thrd[j].low =
2385 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2386 			req->com_thrd[j].low |=
2387 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388 		}
2389 	}
2390 
2391 	/* Send 2 descriptors at one time */
2392 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2393 	if (ret)
2394 		dev_err(&hdev->pdev->dev,
2395 			"common threshold config cmd failed %d\n", ret);
2396 	return ret;
2397 }
2398 
2399 static int hclge_common_wl_config(struct hclge_dev *hdev,
2400 				  struct hclge_pkt_buf_alloc *buf_alloc)
2401 {
2402 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2403 	struct hclge_rx_com_wl *req;
2404 	struct hclge_desc desc;
2405 	int ret;
2406 
2407 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2408 
2409 	req = (struct hclge_rx_com_wl *)desc.data;
2410 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2411 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2412 
2413 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2414 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2415 
2416 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2417 	if (ret)
2418 		dev_err(&hdev->pdev->dev,
2419 			"common waterline config cmd failed %d\n", ret);
2420 
2421 	return ret;
2422 }
2423 
2424 int hclge_buffer_alloc(struct hclge_dev *hdev)
2425 {
2426 	struct hclge_pkt_buf_alloc *pkt_buf;
2427 	int ret;
2428 
2429 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2430 	if (!pkt_buf)
2431 		return -ENOMEM;
2432 
2433 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2434 	if (ret) {
2435 		dev_err(&hdev->pdev->dev,
2436 			"could not calc tx buffer size for all TCs %d\n", ret);
2437 		goto out;
2438 	}
2439 
2440 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2441 	if (ret) {
2442 		dev_err(&hdev->pdev->dev,
2443 			"could not alloc tx buffers %d\n", ret);
2444 		goto out;
2445 	}
2446 
2447 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2448 	if (ret) {
2449 		dev_err(&hdev->pdev->dev,
2450 			"could not calc rx priv buffer size for all TCs %d\n",
2451 			ret);
2452 		goto out;
2453 	}
2454 
2455 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2456 	if (ret) {
2457 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2458 			ret);
2459 		goto out;
2460 	}
2461 
2462 	if (hnae3_dev_dcb_supported(hdev)) {
2463 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2464 		if (ret) {
2465 			dev_err(&hdev->pdev->dev,
2466 				"could not configure rx private waterline %d\n",
2467 				ret);
2468 			goto out;
2469 		}
2470 
2471 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2472 		if (ret) {
2473 			dev_err(&hdev->pdev->dev,
2474 				"could not configure common threshold %d\n",
2475 				ret);
2476 			goto out;
2477 		}
2478 	}
2479 
2480 	ret = hclge_common_wl_config(hdev, pkt_buf);
2481 	if (ret)
2482 		dev_err(&hdev->pdev->dev,
2483 			"could not configure common waterline %d\n", ret);
2484 
2485 out:
2486 	kfree(pkt_buf);
2487 	return ret;
2488 }
2489 
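/* RoCE vectors are laid out after the NIC vectors (see hclge_init_msi), so
 * make sure the total MSI budget covers both before handing the base vector
 * and register bases to the RoCE client.
 */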
2490 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2491 {
2492 	struct hnae3_handle *roce = &vport->roce;
2493 	struct hnae3_handle *nic = &vport->nic;
2494 	struct hclge_dev *hdev = vport->back;
2495 
2496 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2497 
2498 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2499 		return -EINVAL;
2500 
2501 	roce->rinfo.base_vector = hdev->roce_base_vector;
2502 
2503 	roce->rinfo.netdev = nic->kinfo.netdev;
2504 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2505 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2506 
2507 	roce->pdev = nic->pdev;
2508 	roce->ae_algo = nic->ae_algo;
2509 	roce->numa_node_mask = nic->numa_node_mask;
2510 
2511 	return 0;
2512 }
2513 
2514 static int hclge_init_msi(struct hclge_dev *hdev)
2515 {
2516 	struct pci_dev *pdev = hdev->pdev;
2517 	int vectors;
2518 	int i;
2519 
2520 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2521 					hdev->num_msi,
2522 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2523 	if (vectors < 0) {
2524 		dev_err(&pdev->dev,
2525 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2526 			vectors);
2527 		return vectors;
2528 	}
2529 	if (vectors < hdev->num_msi)
2530 		dev_warn(&hdev->pdev->dev,
2531 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2532 			 hdev->num_msi, vectors);
2533 
2534 	hdev->num_msi = vectors;
2535 	hdev->num_msi_left = vectors;
2536 
2537 	hdev->base_msi_vector = pdev->irq;
2538 	hdev->roce_base_vector = hdev->base_msi_vector +
2539 				hdev->num_nic_msi;
2540 
2541 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2542 					   sizeof(u16), GFP_KERNEL);
2543 	if (!hdev->vector_status) {
2544 		pci_free_irq_vectors(pdev);
2545 		return -ENOMEM;
2546 	}
2547 
2548 	for (i = 0; i < hdev->num_msi; i++)
2549 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2550 
2551 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2552 					sizeof(int), GFP_KERNEL);
2553 	if (!hdev->vector_irq) {
2554 		pci_free_irq_vectors(pdev);
2555 		return -ENOMEM;
2556 	}
2557 
2558 	return 0;
2559 }
2560 
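/* Half duplex is only meaningful for 10M/100M links; force full duplex for
 * every other speed.
 */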
2561 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2562 {
2563 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2564 		duplex = HCLGE_MAC_FULL;
2565 
2566 	return duplex;
2567 }
2568 
2569 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2570 				      u8 duplex)
2571 {
2572 	struct hclge_config_mac_speed_dup_cmd *req;
2573 	struct hclge_desc desc;
2574 	int ret;
2575 
2576 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2577 
2578 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2579 
2580 	if (duplex)
2581 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2582 
2583 	switch (speed) {
2584 	case HCLGE_MAC_SPEED_10M:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2587 		break;
2588 	case HCLGE_MAC_SPEED_100M:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2591 		break;
2592 	case HCLGE_MAC_SPEED_1G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2595 		break;
2596 	case HCLGE_MAC_SPEED_10G:
2597 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2599 		break;
2600 	case HCLGE_MAC_SPEED_25G:
2601 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2603 		break;
2604 	case HCLGE_MAC_SPEED_40G:
2605 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2606 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2607 		break;
2608 	case HCLGE_MAC_SPEED_50G:
2609 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2610 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2611 		break;
2612 	case HCLGE_MAC_SPEED_100G:
2613 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2614 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2615 		break;
2616 	case HCLGE_MAC_SPEED_200G:
2617 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2618 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2619 		break;
2620 	default:
2621 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2622 		return -EINVAL;
2623 	}
2624 
2625 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2626 		      1);
2627 
2628 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629 	if (ret) {
2630 		dev_err(&hdev->pdev->dev,
2631 			"mac speed/duplex config cmd failed %d.\n", ret);
2632 		return ret;
2633 	}
2634 
2635 	return 0;
2636 }
2637 
2638 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2639 {
2640 	struct hclge_mac *mac = &hdev->hw.mac;
2641 	int ret;
2642 
2643 	duplex = hclge_check_speed_dup(duplex, speed);
2644 	if (!mac->support_autoneg && mac->speed == speed &&
2645 	    mac->duplex == duplex)
2646 		return 0;
2647 
2648 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2649 	if (ret)
2650 		return ret;
2651 
2652 	hdev->hw.mac.speed = speed;
2653 	hdev->hw.mac.duplex = duplex;
2654 
2655 	return 0;
2656 }
2657 
2658 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2659 				     u8 duplex)
2660 {
2661 	struct hclge_vport *vport = hclge_get_vport(handle);
2662 	struct hclge_dev *hdev = vport->back;
2663 
2664 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2665 }
2666 
2667 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2668 {
2669 	struct hclge_config_auto_neg_cmd *req;
2670 	struct hclge_desc desc;
2671 	u32 flag = 0;
2672 	int ret;
2673 
2674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2675 
2676 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2677 	if (enable)
2678 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2679 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2680 
2681 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2682 	if (ret)
2683 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2684 			ret);
2685 
2686 	return ret;
2687 }
2688 
2689 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2690 {
2691 	struct hclge_vport *vport = hclge_get_vport(handle);
2692 	struct hclge_dev *hdev = vport->back;
2693 
2694 	if (!hdev->hw.mac.support_autoneg) {
2695 		if (enable) {
2696 			dev_err(&hdev->pdev->dev,
2697 				"autoneg is not supported by current port\n");
2698 			return -EOPNOTSUPP;
2699 		} else {
2700 			return 0;
2701 		}
2702 	}
2703 
2704 	return hclge_set_autoneg_en(hdev, enable);
2705 }
2706 
2707 static int hclge_get_autoneg(struct hnae3_handle *handle)
2708 {
2709 	struct hclge_vport *vport = hclge_get_vport(handle);
2710 	struct hclge_dev *hdev = vport->back;
2711 	struct phy_device *phydev = hdev->hw.mac.phydev;
2712 
2713 	if (phydev)
2714 		return phydev->autoneg;
2715 
2716 	return hdev->hw.mac.autoneg;
2717 }
2718 
2719 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2720 {
2721 	struct hclge_vport *vport = hclge_get_vport(handle);
2722 	struct hclge_dev *hdev = vport->back;
2723 	int ret;
2724 
2725 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2726 
2727 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2728 	if (ret)
2729 		return ret;
2730 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2731 }
2732 
2733 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2734 {
2735 	struct hclge_vport *vport = hclge_get_vport(handle);
2736 	struct hclge_dev *hdev = vport->back;
2737 
2738 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2739 		return hclge_set_autoneg_en(hdev, !halt);
2740 
2741 	return 0;
2742 }
2743 
2744 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2745 {
2746 	struct hclge_config_fec_cmd *req;
2747 	struct hclge_desc desc;
2748 	int ret;
2749 
2750 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2751 
2752 	req = (struct hclge_config_fec_cmd *)desc.data;
2753 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2754 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2755 	if (fec_mode & BIT(HNAE3_FEC_RS))
2756 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2757 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2758 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2759 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2760 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2761 
2762 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2763 	if (ret)
2764 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2765 
2766 	return ret;
2767 }
2768 
2769 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2770 {
2771 	struct hclge_vport *vport = hclge_get_vport(handle);
2772 	struct hclge_dev *hdev = vport->back;
2773 	struct hclge_mac *mac = &hdev->hw.mac;
2774 	int ret;
2775 
2776 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2777 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2778 		return -EINVAL;
2779 	}
2780 
2781 	ret = hclge_set_fec_hw(hdev, fec_mode);
2782 	if (ret)
2783 		return ret;
2784 
2785 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2786 	return 0;
2787 }
2788 
2789 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2790 			  u8 *fec_mode)
2791 {
2792 	struct hclge_vport *vport = hclge_get_vport(handle);
2793 	struct hclge_dev *hdev = vport->back;
2794 	struct hclge_mac *mac = &hdev->hw.mac;
2795 
2796 	if (fec_ability)
2797 		*fec_ability = mac->fec_ability;
2798 	if (fec_mode)
2799 		*fec_mode = mac->fec_mode;
2800 }
2801 
2802 static int hclge_mac_init(struct hclge_dev *hdev)
2803 {
2804 	struct hclge_mac *mac = &hdev->hw.mac;
2805 	int ret;
2806 
2807 	hdev->support_sfp_query = true;
2808 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2809 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2810 					 hdev->hw.mac.duplex);
2811 	if (ret)
2812 		return ret;
2813 
2814 	if (hdev->hw.mac.support_autoneg) {
2815 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2816 		if (ret)
2817 			return ret;
2818 	}
2819 
2820 	mac->link = 0;
2821 
2822 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2823 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2824 		if (ret)
2825 			return ret;
2826 	}
2827 
2828 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2829 	if (ret) {
2830 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2831 		return ret;
2832 	}
2833 
2834 	ret = hclge_set_default_loopback(hdev);
2835 	if (ret)
2836 		return ret;
2837 
2838 	ret = hclge_buffer_alloc(hdev);
2839 	if (ret)
2840 		dev_err(&hdev->pdev->dev,
2841 			"failed to allocate buffer, ret = %d\n", ret);
2842 
2843 	return ret;
2844 }
2845 
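/* The mailbox, reset and error handlers all share the same delayed service
 * task. A per-source state bit makes each schedule request idempotent, and
 * the work is queued on the first CPU of the device's affinity mask.
 */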
2846 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2847 {
2848 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2850 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851 				    hclge_wq, &hdev->service_task, 0);
2852 }
2853 
2854 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2855 {
2856 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2857 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2858 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2859 				    hclge_wq, &hdev->service_task, 0);
2860 }
2861 
2862 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2863 {
2864 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2865 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2866 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2867 				    hclge_wq, &hdev->service_task, 0);
2868 }
2869 
2870 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2871 {
2872 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2873 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2874 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2875 				    hclge_wq, &hdev->service_task,
2876 				    delay_time);
2877 }
2878 
2879 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2880 {
2881 	struct hclge_link_status_cmd *req;
2882 	struct hclge_desc desc;
2883 	int ret;
2884 
2885 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2886 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2887 	if (ret) {
2888 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2889 			ret);
2890 		return ret;
2891 	}
2892 
2893 	req = (struct hclge_link_status_cmd *)desc.data;
2894 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2895 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2896 
2897 	return 0;
2898 }
2899 
2900 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2901 {
2902 	struct phy_device *phydev = hdev->hw.mac.phydev;
2903 
2904 	*link_status = HCLGE_LINK_STATUS_DOWN;
2905 
2906 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2907 		return 0;
2908 
2909 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2910 		return 0;
2911 
2912 	return hclge_get_mac_link_status(hdev, link_status);
2913 }
2914 
2915 static void hclge_push_link_status(struct hclge_dev *hdev)
2916 {
2917 	struct hclge_vport *vport;
2918 	int ret;
2919 	u16 i;
2920 
2921 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2922 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2923 
2924 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2925 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2926 			continue;
2927 
2928 		ret = hclge_push_vf_link_status(vport);
2929 		if (ret) {
2930 			dev_err(&hdev->pdev->dev,
2931 				"failed to push link status to vf%u, ret = %d\n",
2932 				i, ret);
2933 		}
2934 	}
2935 }
2936 
2937 static void hclge_update_link_status(struct hclge_dev *hdev)
2938 {
2939 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2940 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2941 	struct hnae3_client *rclient = hdev->roce_client;
2942 	struct hnae3_client *client = hdev->nic_client;
2943 	int state;
2944 	int ret;
2945 
2946 	if (!client)
2947 		return;
2948 
2949 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2950 		return;
2951 
2952 	ret = hclge_get_mac_phy_link(hdev, &state);
2953 	if (ret) {
2954 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2955 		return;
2956 	}
2957 
2958 	if (state != hdev->hw.mac.link) {
2959 		hdev->hw.mac.link = state;
2960 		client->ops->link_status_change(handle, state);
2961 		hclge_config_mac_tnl_int(hdev, state);
2962 		if (rclient && rclient->ops->link_status_change)
2963 			rclient->ops->link_status_change(rhandle, state);
2964 
2965 		hclge_push_link_status(hdev);
2966 	}
2967 
2968 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2969 }
2970 
2971 static void hclge_update_port_capability(struct hclge_dev *hdev,
2972 					 struct hclge_mac *mac)
2973 {
2974 	if (hnae3_dev_fec_supported(hdev))
2975 		/* update fec ability by speed */
2976 		hclge_convert_setting_fec(mac);
2977 
2978 	/* firmware cannot identify the backplane type, so the media type
2979 	 * read from the configuration helps to deal with it
2980 	 */
2981 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2982 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2983 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2984 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2985 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2986 
2987 	if (mac->support_autoneg) {
2988 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2989 		linkmode_copy(mac->advertising, mac->supported);
2990 	} else {
2991 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2992 				   mac->supported);
2993 		linkmode_zero(mac->advertising);
2994 	}
2995 }
2996 
2997 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2998 {
2999 	struct hclge_sfp_info_cmd *resp;
3000 	struct hclge_desc desc;
3001 	int ret;
3002 
3003 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3004 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3005 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3006 	if (ret == -EOPNOTSUPP) {
3007 		dev_warn(&hdev->pdev->dev,
3008 			 "IMP does not support getting SFP speed %d\n", ret);
3009 		return ret;
3010 	} else if (ret) {
3011 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3012 		return ret;
3013 	}
3014 
3015 	*speed = le32_to_cpu(resp->speed);
3016 
3017 	return 0;
3018 }
3019 
3020 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3021 {
3022 	struct hclge_sfp_info_cmd *resp;
3023 	struct hclge_desc desc;
3024 	int ret;
3025 
3026 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3027 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3028 
3029 	resp->query_type = QUERY_ACTIVE_SPEED;
3030 
3031 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3032 	if (ret == -EOPNOTSUPP) {
3033 		dev_warn(&hdev->pdev->dev,
3034 			 "IMP does not support getting SFP info %d\n", ret);
3035 		return ret;
3036 	} else if (ret) {
3037 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3038 		return ret;
3039 	}
3040 
3041 	/* In some cases, the MAC speed got from IMP may be 0; it should not
3042 	 * be assigned to mac->speed.
3043 	 */
3044 	if (!le32_to_cpu(resp->speed))
3045 		return 0;
3046 
3047 	mac->speed = le32_to_cpu(resp->speed);
3048 	/* if resp->speed_ability is 0, it means the firmware is an old
3049 	 * version, so do not update these parameters
3050 	 */
3051 	if (resp->speed_ability) {
3052 		mac->module_type = le32_to_cpu(resp->module_type);
3053 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3054 		mac->autoneg = resp->autoneg;
3055 		mac->support_autoneg = resp->autoneg_ability;
3056 		mac->speed_type = QUERY_ACTIVE_SPEED;
3057 		if (!resp->active_fec)
3058 			mac->fec_mode = 0;
3059 		else
3060 			mac->fec_mode = BIT(resp->active_fec);
3061 	} else {
3062 		mac->speed_type = QUERY_SFP_SPEED;
3063 	}
3064 
3065 	return 0;
3066 }
3067 
3068 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3069 					struct ethtool_link_ksettings *cmd)
3070 {
3071 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3072 	struct hclge_vport *vport = hclge_get_vport(handle);
3073 	struct hclge_phy_link_ksetting_0_cmd *req0;
3074 	struct hclge_phy_link_ksetting_1_cmd *req1;
3075 	u32 supported, advertising, lp_advertising;
3076 	struct hclge_dev *hdev = vport->back;
3077 	int ret;
3078 
3079 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3080 				   true);
3081 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3082 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3083 				   true);
3084 
3085 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3086 	if (ret) {
3087 		dev_err(&hdev->pdev->dev,
3088 			"failed to get phy link ksetting, ret = %d.\n", ret);
3089 		return ret;
3090 	}
3091 
3092 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3093 	cmd->base.autoneg = req0->autoneg;
3094 	cmd->base.speed = le32_to_cpu(req0->speed);
3095 	cmd->base.duplex = req0->duplex;
3096 	cmd->base.port = req0->port;
3097 	cmd->base.transceiver = req0->transceiver;
3098 	cmd->base.phy_address = req0->phy_address;
3099 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3100 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3101 	supported = le32_to_cpu(req0->supported);
3102 	advertising = le32_to_cpu(req0->advertising);
3103 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3104 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3105 						supported);
3106 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3107 						advertising);
3108 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3109 						lp_advertising);
3110 
3111 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3112 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3113 	cmd->base.master_slave_state = req1->master_slave_state;
3114 
3115 	return 0;
3116 }
3117 
3118 static int
3119 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3120 			     const struct ethtool_link_ksettings *cmd)
3121 {
3122 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3123 	struct hclge_vport *vport = hclge_get_vport(handle);
3124 	struct hclge_phy_link_ksetting_0_cmd *req0;
3125 	struct hclge_phy_link_ksetting_1_cmd *req1;
3126 	struct hclge_dev *hdev = vport->back;
3127 	u32 advertising;
3128 	int ret;
3129 
3130 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3131 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3132 	     (cmd->base.duplex != DUPLEX_HALF &&
3133 	      cmd->base.duplex != DUPLEX_FULL)))
3134 		return -EINVAL;
3135 
3136 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3137 				   false);
3138 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3139 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3140 				   false);
3141 
3142 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3143 	req0->autoneg = cmd->base.autoneg;
3144 	req0->speed = cpu_to_le32(cmd->base.speed);
3145 	req0->duplex = cmd->base.duplex;
3146 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3147 						cmd->link_modes.advertising);
3148 	req0->advertising = cpu_to_le32(advertising);
3149 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3150 
3151 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3152 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3153 
3154 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3155 	if (ret) {
3156 		dev_err(&hdev->pdev->dev,
3157 			"failed to set phy link ksettings, ret = %d.\n", ret);
3158 		return ret;
3159 	}
3160 
3161 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3162 	hdev->hw.mac.speed = cmd->base.speed;
3163 	hdev->hw.mac.duplex = cmd->base.duplex;
3164 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3165 
3166 	return 0;
3167 }
3168 
3169 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3170 {
3171 	struct ethtool_link_ksettings cmd;
3172 	int ret;
3173 
3174 	if (!hnae3_dev_phy_imp_supported(hdev))
3175 		return 0;
3176 
3177 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3178 	if (ret)
3179 		return ret;
3180 
3181 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3182 	hdev->hw.mac.speed = cmd.base.speed;
3183 	hdev->hw.mac.duplex = cmd.base.duplex;
3184 
3185 	return 0;
3186 }
3187 
3188 static int hclge_tp_port_init(struct hclge_dev *hdev)
3189 {
3190 	struct ethtool_link_ksettings cmd;
3191 
3192 	if (!hnae3_dev_phy_imp_supported(hdev))
3193 		return 0;
3194 
3195 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3196 	cmd.base.speed = hdev->hw.mac.speed;
3197 	cmd.base.duplex = hdev->hw.mac.duplex;
3198 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3199 
3200 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3201 }
3202 
3203 static int hclge_update_port_info(struct hclge_dev *hdev)
3204 {
3205 	struct hclge_mac *mac = &hdev->hw.mac;
3206 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3207 	int ret;
3208 
3209 	/* get the port info from SFP cmd if not copper port */
3210 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3211 		return hclge_update_tp_port_info(hdev);
3212 
3213 	/* if IMP does not support getting SFP/qSFP info, return directly */
3214 	if (!hdev->support_sfp_query)
3215 		return 0;
3216 
3217 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3218 		ret = hclge_get_sfp_info(hdev, mac);
3219 	else
3220 		ret = hclge_get_sfp_speed(hdev, &speed);
3221 
3222 	if (ret == -EOPNOTSUPP) {
3223 		hdev->support_sfp_query = false;
3224 		return ret;
3225 	} else if (ret) {
3226 		return ret;
3227 	}
3228 
3229 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3230 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3231 			hclge_update_port_capability(hdev, mac);
3232 			return 0;
3233 		}
3234 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3235 					       HCLGE_MAC_FULL);
3236 	} else {
3237 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3238 			return 0; /* do nothing if no SFP */
3239 
3240 		/* must config full duplex for SFP */
3241 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3242 	}
3243 }
3244 
3245 static int hclge_get_status(struct hnae3_handle *handle)
3246 {
3247 	struct hclge_vport *vport = hclge_get_vport(handle);
3248 	struct hclge_dev *hdev = vport->back;
3249 
3250 	hclge_update_link_status(hdev);
3251 
3252 	return hdev->hw.mac.link;
3253 }
3254 
3255 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3256 {
3257 	if (!pci_num_vf(hdev->pdev)) {
3258 		dev_err(&hdev->pdev->dev,
3259 			"SRIOV is disabled, cannot get vport(%d) info.\n", vf);
3260 		return NULL;
3261 	}
3262 
3263 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3264 		dev_err(&hdev->pdev->dev,
3265 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3266 			vf, pci_num_vf(hdev->pdev));
3267 		return NULL;
3268 	}
3269 
3270 	/* VFs start from index 1 in the vport array */
3271 	vf += HCLGE_VF_VPORT_START_NUM;
3272 	return &hdev->vport[vf];
3273 }
3274 
3275 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3276 			       struct ifla_vf_info *ivf)
3277 {
3278 	struct hclge_vport *vport = hclge_get_vport(handle);
3279 	struct hclge_dev *hdev = vport->back;
3280 
3281 	vport = hclge_get_vf_vport(hdev, vf);
3282 	if (!vport)
3283 		return -EINVAL;
3284 
3285 	ivf->vf = vf;
3286 	ivf->linkstate = vport->vf_info.link_state;
3287 	ivf->spoofchk = vport->vf_info.spoofchk;
3288 	ivf->trusted = vport->vf_info.trusted;
3289 	ivf->min_tx_rate = 0;
3290 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3291 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3292 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3293 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3294 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3295 
3296 	return 0;
3297 }
3298 
3299 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3300 				   int link_state)
3301 {
3302 	struct hclge_vport *vport = hclge_get_vport(handle);
3303 	struct hclge_dev *hdev = vport->back;
3304 	int link_state_old;
3305 	int ret;
3306 
3307 	vport = hclge_get_vf_vport(hdev, vf);
3308 	if (!vport)
3309 		return -EINVAL;
3310 
3311 	link_state_old = vport->vf_info.link_state;
3312 	vport->vf_info.link_state = link_state;
3313 
3314 	ret = hclge_push_vf_link_status(vport);
3315 	if (ret) {
3316 		vport->vf_info.link_state = link_state_old;
3317 		dev_err(&hdev->pdev->dev,
3318 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3319 	}
3320 
3321 	return ret;
3322 }
3323 
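/* Decode the vector 0 interrupt source. Events are checked in priority
 * order: IMP reset, global reset, MSI-X/RAS errors, PTP, then mailbox.
 */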
3324 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3325 {
3326 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3327 
3328 	/* fetch the events from their corresponding regs */
3329 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3330 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3331 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3332 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3333 
3334 	/* Assumption: if reset and mailbox events happen to be reported
3335 	 * together, only the reset event is processed in this pass and the
3336 	 * mailbox events are deferred. Since the RX CMDQ event is not cleared
3337 	 * this time, the hardware will raise another interrupt just for the
3338 	 * mailbox.
3339 	 *
3340 	 * check for vector0 reset event sources
3341 	 */
3342 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3343 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3344 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3345 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3346 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3347 		hdev->rst_stats.imp_rst_cnt++;
3348 		return HCLGE_VECTOR0_EVENT_RST;
3349 	}
3350 
3351 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3352 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3353 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3354 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3355 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3356 		hdev->rst_stats.global_rst_cnt++;
3357 		return HCLGE_VECTOR0_EVENT_RST;
3358 	}
3359 
3360 	/* check for vector0 msix event and hardware error event source */
3361 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3362 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3363 		return HCLGE_VECTOR0_EVENT_ERR;
3364 
3365 	/* check for vector0 ptp event source */
3366 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3367 		*clearval = msix_src_reg;
3368 		return HCLGE_VECTOR0_EVENT_PTP;
3369 	}
3370 
3371 	/* check for vector0 mailbox(=CMDQ RX) event source */
3372 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3373 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3374 		*clearval = cmdq_src_reg;
3375 		return HCLGE_VECTOR0_EVENT_MBX;
3376 	}
3377 
3378 	/* print other vector0 event source */
3379 	dev_info(&hdev->pdev->dev,
3380 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3381 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3382 
3383 	return HCLGE_VECTOR0_EVENT_OTHER;
3384 }
3385 
3386 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3387 				    u32 regclr)
3388 {
3389 	switch (event_type) {
3390 	case HCLGE_VECTOR0_EVENT_PTP:
3391 	case HCLGE_VECTOR0_EVENT_RST:
3392 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3393 		break;
3394 	case HCLGE_VECTOR0_EVENT_MBX:
3395 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3396 		break;
3397 	default:
3398 		break;
3399 	}
3400 }
3401 
3402 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3403 {
3404 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3405 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3406 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3407 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3408 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3409 }
3410 
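/* Enable or disable the misc (vector 0) interrupt by writing 1 or 0 to its
 * vector address register.
 */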
3411 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3412 {
3413 	writel(enable ? 1 : 0, vector->addr);
3414 }
3415 
3416 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3417 {
3418 	struct hclge_dev *hdev = data;
3419 	unsigned long flags;
3420 	u32 clearval = 0;
3421 	u32 event_cause;
3422 
3423 	hclge_enable_vector(&hdev->misc_vector, false);
3424 	event_cause = hclge_check_event_cause(hdev, &clearval);
3425 
3426 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3427 	switch (event_cause) {
3428 	case HCLGE_VECTOR0_EVENT_ERR:
3429 		hclge_errhand_task_schedule(hdev);
3430 		break;
3431 	case HCLGE_VECTOR0_EVENT_RST:
3432 		hclge_reset_task_schedule(hdev);
3433 		break;
3434 	case HCLGE_VECTOR0_EVENT_PTP:
3435 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3436 		hclge_ptp_clean_tx_hwts(hdev);
3437 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3438 		break;
3439 	case HCLGE_VECTOR0_EVENT_MBX:
3440 		/* If we are here, then either:
3441 		 * 1. we are not handling any mbx task and no mbx task is
3442 		 *    scheduled,
3443 		 *                        OR
3444 		 * 2. we are handling an mbx task but nothing more is
3445 		 *    scheduled.
3446 		 * In both cases, we should schedule the mbx task, as there are
3447 		 * more mbx messages reported by this interrupt.
3448 		 */
3449 		hclge_mbx_task_schedule(hdev);
3450 		break;
3451 	default:
3452 		dev_warn(&hdev->pdev->dev,
3453 			 "received unknown or unhandled event of vector0\n");
3454 		break;
3455 	}
3456 
3457 	hclge_clear_event_cause(hdev, event_cause, clearval);
3458 
3459 	/* Enable interrupt if it is not caused by reset event or error event */
3460 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3461 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3462 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3463 		hclge_enable_vector(&hdev->misc_vector, true);
3464 
3465 	return IRQ_HANDLED;
3466 }
3467 
3468 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3469 {
3470 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3471 		dev_warn(&hdev->pdev->dev,
3472 			 "vector(vector_id %d) has been freed.\n", vector_id);
3473 		return;
3474 	}
3475 
3476 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3477 	hdev->num_msi_left += 1;
3478 	hdev->num_msi_used -= 1;
3479 }
3480 
3481 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3482 {
3483 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3484 
3485 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3486 
3487 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3488 	hdev->vector_status[0] = 0;
3489 
3490 	hdev->num_msi_left -= 1;
3491 	hdev->num_msi_used += 1;
3492 }
3493 
3494 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3495 				      const cpumask_t *mask)
3496 {
3497 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3498 					      affinity_notify);
3499 
3500 	cpumask_copy(&hdev->affinity_mask, mask);
3501 }
3502 
3503 static void hclge_irq_affinity_release(struct kref *ref)
3504 {
3505 }
3506 
3507 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3508 {
3509 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3510 			      &hdev->affinity_mask);
3511 
3512 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3513 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3514 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3515 				  &hdev->affinity_notify);
3516 }
3517 
3518 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3519 {
3520 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3521 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3522 }
3523 
3524 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3525 {
3526 	int ret;
3527 
3528 	hclge_get_misc_vector(hdev);
3529 
3530 	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
3531 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3532 		 HCLGE_NAME, pci_name(hdev->pdev));
3533 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3534 			  0, hdev->misc_vector.name, hdev);
3535 	if (ret) {
3536 		hclge_free_vector(hdev, 0);
3537 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3538 			hdev->misc_vector.vector_irq);
3539 	}
3540 
3541 	return ret;
3542 }
3543 
3544 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3545 {
3546 	free_irq(hdev->misc_vector.vector_irq, hdev);
3547 	hclge_free_vector(hdev, 0);
3548 }
3549 
3550 int hclge_notify_client(struct hclge_dev *hdev,
3551 			enum hnae3_reset_notify_type type)
3552 {
3553 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3554 	struct hnae3_client *client = hdev->nic_client;
3555 	int ret;
3556 
3557 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3558 		return 0;
3559 
3560 	if (!client->ops->reset_notify)
3561 		return -EOPNOTSUPP;
3562 
3563 	ret = client->ops->reset_notify(handle, type);
3564 	if (ret)
3565 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3566 			type, ret);
3567 
3568 	return ret;
3569 }
3570 
3571 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3572 				    enum hnae3_reset_notify_type type)
3573 {
3574 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3575 	struct hnae3_client *client = hdev->roce_client;
3576 	int ret;
3577 
3578 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3579 		return 0;
3580 
3581 	if (!client->ops->reset_notify)
3582 		return -EOPNOTSUPP;
3583 
3584 	ret = client->ops->reset_notify(handle, type);
3585 	if (ret)
3586 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3587 			type, ret);
3588 
3589 	return ret;
3590 }
3591 
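/* Poll the reset status register for the current reset type until the
 * corresponding busy bit clears or the wait count runs out.
 */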
3592 static int hclge_reset_wait(struct hclge_dev *hdev)
3593 {
3594 #define HCLGE_RESET_WAIT_MS	100
3595 #define HCLGE_RESET_WAIT_CNT	350
3596 
3597 	u32 val, reg, reg_bit;
3598 	u32 cnt = 0;
3599 
3600 	switch (hdev->reset_type) {
3601 	case HNAE3_IMP_RESET:
3602 		reg = HCLGE_GLOBAL_RESET_REG;
3603 		reg_bit = HCLGE_IMP_RESET_BIT;
3604 		break;
3605 	case HNAE3_GLOBAL_RESET:
3606 		reg = HCLGE_GLOBAL_RESET_REG;
3607 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3608 		break;
3609 	case HNAE3_FUNC_RESET:
3610 		reg = HCLGE_FUN_RST_ING;
3611 		reg_bit = HCLGE_FUN_RST_ING_B;
3612 		break;
3613 	default:
3614 		dev_err(&hdev->pdev->dev,
3615 			"Wait for unsupported reset type: %d\n",
3616 			hdev->reset_type);
3617 		return -EINVAL;
3618 	}
3619 
3620 	val = hclge_read_dev(&hdev->hw, reg);
3621 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3622 		msleep(HCLGE_RESET_WAIT_MS);
3623 		val = hclge_read_dev(&hdev->hw, reg);
3624 		cnt++;
3625 	}
3626 
3627 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3628 		dev_warn(&hdev->pdev->dev,
3629 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3630 		return -EBUSY;
3631 	}
3632 
3633 	return 0;
3634 }
3635 
3636 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3637 {
3638 	struct hclge_vf_rst_cmd *req;
3639 	struct hclge_desc desc;
3640 
3641 	req = (struct hclge_vf_rst_cmd *)desc.data;
3642 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3643 	req->dest_vfid = func_id;
3644 
3645 	if (reset)
3646 		req->vf_rst = 0x1;
3647 
3648 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3649 }
3650 
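/* Assert or deassert function reset for every VF vport and, when asserting,
 * inform each alive VF so that it can stop its IO.
 */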
3651 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3652 {
3653 	int i;
3654 
3655 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3656 		struct hclge_vport *vport = &hdev->vport[i];
3657 		int ret;
3658 
3659 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3660 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3661 		if (ret) {
3662 			dev_err(&hdev->pdev->dev,
3663 				"set vf(%u) rst failed %d!\n",
3664 				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3665 				ret);
3666 			return ret;
3667 		}
3668 
3669 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3670 			continue;
3671 
3672 		/* Inform VF to process the reset.
3673 		 * hclge_inform_reset_assert_to_vf may fail if VF
3674 		 * driver is not loaded.
3675 		 */
3676 		ret = hclge_inform_reset_assert_to_vf(vport);
3677 		if (ret)
3678 			dev_warn(&hdev->pdev->dev,
3679 				 "inform reset to vf(%u) failed %d!\n",
3680 				 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3681 				 ret);
3682 	}
3683 
3684 	return 0;
3685 }
3686 
3687 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3688 {
3689 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3690 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3691 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3692 		return;
3693 
3694 	hclge_mbx_handler(hdev);
3695 
3696 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3697 }
3698 
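/* Wait for all VFs to report that they have stopped IO before the PF/FLR
 * reset proceeds, servicing pending mailbox requests while polling.
 */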
3699 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3700 {
3701 	struct hclge_pf_rst_sync_cmd *req;
3702 	struct hclge_desc desc;
3703 	int cnt = 0;
3704 	int ret;
3705 
3706 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3707 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3708 
3709 	do {
3710 		/* VFs must down their netdev via mbx during PF or FLR reset */
3711 		hclge_mailbox_service_task(hdev);
3712 
3713 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3714 		/* to be compatible with old firmware, wait
3715 		 * 100 ms for the VF to stop IO
3716 		 */
3717 		if (ret == -EOPNOTSUPP) {
3718 			msleep(HCLGE_RESET_SYNC_TIME);
3719 			return;
3720 		} else if (ret) {
3721 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3722 				 ret);
3723 			return;
3724 		} else if (req->all_vf_ready) {
3725 			return;
3726 		}
3727 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3728 		hclge_cmd_reuse_desc(&desc, true);
3729 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3730 
3731 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3732 }
3733 
3734 void hclge_report_hw_error(struct hclge_dev *hdev,
3735 			   enum hnae3_hw_error_type type)
3736 {
3737 	struct hnae3_client *client = hdev->nic_client;
3738 
3739 	if (!client || !client->ops->process_hw_error ||
3740 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3741 		return;
3742 
3743 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3744 }
3745 
3746 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3747 {
3748 	u32 reg_val;
3749 
3750 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3751 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3752 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3753 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3754 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3755 	}
3756 
3757 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3758 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3759 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3760 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3761 	}
3762 }
3763 
3764 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3765 {
3766 	struct hclge_desc desc;
3767 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3768 	int ret;
3769 
3770 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3771 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3772 	req->fun_reset_vfid = func_id;
3773 
3774 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3775 	if (ret)
3776 		dev_err(&hdev->pdev->dev,
3777 			"send function reset cmd fail, status = %d\n", ret);
3778 
3779 	return ret;
3780 }
3781 
3782 static void hclge_do_reset(struct hclge_dev *hdev)
3783 {
3784 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3785 	struct pci_dev *pdev = hdev->pdev;
3786 	u32 val;
3787 
3788 	if (hclge_get_hw_reset_stat(handle)) {
3789 		dev_info(&pdev->dev, "hardware reset not finish\n");
3790 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3791 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3792 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3793 		return;
3794 	}
3795 
3796 	switch (hdev->reset_type) {
3797 	case HNAE3_IMP_RESET:
3798 		dev_info(&pdev->dev, "IMP reset requested\n");
3799 		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3800 		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3801 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3802 		break;
3803 	case HNAE3_GLOBAL_RESET:
3804 		dev_info(&pdev->dev, "global reset requested\n");
3805 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3806 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3807 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3808 		break;
3809 	case HNAE3_FUNC_RESET:
3810 		dev_info(&pdev->dev, "PF reset requested\n");
3811 		/* schedule again to check later */
3812 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3813 		hclge_reset_task_schedule(hdev);
3814 		break;
3815 	default:
3816 		dev_warn(&pdev->dev,
3817 			 "unsupported reset type: %d\n", hdev->reset_type);
3818 		break;
3819 	}
3820 }
3821 
3822 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3823 						   unsigned long *addr)
3824 {
3825 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3826 	struct hclge_dev *hdev = ae_dev->priv;
3827 
3828 	/* return the highest priority reset level amongst all */
3829 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3830 		rst_level = HNAE3_IMP_RESET;
3831 		clear_bit(HNAE3_IMP_RESET, addr);
3832 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3833 		clear_bit(HNAE3_FUNC_RESET, addr);
3834 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3835 		rst_level = HNAE3_GLOBAL_RESET;
3836 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3837 		clear_bit(HNAE3_FUNC_RESET, addr);
3838 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3839 		rst_level = HNAE3_FUNC_RESET;
3840 		clear_bit(HNAE3_FUNC_RESET, addr);
3841 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3842 		rst_level = HNAE3_FLR_RESET;
3843 		clear_bit(HNAE3_FLR_RESET, addr);
3844 	}
3845 
3846 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3847 	    rst_level < hdev->reset_type)
3848 		return HNAE3_NONE_RESET;
3849 
3850 	return rst_level;
3851 }
3852 
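/* Clear the reset interrupt source for the reset type just handled and
 * re-enable the misc interrupt vector.
 */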
3853 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3854 {
3855 	u32 clearval = 0;
3856 
3857 	switch (hdev->reset_type) {
3858 	case HNAE3_IMP_RESET:
3859 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3860 		break;
3861 	case HNAE3_GLOBAL_RESET:
3862 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3863 		break;
3864 	default:
3865 		break;
3866 	}
3867 
3868 	if (!clearval)
3869 		return;
3870 
3871 	/* For revision 0x20, the reset interrupt source
3872 	 * can only be cleared after the hardware reset is done
3873 	 */
3874 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3875 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3876 				clearval);
3877 
3878 	hclge_enable_vector(&hdev->misc_vector, true);
3879 }
3880 
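/* Toggle the software reset-ready handshake bit in the CSQ depth register:
 * set when the driver has finished its preparatory work, cleared again
 * after re-initialization.
 */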
3881 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3882 {
3883 	u32 reg_val;
3884 
3885 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3886 	if (enable)
3887 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3888 	else
3889 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3890 
3891 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3892 }
3893 
3894 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3895 {
3896 	int ret;
3897 
3898 	ret = hclge_set_all_vf_rst(hdev, true);
3899 	if (ret)
3900 		return ret;
3901 
3902 	hclge_func_reset_sync_vf(hdev);
3903 
3904 	return 0;
3905 }
3906 
3907 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3908 {
3909 	u32 reg_val;
3910 	int ret = 0;
3911 
3912 	switch (hdev->reset_type) {
3913 	case HNAE3_FUNC_RESET:
3914 		ret = hclge_func_reset_notify_vf(hdev);
3915 		if (ret)
3916 			return ret;
3917 
3918 		ret = hclge_func_reset_cmd(hdev, 0);
3919 		if (ret) {
3920 			dev_err(&hdev->pdev->dev,
3921 				"asserting function reset fail %d!\n", ret);
3922 			return ret;
3923 		}
3924 
3925 		/* After performing PF reset, it is not necessary to do the
3926 		 * mailbox handling or send any command to firmware, because
3927 		 * any mailbox handling or command to firmware is only valid
3928 		 * after hclge_cmd_init is called.
3929 		 */
3930 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3931 		hdev->rst_stats.pf_rst_cnt++;
3932 		break;
3933 	case HNAE3_FLR_RESET:
3934 		ret = hclge_func_reset_notify_vf(hdev);
3935 		if (ret)
3936 			return ret;
3937 		break;
3938 	case HNAE3_IMP_RESET:
3939 		hclge_handle_imp_error(hdev);
3940 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3941 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3942 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3943 		break;
3944 	default:
3945 		break;
3946 	}
3947 
3948 	/* inform hardware that preparatory work is done */
3949 	msleep(HCLGE_RESET_SYNC_TIME);
3950 	hclge_reset_handshake(hdev, true);
3951 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3952 
3953 	return ret;
3954 }
3955 
3956 static void hclge_show_rst_info(struct hclge_dev *hdev)
3957 {
3958 	char *buf;
3959 
3960 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3961 	if (!buf)
3962 		return;
3963 
3964 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3965 
3966 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3967 
3968 	kfree(buf);
3969 }
3970 
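/* Called when a reset attempt fails: returns true if the reset task should
 * be rescheduled (a reset is still pending or the retry limit has not been
 * hit), false otherwise.
 */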
3971 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3972 {
3973 #define MAX_RESET_FAIL_CNT 5
3974 
3975 	if (hdev->reset_pending) {
3976 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3977 			 hdev->reset_pending);
3978 		return true;
3979 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3980 		   HCLGE_RESET_INT_M) {
3981 		dev_info(&hdev->pdev->dev,
3982 			 "reset failed because new reset interrupt\n");
3983 		hclge_clear_reset_cause(hdev);
3984 		return false;
3985 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3986 		hdev->rst_stats.reset_fail_cnt++;
3987 		set_bit(hdev->reset_type, &hdev->reset_pending);
3988 		dev_info(&hdev->pdev->dev,
3989 			 "re-schedule reset task(%u)\n",
3990 			 hdev->rst_stats.reset_fail_cnt);
3991 		return true;
3992 	}
3993 
3994 	hclge_clear_reset_cause(hdev);
3995 
3996 	/* recover the handshake status when reset fails */
3997 	hclge_reset_handshake(hdev, true);
3998 
3999 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
4000 
4001 	hclge_show_rst_info(hdev);
4002 
4003 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4004 
4005 	return false;
4006 }
4007 
4008 static void hclge_update_reset_level(struct hclge_dev *hdev)
4009 {
4010 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4011 	enum hnae3_reset_type reset_level;
4012 
4013 	/* a reset request will not be set during reset, so clear
4014 	 * the pending reset request to avoid an unnecessary reset
4015 	 * caused by the same reason.
4016 	 */
4017 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4018 
4019 	/* if default_reset_request holds a higher level reset request,
4020 	 * it should be handled as soon as possible, since some errors
4021 	 * need this kind of reset to be fixed.
4022 	 */
4023 	reset_level = hclge_get_reset_level(ae_dev,
4024 					    &hdev->default_reset_request);
4025 	if (reset_level != HNAE3_NONE_RESET)
4026 		set_bit(reset_level, &hdev->reset_request);
4027 }
4028 
4029 static int hclge_set_rst_done(struct hclge_dev *hdev)
4030 {
4031 	struct hclge_pf_rst_done_cmd *req;
4032 	struct hclge_desc desc;
4033 	int ret;
4034 
4035 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4036 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4037 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4038 
4039 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4040 	/* To be compatible with the old firmware, which does not support
4041 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4042 	 * return success
4043 	 */
4044 	if (ret == -EOPNOTSUPP) {
4045 		dev_warn(&hdev->pdev->dev,
4046 			 "current firmware does not support command(0x%x)!\n",
4047 			 HCLGE_OPC_PF_RST_DONE);
4048 		return 0;
4049 	} else if (ret) {
4050 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4051 			ret);
4052 	}
4053 
4054 	return ret;
4055 }
4056 
4057 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4058 {
4059 	int ret = 0;
4060 
4061 	switch (hdev->reset_type) {
4062 	case HNAE3_FUNC_RESET:
4063 	case HNAE3_FLR_RESET:
4064 		ret = hclge_set_all_vf_rst(hdev, false);
4065 		break;
4066 	case HNAE3_GLOBAL_RESET:
4067 	case HNAE3_IMP_RESET:
4068 		ret = hclge_set_rst_done(hdev);
4069 		break;
4070 	default:
4071 		break;
4072 	}
4073 
4074 	/* clear the handshake status after re-initialization is done */
4075 	hclge_reset_handshake(hdev, false);
4076 
4077 	return ret;
4078 }
4079 
4080 static int hclge_reset_stack(struct hclge_dev *hdev)
4081 {
4082 	int ret;
4083 
4084 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4085 	if (ret)
4086 		return ret;
4087 
4088 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4089 	if (ret)
4090 		return ret;
4091 
4092 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4093 }
4094 
4095 static int hclge_reset_prepare(struct hclge_dev *hdev)
4096 {
4097 	int ret;
4098 
4099 	hdev->rst_stats.reset_cnt++;
4100 	/* perform reset of the stack & ae device for a client */
4101 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4102 	if (ret)
4103 		return ret;
4104 
4105 	rtnl_lock();
4106 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4107 	rtnl_unlock();
4108 	if (ret)
4109 		return ret;
4110 
4111 	return hclge_reset_prepare_wait(hdev);
4112 }
4113 
4114 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4115 {
4116 	int ret;
4117 
4118 	hdev->rst_stats.hw_reset_done_cnt++;
4119 
4120 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4121 	if (ret)
4122 		return ret;
4123 
4124 	rtnl_lock();
4125 	ret = hclge_reset_stack(hdev);
4126 	rtnl_unlock();
4127 	if (ret)
4128 		return ret;
4129 
4130 	hclge_clear_reset_cause(hdev);
4131 
4132 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4133 	/* ignore the RoCE notify error if the reset has already failed
4134 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4135 	 */
4136 	if (ret &&
4137 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4138 		return ret;
4139 
4140 	ret = hclge_reset_prepare_up(hdev);
4141 	if (ret)
4142 		return ret;
4143 
4144 	rtnl_lock();
4145 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4146 	rtnl_unlock();
4147 	if (ret)
4148 		return ret;
4149 
4150 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4151 	if (ret)
4152 		return ret;
4153 
4154 	hdev->last_reset_time = jiffies;
4155 	hdev->rst_stats.reset_fail_cnt = 0;
4156 	hdev->rst_stats.reset_done_cnt++;
4157 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4158 
4159 	hclge_update_reset_level(hdev);
4160 
4161 	return 0;
4162 }
4163 
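/* Run one full reset cycle: prepare the stack and hardware, wait for the
 * hardware reset to complete, then rebuild; on any failure let the error
 * handler decide whether to reschedule the reset task.
 */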
4164 static void hclge_reset(struct hclge_dev *hdev)
4165 {
4166 	if (hclge_reset_prepare(hdev))
4167 		goto err_reset;
4168 
4169 	if (hclge_reset_wait(hdev))
4170 		goto err_reset;
4171 
4172 	if (hclge_reset_rebuild(hdev))
4173 		goto err_reset;
4174 
4175 	return;
4176 
4177 err_reset:
4178 	if (hclge_reset_err_handle(hdev))
4179 		hclge_reset_task_schedule(hdev);
4180 }
4181 
4182 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4183 {
4184 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4185 	struct hclge_dev *hdev = ae_dev->priv;
4186 
4187 	/* We might end up getting called broadly because of the 2 cases below:
4188 	 * 1. A recoverable error was conveyed through APEI and the only way to
4189 	 *    bring back normalcy is to reset.
4190 	 * 2. A new reset request from the stack due to timeout.
4191 	 *
4192 	 * Check whether this is a new reset request and we are not here just
4193 	 * because the last reset attempt did not succeed and the watchdog hit
4194 	 * us again. We know it is a new request if the last reset request did
4195 	 * not occur very recently (watchdog timer = 5*HZ; check after a
4196 	 * sufficiently large time, say 4*5*HZ). For a new request we reset the
4197 	 * "reset level" to PF reset. If it is a repeat of the most recent
4198 	 * request, we want to throttle the reset request, so we will not
4199 	 * allow it again before 3*HZ has passed.
4200 	 */
4201 
4202 	if (time_before(jiffies, (hdev->last_reset_time +
4203 				  HCLGE_RESET_INTERVAL))) {
4204 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4205 		return;
4206 	}
4207 
4208 	if (hdev->default_reset_request) {
4209 		hdev->reset_level =
4210 			hclge_get_reset_level(ae_dev,
4211 					      &hdev->default_reset_request);
4212 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4213 		hdev->reset_level = HNAE3_FUNC_RESET;
4214 	}
4215 
4216 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4217 		 hdev->reset_level);
4218 
4219 	/* request reset & schedule reset task */
4220 	set_bit(hdev->reset_level, &hdev->reset_request);
4221 	hclge_reset_task_schedule(hdev);
4222 
4223 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4224 		hdev->reset_level++;
4225 }
4226 
4227 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4228 					enum hnae3_reset_type rst_type)
4229 {
4230 	struct hclge_dev *hdev = ae_dev->priv;
4231 
4232 	set_bit(rst_type, &hdev->default_reset_request);
4233 }
4234 
4235 static void hclge_reset_timer(struct timer_list *t)
4236 {
4237 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4238 
4239 	/* if default_reset_request has no value, it means that this reset
4240 	 * request has already been handled, so just return here
4241 	 */
4242 	if (!hdev->default_reset_request)
4243 		return;
4244 
4245 	dev_info(&hdev->pdev->dev,
4246 		 "triggering reset in reset timer\n");
4247 	hclge_reset_event(hdev->pdev, NULL);
4248 }
4249 
4250 static void hclge_reset_subtask(struct hclge_dev *hdev)
4251 {
4252 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4253 
4254 	/* check if there is any ongoing reset in the hardware. This status can
4255 	 * be checked from reset_pending. If there is one, we need to wait for
4256 	 * the hardware to complete the reset.
4257 	 *    a. If we are able to figure out in reasonable time that the
4258 	 *       hardware has fully reset, we can proceed with the driver and
4259 	 *       client reset.
4260 	 *    b. Otherwise, we can come back later to check this status, so
4261 	 *       reschedule now.
4262 	 */
4263 	hdev->last_reset_time = jiffies;
4264 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4265 	if (hdev->reset_type != HNAE3_NONE_RESET)
4266 		hclge_reset(hdev);
4267 
4268 	/* check if we got any *new* reset requests to be honored */
4269 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4270 	if (hdev->reset_type != HNAE3_NONE_RESET)
4271 		hclge_do_reset(hdev);
4272 
4273 	hdev->reset_type = HNAE3_NONE_RESET;
4274 }
4275 
4276 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4277 {
4278 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4279 	enum hnae3_reset_type reset_type;
4280 
4281 	if (ae_dev->hw_err_reset_req) {
4282 		reset_type = hclge_get_reset_level(ae_dev,
4283 						   &ae_dev->hw_err_reset_req);
4284 		hclge_set_def_reset_request(ae_dev, reset_type);
4285 	}
4286 
4287 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4288 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4289 
4290 	/* enable interrupt after error handling complete */
4291 	hclge_enable_vector(&hdev->misc_vector, true);
4292 }
4293 
4294 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4295 {
4296 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4297 
4298 	ae_dev->hw_err_reset_req = 0;
4299 
4300 	if (hclge_find_error_source(hdev)) {
4301 		hclge_handle_error_info_log(ae_dev);
4302 		hclge_handle_mac_tnl(hdev);
4303 	}
4304 
4305 	hclge_handle_err_reset_request(hdev);
4306 }
4307 
4308 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4309 {
4310 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4311 	struct device *dev = &hdev->pdev->dev;
4312 	u32 msix_sts_reg;
4313 
4314 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4315 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4316 		if (hclge_handle_hw_msix_error
4317 				(hdev, &hdev->default_reset_request))
4318 			dev_info(dev, "received msix interrupt 0x%x\n",
4319 				 msix_sts_reg);
4320 	}
4321 
4322 	hclge_handle_hw_ras_error(ae_dev);
4323 
4324 	hclge_handle_err_reset_request(hdev);
4325 }
4326 
4327 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4328 {
4329 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4330 		return;
4331 
4332 	if (hnae3_dev_ras_imp_supported(hdev))
4333 		hclge_handle_err_recovery(hdev);
4334 	else
4335 		hclge_misc_err_recovery(hdev);
4336 }
4337 
4338 static void hclge_reset_service_task(struct hclge_dev *hdev)
4339 {
4340 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4341 		return;
4342 
4343 	down(&hdev->reset_sem);
4344 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4345 
4346 	hclge_reset_subtask(hdev);
4347 
4348 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4349 	up(&hdev->reset_sem);
4350 }
4351 
4352 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4353 {
4354 	int i;
4355 
4356 	/* start from vport 1, since the PF (vport 0) is always alive */
4357 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4358 		struct hclge_vport *vport = &hdev->vport[i];
4359 
4360 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4361 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4362 
4363 		/* If the VF is not alive, restore the default MPS value */
4364 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4365 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4366 	}
4367 }
4368 
4369 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4370 {
4371 	unsigned long delta = round_jiffies_relative(HZ);
4372 
4373 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4374 		return;
4375 
4376 	/* Always handle the link updating to make sure link state is
4377 	 * updated when it is triggered by mbx.
4378 	 */
4379 	hclge_update_link_status(hdev);
4380 	hclge_sync_mac_table(hdev);
4381 	hclge_sync_promisc_mode(hdev);
4382 	hclge_sync_fd_table(hdev);
4383 
4384 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4385 		delta = jiffies - hdev->last_serv_processed;
4386 
4387 		if (delta < round_jiffies_relative(HZ)) {
4388 			delta = round_jiffies_relative(HZ) - delta;
4389 			goto out;
4390 		}
4391 	}
4392 
4393 	hdev->serv_processed_cnt++;
4394 	hclge_update_vport_alive(hdev);
4395 
4396 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4397 		hdev->last_serv_processed = jiffies;
4398 		goto out;
4399 	}
4400 
4401 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4402 		hclge_update_stats_for_all(hdev);
4403 
4404 	hclge_update_port_info(hdev);
4405 	hclge_sync_vlan_filter(hdev);
4406 
4407 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4408 		hclge_rfs_filter_expire(hdev);
4409 
4410 	hdev->last_serv_processed = jiffies;
4411 
4412 out:
4413 	hclge_task_schedule(hdev, delta);
4414 }
4415 
4416 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4417 {
4418 	unsigned long flags;
4419 
4420 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4421 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4422 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4423 		return;
4424 
4425 	/* to prevent concurrent access with the irq handler */
4426 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4427 
4428 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4429 	 * handler may handle it just before spin_lock_irqsave().
4430 	 */
4431 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4432 		hclge_ptp_clean_tx_hwts(hdev);
4433 
4434 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4435 }
4436 
4437 static void hclge_service_task(struct work_struct *work)
4438 {
4439 	struct hclge_dev *hdev =
4440 		container_of(work, struct hclge_dev, service_task.work);
4441 
4442 	hclge_errhand_service_task(hdev);
4443 	hclge_reset_service_task(hdev);
4444 	hclge_ptp_service_task(hdev);
4445 	hclge_mailbox_service_task(hdev);
4446 	hclge_periodic_service_task(hdev);
4447 
4448 	/* Handle error recovery, reset and mbx again in case the periodic task
4449 	 * delays the handling by calling hclge_task_schedule() in
4450 	 * hclge_periodic_service_task().
4451 	 */
4452 	hclge_errhand_service_task(hdev);
4453 	hclge_reset_service_task(hdev);
4454 	hclge_mailbox_service_task(hdev);
4455 }
4456 
4457 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4458 {
4459 	/* VF handle has no client */
4460 	if (!handle->client)
4461 		return container_of(handle, struct hclge_vport, nic);
4462 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4463 		return container_of(handle, struct hclge_vport, roce);
4464 	else
4465 		return container_of(handle, struct hclge_vport, nic);
4466 }
4467 
4468 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4469 				  struct hnae3_vector_info *vector_info)
4470 {
4471 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4472 
4473 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4474 
4475 	/* need an extended offset to configure vectors >= 64 */
4476 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4477 		vector_info->io_addr = hdev->hw.io_base +
4478 				HCLGE_VECTOR_REG_BASE +
4479 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4480 	else
4481 		vector_info->io_addr = hdev->hw.io_base +
4482 				HCLGE_VECTOR_EXT_REG_BASE +
4483 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4484 				HCLGE_VECTOR_REG_OFFSET_H +
4485 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4486 				HCLGE_VECTOR_REG_OFFSET;
4487 
4488 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4489 	hdev->vector_irq[idx] = vector_info->vector;
4490 }
4491 
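/* Allocate up to vector_num unused MSI-X vectors for the handle and fill in
 * their info; returns the number of vectors actually allocated.
 */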
4492 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4493 			    struct hnae3_vector_info *vector_info)
4494 {
4495 	struct hclge_vport *vport = hclge_get_vport(handle);
4496 	struct hnae3_vector_info *vector = vector_info;
4497 	struct hclge_dev *hdev = vport->back;
4498 	int alloc = 0;
4499 	u16 i = 0;
4500 	u16 j;
4501 
4502 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4503 	vector_num = min(hdev->num_msi_left, vector_num);
4504 
4505 	for (j = 0; j < vector_num; j++) {
4506 		while (++i < hdev->num_nic_msi) {
4507 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4508 				hclge_get_vector_info(hdev, i, vector);
4509 				vector++;
4510 				alloc++;
4511 
4512 				break;
4513 			}
4514 		}
4515 	}
4516 	hdev->num_msi_left -= alloc;
4517 	hdev->num_msi_used += alloc;
4518 
4519 	return alloc;
4520 }
4521 
4522 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4523 {
4524 	int i;
4525 
4526 	for (i = 0; i < hdev->num_msi; i++)
4527 		if (vector == hdev->vector_irq[i])
4528 			return i;
4529 
4530 	return -EINVAL;
4531 }
4532 
4533 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4534 {
4535 	struct hclge_vport *vport = hclge_get_vport(handle);
4536 	struct hclge_dev *hdev = vport->back;
4537 	int vector_id;
4538 
4539 	vector_id = hclge_get_vector_index(hdev, vector);
4540 	if (vector_id < 0) {
4541 		dev_err(&hdev->pdev->dev,
4542 			"Get vector index fail. vector = %d\n", vector);
4543 		return vector_id;
4544 	}
4545 
4546 	hclge_free_vector(hdev, vector_id);
4547 
4548 	return 0;
4549 }
4550 
4551 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4552 {
4553 	return HCLGE_RSS_KEY_SIZE;
4554 }
4555 
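/* Program the RSS hash algorithm and hash key, splitting the key across
 * multiple command descriptors of HCLGE_RSS_HASH_KEY_NUM bytes each.
 */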
4556 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4557 				  const u8 hfunc, const u8 *key)
4558 {
4559 	struct hclge_rss_config_cmd *req;
4560 	unsigned int key_offset = 0;
4561 	struct hclge_desc desc;
4562 	int key_counts;
4563 	int key_size;
4564 	int ret;
4565 
4566 	key_counts = HCLGE_RSS_KEY_SIZE;
4567 	req = (struct hclge_rss_config_cmd *)desc.data;
4568 
4569 	while (key_counts) {
4570 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4571 					   false);
4572 
4573 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4574 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4575 
4576 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4577 		memcpy(req->hash_key,
4578 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4579 
4580 		key_counts -= key_size;
4581 		key_offset++;
4582 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4583 		if (ret) {
4584 			dev_err(&hdev->pdev->dev,
4585 				"Configure RSS config fail, status = %d\n",
4586 				ret);
4587 			return ret;
4588 		}
4589 	}
4590 	return 0;
4591 }
4592 
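/* Write the RSS indirection table to hardware in chunks of
 * HCLGE_RSS_CFG_TBL_SIZE entries, storing the low byte of each queue id in
 * rss_qid_l and its high bit in the packed rss_qid_h bitmap.
 */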
4593 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4594 {
4595 	struct hclge_rss_indirection_table_cmd *req;
4596 	struct hclge_desc desc;
4597 	int rss_cfg_tbl_num;
4598 	u8 rss_msb_oft;
4599 	u8 rss_msb_val;
4600 	int ret;
4601 	u16 qid;
4602 	int i;
4603 	u32 j;
4604 
4605 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4606 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4607 			  HCLGE_RSS_CFG_TBL_SIZE;
4608 
4609 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4610 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INDIR_TABLE,
4611 					   false);
4612 
4613 		req->start_table_index =
4614 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4615 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4616 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4617 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4618 			req->rss_qid_l[j] = qid & 0xff;
4619 			rss_msb_oft =
4620 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4621 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4622 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4623 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4624 		}
4625 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4626 		if (ret) {
4627 			dev_err(&hdev->pdev->dev,
4628 				"Configure rss indir table fail, status = %d\n",
4629 				ret);
4630 			return ret;
4631 		}
4632 	}
4633 	return 0;
4634 }
4635 
4636 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4637 				 u16 *tc_size, u16 *tc_offset)
4638 {
4639 	struct hclge_rss_tc_mode_cmd *req;
4640 	struct hclge_desc desc;
4641 	int ret;
4642 	int i;
4643 
4644 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4645 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4646 
4647 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4648 		u16 mode = 0;
4649 
4650 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4651 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4652 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4653 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4654 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4655 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4656 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4657 
4658 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4659 	}
4660 
4661 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4662 	if (ret)
4663 		dev_err(&hdev->pdev->dev,
4664 			"Configure rss tc mode fail, status = %d\n", ret);
4665 
4666 	return ret;
4667 }
4668 
4669 static void hclge_get_rss_type(struct hclge_vport *vport)
4670 {
4671 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4672 	    vport->rss_tuple_sets.ipv4_udp_en ||
4673 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4674 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4675 	    vport->rss_tuple_sets.ipv6_udp_en ||
4676 	    vport->rss_tuple_sets.ipv6_sctp_en)
4677 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4678 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4679 		 vport->rss_tuple_sets.ipv6_fragment_en)
4680 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4681 	else
4682 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4683 }
4684 
4685 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4686 {
4687 	struct hclge_rss_input_tuple_cmd *req;
4688 	struct hclge_desc desc;
4689 	int ret;
4690 
4691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4692 
4693 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4694 
4695 	/* Get the tuple cfg from pf */
4696 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4697 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4698 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4699 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4700 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4701 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4702 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4703 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4704 	hclge_get_rss_type(&hdev->vport[0]);
4705 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4706 	if (ret)
4707 		dev_err(&hdev->pdev->dev,
4708 			"Configure rss input fail, status = %d\n", ret);
4709 	return ret;
4710 }
4711 
4712 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4713 			 u8 *key, u8 *hfunc)
4714 {
4715 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4716 	struct hclge_vport *vport = hclge_get_vport(handle);
4717 	int i;
4718 
4719 	/* Get hash algorithm */
4720 	if (hfunc) {
4721 		switch (vport->rss_algo) {
4722 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4723 			*hfunc = ETH_RSS_HASH_TOP;
4724 			break;
4725 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4726 			*hfunc = ETH_RSS_HASH_XOR;
4727 			break;
4728 		default:
4729 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4730 			break;
4731 		}
4732 	}
4733 
4734 	/* Get the RSS Key required by the user */
4735 	if (key)
4736 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4737 
4738 	/* Get indirect table */
4739 	if (indir)
4740 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4741 			indir[i] = vport->rss_indirection_tbl[i];
4742 
4743 	return 0;
4744 }
4745 
4746 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4747 				 u8 *hash_algo)
4748 {
4749 	switch (hfunc) {
4750 	case ETH_RSS_HASH_TOP:
4751 		*hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4752 		return 0;
4753 	case ETH_RSS_HASH_XOR:
4754 		*hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4755 		return 0;
4756 	case ETH_RSS_HASH_NO_CHANGE:
4757 		*hash_algo = vport->rss_algo;
4758 		return 0;
4759 	default:
4760 		return -EINVAL;
4761 	}
4762 }
4763 
4764 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4765 			 const u8 *key, const u8 hfunc)
4766 {
4767 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4768 	struct hclge_vport *vport = hclge_get_vport(handle);
4769 	struct hclge_dev *hdev = vport->back;
4770 	u8 hash_algo;
4771 	int ret, i;
4772 
4773 	ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4774 	if (ret) {
4775 		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4776 		return ret;
4777 	}
4778 
4779 	/* Set the RSS hash key if specified by the user */
4780 	if (key) {
4781 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4782 		if (ret)
4783 			return ret;
4784 
4785 		/* Update the shadow RSS key with the user specified key */
4786 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4787 	} else {
4788 		ret = hclge_set_rss_algo_key(hdev, hash_algo,
4789 					     vport->rss_hash_key);
4790 		if (ret)
4791 			return ret;
4792 	}
4793 	vport->rss_algo = hash_algo;
4794 
4795 	/* Update the shadow RSS table with user specified qids */
4796 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4797 		vport->rss_indirection_tbl[i] = indir[i];
4798 
4799 	/* Update the hardware */
4800 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4801 }
4802 
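/* Translate the ethtool RXH_* flags in nfc->data into the driver's tuple
 * bits (source/destination IP and L4 port, plus the verification tag for
 * SCTP flows).
 */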
4803 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4804 {
4805 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4806 
4807 	if (nfc->data & RXH_L4_B_2_3)
4808 		hash_sets |= HCLGE_D_PORT_BIT;
4809 	else
4810 		hash_sets &= ~HCLGE_D_PORT_BIT;
4811 
4812 	if (nfc->data & RXH_IP_SRC)
4813 		hash_sets |= HCLGE_S_IP_BIT;
4814 	else
4815 		hash_sets &= ~HCLGE_S_IP_BIT;
4816 
4817 	if (nfc->data & RXH_IP_DST)
4818 		hash_sets |= HCLGE_D_IP_BIT;
4819 	else
4820 		hash_sets &= ~HCLGE_D_IP_BIT;
4821 
4822 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4823 		hash_sets |= HCLGE_V_TAG_BIT;
4824 
4825 	return hash_sets;
4826 }
4827 
4828 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4829 				    struct ethtool_rxnfc *nfc,
4830 				    struct hclge_rss_input_tuple_cmd *req)
4831 {
4832 	struct hclge_dev *hdev = vport->back;
4833 	u8 tuple_sets;
4834 
4835 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4836 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4837 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4838 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4839 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4840 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4841 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4842 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4843 
4844 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4845 	switch (nfc->flow_type) {
4846 	case TCP_V4_FLOW:
4847 		req->ipv4_tcp_en = tuple_sets;
4848 		break;
4849 	case TCP_V6_FLOW:
4850 		req->ipv6_tcp_en = tuple_sets;
4851 		break;
4852 	case UDP_V4_FLOW:
4853 		req->ipv4_udp_en = tuple_sets;
4854 		break;
4855 	case UDP_V6_FLOW:
4856 		req->ipv6_udp_en = tuple_sets;
4857 		break;
4858 	case SCTP_V4_FLOW:
4859 		req->ipv4_sctp_en = tuple_sets;
4860 		break;
4861 	case SCTP_V6_FLOW:
4862 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4863 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4864 			return -EINVAL;
4865 
4866 		req->ipv6_sctp_en = tuple_sets;
4867 		break;
4868 	case IPV4_FLOW:
4869 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4870 		break;
4871 	case IPV6_FLOW:
4872 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4873 		break;
4874 	default:
4875 		return -EINVAL;
4876 	}
4877 
4878 	return 0;
4879 }
4880 
4881 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4882 			       struct ethtool_rxnfc *nfc)
4883 {
4884 	struct hclge_vport *vport = hclge_get_vport(handle);
4885 	struct hclge_dev *hdev = vport->back;
4886 	struct hclge_rss_input_tuple_cmd *req;
4887 	struct hclge_desc desc;
4888 	int ret;
4889 
4890 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4891 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4892 		return -EINVAL;
4893 
4894 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4895 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4896 
4897 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4898 	if (ret) {
4899 		dev_err(&hdev->pdev->dev,
4900 			"failed to init rss tuple cmd, ret = %d\n", ret);
4901 		return ret;
4902 	}
4903 
4904 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4905 	if (ret) {
4906 		dev_err(&hdev->pdev->dev,
4907 			"Set rss tuple fail, status = %d\n", ret);
4908 		return ret;
4909 	}
4910 
4911 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4912 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4913 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4914 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4915 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4916 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4917 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4918 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4919 	hclge_get_rss_type(vport);
4920 	return 0;
4921 }
4922 
4923 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4924 				     u8 *tuple_sets)
4925 {
4926 	switch (flow_type) {
4927 	case TCP_V4_FLOW:
4928 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4929 		break;
4930 	case UDP_V4_FLOW:
4931 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4932 		break;
4933 	case TCP_V6_FLOW:
4934 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4935 		break;
4936 	case UDP_V6_FLOW:
4937 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4938 		break;
4939 	case SCTP_V4_FLOW:
4940 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4941 		break;
4942 	case SCTP_V6_FLOW:
4943 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4944 		break;
4945 	case IPV4_FLOW:
4946 	case IPV6_FLOW:
4947 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4948 		break;
4949 	default:
4950 		return -EINVAL;
4951 	}
4952 
4953 	return 0;
4954 }
4955 
4956 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4957 {
4958 	u64 tuple_data = 0;
4959 
4960 	if (tuple_sets & HCLGE_D_PORT_BIT)
4961 		tuple_data |= RXH_L4_B_2_3;
4962 	if (tuple_sets & HCLGE_S_PORT_BIT)
4963 		tuple_data |= RXH_L4_B_0_1;
4964 	if (tuple_sets & HCLGE_D_IP_BIT)
4965 		tuple_data |= RXH_IP_DST;
4966 	if (tuple_sets & HCLGE_S_IP_BIT)
4967 		tuple_data |= RXH_IP_SRC;
4968 
4969 	return tuple_data;
4970 }
4971 
4972 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4973 			       struct ethtool_rxnfc *nfc)
4974 {
4975 	struct hclge_vport *vport = hclge_get_vport(handle);
4976 	u8 tuple_sets;
4977 	int ret;
4978 
4979 	nfc->data = 0;
4980 
4981 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4982 	if (ret || !tuple_sets)
4983 		return ret;
4984 
4985 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4986 
4987 	return 0;
4988 }
4989 
4990 static int hclge_get_tc_size(struct hnae3_handle *handle)
4991 {
4992 	struct hclge_vport *vport = hclge_get_vport(handle);
4993 	struct hclge_dev *hdev = vport->back;
4994 
4995 	return hdev->pf_rss_size_max;
4996 }
4997 
4998 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4999 {
5000 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5001 	struct hclge_vport *vport = hdev->vport;
5002 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5003 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5004 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5005 	struct hnae3_tc_info *tc_info;
5006 	u16 roundup_size;
5007 	u16 rss_size;
5008 	int i;
5009 
5010 	tc_info = &vport->nic.kinfo.tc_info;
5011 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5012 		rss_size = tc_info->tqp_count[i];
5013 		tc_valid[i] = 0;
5014 
5015 		if (!(hdev->hw_tc_map & BIT(i)))
5016 			continue;
5017 
5018 		/* tc_size set to hardware is the log2 of the roundup power of
5019 		 * two of rss_size; the actual queue size is limited by the
5020 		 * indirection table.
5021 		 */
5022 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5023 		    rss_size == 0) {
5024 			dev_err(&hdev->pdev->dev,
5025 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
5026 				rss_size);
5027 			return -EINVAL;
5028 		}
5029 
5030 		roundup_size = roundup_pow_of_two(rss_size);
5031 		roundup_size = ilog2(roundup_size);
5032 
5033 		tc_valid[i] = 1;
5034 		tc_size[i] = roundup_size;
5035 		tc_offset[i] = tc_info->tqp_offset[i];
5036 	}
5037 
5038 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5039 }
5040 
5041 int hclge_rss_init_hw(struct hclge_dev *hdev)
5042 {
5043 	struct hclge_vport *vport = hdev->vport;
5044 	u16 *rss_indir = vport[0].rss_indirection_tbl;
5045 	u8 *key = vport[0].rss_hash_key;
5046 	u8 hfunc = vport[0].rss_algo;
5047 	int ret;
5048 
5049 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
5050 	if (ret)
5051 		return ret;
5052 
5053 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5054 	if (ret)
5055 		return ret;
5056 
5057 	ret = hclge_set_rss_input_tuple(hdev);
5058 	if (ret)
5059 		return ret;
5060 
5061 	return hclge_init_rss_tc_mode(hdev);
5062 }
5063 
5064 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5065 {
5066 	struct hclge_vport *vport = &hdev->vport[0];
5067 	int i;
5068 
5069 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5070 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5071 }
5072 
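/* Set up the default RSS configuration for the PF vport: hash algorithm,
 * tuple settings, hash key and a freshly allocated indirection table.
 */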
5073 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5074 {
5075 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5076 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5077 	struct hclge_vport *vport = &hdev->vport[0];
5078 	u16 *rss_ind_tbl;
5079 
5080 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5081 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5082 
5083 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5084 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5085 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5086 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5087 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5088 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5089 	vport->rss_tuple_sets.ipv6_sctp_en =
5090 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5091 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5092 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5093 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5094 
5095 	vport->rss_algo = rss_algo;
5096 
5097 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5098 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5099 	if (!rss_ind_tbl)
5100 		return -ENOMEM;
5101 
5102 	vport->rss_indirection_tbl = rss_ind_tbl;
5103 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5104 
5105 	hclge_rss_indir_init_cfg(hdev);
5106 
5107 	return 0;
5108 }
5109 
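/* Map (en == true) or unmap the rings in ring_chain to the given vector,
 * batching up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries per command.
 */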
5110 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5111 				int vector_id, bool en,
5112 				struct hnae3_ring_chain_node *ring_chain)
5113 {
5114 	struct hclge_dev *hdev = vport->back;
5115 	struct hnae3_ring_chain_node *node;
5116 	struct hclge_desc desc;
5117 	struct hclge_ctrl_vector_chain_cmd *req =
5118 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5119 	enum hclge_cmd_status status;
5120 	enum hclge_opcode_type op;
5121 	u16 tqp_type_and_id;
5122 	int i;
5123 
5124 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5125 	hclge_cmd_setup_basic_desc(&desc, op, false);
5126 	req->int_vector_id_l = hnae3_get_field(vector_id,
5127 					       HCLGE_VECTOR_ID_L_M,
5128 					       HCLGE_VECTOR_ID_L_S);
5129 	req->int_vector_id_h = hnae3_get_field(vector_id,
5130 					       HCLGE_VECTOR_ID_H_M,
5131 					       HCLGE_VECTOR_ID_H_S);
5132 
5133 	i = 0;
5134 	for (node = ring_chain; node; node = node->next) {
5135 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5136 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5137 				HCLGE_INT_TYPE_S,
5138 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5139 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5140 				HCLGE_TQP_ID_S, node->tqp_index);
5141 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5142 				HCLGE_INT_GL_IDX_S,
5143 				hnae3_get_field(node->int_gl_idx,
5144 						HNAE3_RING_GL_IDX_M,
5145 						HNAE3_RING_GL_IDX_S));
5146 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5147 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5148 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5149 			req->vfid = vport->vport_id;
5150 
5151 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5152 			if (status) {
5153 				dev_err(&hdev->pdev->dev,
5154 					"Map TQP fail, status is %d.\n",
5155 					status);
5156 				return -EIO;
5157 			}
5158 			i = 0;
5159 
5160 			hclge_cmd_setup_basic_desc(&desc,
5161 						   op,
5162 						   false);
5163 			req->int_vector_id_l =
5164 				hnae3_get_field(vector_id,
5165 						HCLGE_VECTOR_ID_L_M,
5166 						HCLGE_VECTOR_ID_L_S);
5167 			req->int_vector_id_h =
5168 				hnae3_get_field(vector_id,
5169 						HCLGE_VECTOR_ID_H_M,
5170 						HCLGE_VECTOR_ID_H_S);
5171 		}
5172 	}
5173 
5174 	if (i > 0) {
5175 		req->int_cause_num = i;
5176 		req->vfid = vport->vport_id;
5177 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5178 		if (status) {
5179 			dev_err(&hdev->pdev->dev,
5180 				"Map TQP fail, status is %d.\n", status);
5181 			return -EIO;
5182 		}
5183 	}
5184 
5185 	return 0;
5186 }
5187 
5188 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5189 				    struct hnae3_ring_chain_node *ring_chain)
5190 {
5191 	struct hclge_vport *vport = hclge_get_vport(handle);
5192 	struct hclge_dev *hdev = vport->back;
5193 	int vector_id;
5194 
5195 	vector_id = hclge_get_vector_index(hdev, vector);
5196 	if (vector_id < 0) {
5197 		dev_err(&hdev->pdev->dev,
5198 			"failed to get vector index. vector=%d\n", vector);
5199 		return vector_id;
5200 	}
5201 
5202 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5203 }
5204 
5205 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5206 				       struct hnae3_ring_chain_node *ring_chain)
5207 {
5208 	struct hclge_vport *vport = hclge_get_vport(handle);
5209 	struct hclge_dev *hdev = vport->back;
5210 	int vector_id, ret;
5211 
5212 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5213 		return 0;
5214 
5215 	vector_id = hclge_get_vector_index(hdev, vector);
5216 	if (vector_id < 0) {
5217 		dev_err(&handle->pdev->dev,
5218 			"Get vector index fail. ret =%d\n", vector_id);
5219 		return vector_id;
5220 	}
5221 
5222 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5223 	if (ret)
5224 		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret = %d\n",
5226 			vector_id, ret);
5227 
5228 	return ret;
5229 }
5230 
5231 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5232 				      bool en_uc, bool en_mc, bool en_bc)
5233 {
5234 	struct hclge_vport *vport = &hdev->vport[vf_id];
5235 	struct hnae3_handle *handle = &vport->nic;
5236 	struct hclge_promisc_cfg_cmd *req;
5237 	struct hclge_desc desc;
5238 	bool uc_tx_en = en_uc;
5239 	u8 promisc_cfg = 0;
5240 	int ret;
5241 
5242 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5243 
5244 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5245 	req->vf_id = vf_id;
5246 
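	/* when the limit-promisc private flag is set, unicast tx promisc is
	 * left disabled, so unicast promisc only takes effect on the rx side
	 */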
5247 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5248 		uc_tx_en = false;
5249 
5250 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5251 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5252 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5253 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5254 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5255 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5256 	req->extend_promisc = promisc_cfg;
5257 
5258 	/* to be compatible with DEVICE_VERSION_V1/2 */
5259 	promisc_cfg = 0;
5260 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5261 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5262 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5263 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5264 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5265 	req->promisc = promisc_cfg;
5266 
5267 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5268 	if (ret)
5269 		dev_err(&hdev->pdev->dev,
5270 			"failed to set vport %u promisc mode, ret = %d.\n",
5271 			vf_id, ret);
5272 
5273 	return ret;
5274 }
5275 
5276 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5277 				 bool en_mc_pmc, bool en_bc_pmc)
5278 {
5279 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5280 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5281 }
5282 
5283 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5284 				  bool en_mc_pmc)
5285 {
5286 	struct hclge_vport *vport = hclge_get_vport(handle);
5287 	struct hclge_dev *hdev = vport->back;
5288 	bool en_bc_pmc = true;
5289 
	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should be disabled until the user enables promisc mode.
	 */
5294 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5295 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5296 
5297 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5298 					    en_bc_pmc);
5299 }
5300 
5301 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5302 {
5303 	struct hclge_vport *vport = hclge_get_vport(handle);
5304 
5305 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5306 }
5307 
5308 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5309 {
5310 	if (hlist_empty(&hdev->fd_rule_list))
5311 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5312 }
5313 
5314 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5315 {
5316 	if (!test_bit(location, hdev->fd_bmap)) {
5317 		set_bit(location, hdev->fd_bmap);
5318 		hdev->hclge_fd_rule_num++;
5319 	}
5320 }
5321 
5322 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5323 {
5324 	if (test_bit(location, hdev->fd_bmap)) {
5325 		clear_bit(location, hdev->fd_bmap);
5326 		hdev->hclge_fd_rule_num--;
5327 	}
5328 }
5329 
5330 static void hclge_fd_free_node(struct hclge_dev *hdev,
5331 			       struct hclge_fd_rule *rule)
5332 {
5333 	hlist_del(&rule->rule_node);
5334 	kfree(rule);
5335 	hclge_sync_fd_state(hdev);
5336 }
5337 
5338 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5339 				      struct hclge_fd_rule *old_rule,
5340 				      struct hclge_fd_rule *new_rule,
5341 				      enum HCLGE_FD_NODE_STATE state)
5342 {
5343 	switch (state) {
5344 	case HCLGE_FD_TO_ADD:
5345 	case HCLGE_FD_ACTIVE:
5346 		/* 1) if the new state is TO_ADD, just replace the old rule
5347 		 * with the same location, no matter its state, because the
5348 		 * new rule will be configured to the hardware.
5349 		 * 2) if the new state is ACTIVE, it means the new rule
5350 		 * has been configured to the hardware, so just replace
5351 		 * the old rule node with the same location.
		 * 3) since this doesn't add a new node to the list, there is
		 * no need to update the rule number or fd_bmap.
5354 		 */
5355 		new_rule->rule_node.next = old_rule->rule_node.next;
5356 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5357 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5358 		kfree(new_rule);
5359 		break;
5360 	case HCLGE_FD_DELETED:
5361 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5362 		hclge_fd_free_node(hdev, old_rule);
5363 		break;
5364 	case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL, and the old rule exists:
		 * 1) if the state of the old rule is TO_DEL, we need do
		 * nothing, because we delete the rule by location, so the
		 * other rule content is unnecessary.
		 * 2) if the state of the old rule is ACTIVE, we need to change
		 * its state to TO_DEL, so the rule will be deleted when the
		 * periodic task is scheduled.
		 * 3) if the state of the old rule is TO_ADD, it means the rule
		 * hasn't been added to the hardware, so we just delete the
		 * rule node from fd_rule_list directly.
		 */
5376 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5377 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5378 			hclge_fd_free_node(hdev, old_rule);
5379 			return;
5380 		}
5381 		old_rule->state = HCLGE_FD_TO_DEL;
5382 		break;
5383 	}
5384 }
5385 
5386 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5387 						u16 location,
5388 						struct hclge_fd_rule **parent)
5389 {
5390 	struct hclge_fd_rule *rule;
5391 	struct hlist_node *node;
5392 
5393 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5394 		if (rule->location == location)
5395 			return rule;
5396 		else if (rule->location > location)
5397 			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
5401 		*parent = rule;
5402 	}
5403 
5404 	return NULL;
5405 }
5406 
/* insert fd rule node in ascending order according to rule->location */
5408 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5409 				      struct hclge_fd_rule *rule,
5410 				      struct hclge_fd_rule *parent)
5411 {
5412 	INIT_HLIST_NODE(&rule->rule_node);
5413 
5414 	if (parent)
5415 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5416 	else
5417 		hlist_add_head(&rule->rule_node, hlist);
5418 }
5419 
5420 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5421 				     struct hclge_fd_user_def_cfg *cfg)
5422 {
5423 	struct hclge_fd_user_def_cfg_cmd *req;
5424 	struct hclge_desc desc;
5425 	u16 data = 0;
5426 	int ret;
5427 
5428 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5429 
5430 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5431 
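	/* cfg[0]/cfg[1]/cfg[2] carry the user-def enable bit and offset for
	 * the L2/L3/L4 layers, written to the ol2/ol3/ol4 fields below
	 */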
5432 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5433 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5434 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5435 	req->ol2_cfg = cpu_to_le16(data);
5436 
5437 	data = 0;
5438 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5439 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5440 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5441 	req->ol3_cfg = cpu_to_le16(data);
5442 
5443 	data = 0;
5444 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5445 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5446 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5447 	req->ol4_cfg = cpu_to_le16(data);
5448 
5449 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5450 	if (ret)
5451 		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret = %d\n", ret);
5453 	return ret;
5454 }
5455 
5456 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5457 {
5458 	int ret;
5459 
5460 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5461 		return;
5462 
5463 	if (!locked)
5464 		spin_lock_bh(&hdev->fd_rule_lock);
5465 
5466 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
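	/* on failure, set the CHANGED flag again so the config is retried
	 * the next time this sync runs
	 */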
5467 	if (ret)
5468 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5469 
5470 	if (!locked)
5471 		spin_unlock_bh(&hdev->fd_rule_lock);
5472 }
5473 
5474 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5475 					  struct hclge_fd_rule *rule)
5476 {
5477 	struct hlist_head *hlist = &hdev->fd_rule_list;
5478 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5479 	struct hclge_fd_user_def_info *info, *old_info;
5480 	struct hclge_fd_user_def_cfg *cfg;
5481 
5482 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5483 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5484 		return 0;
5485 
	/* the valid layer starts from 1, so minus 1 to get the cfg index */
5487 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5488 	info = &rule->ep.user_def;
5489 
5490 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5491 		return 0;
5492 
5493 	if (cfg->ref_cnt > 1)
5494 		goto error;
5495 
5496 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5497 	if (fd_rule) {
5498 		old_info = &fd_rule->ep.user_def;
5499 		if (info->layer == old_info->layer)
5500 			return 0;
5501 	}
5502 
5503 error:
5504 	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5506 		info->layer + 1);
5507 	return -ENOSPC;
5508 }
5509 
5510 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5511 					 struct hclge_fd_rule *rule)
5512 {
5513 	struct hclge_fd_user_def_cfg *cfg;
5514 
5515 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5516 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5517 		return;
5518 
5519 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5520 	if (!cfg->ref_cnt) {
5521 		cfg->offset = rule->ep.user_def.offset;
5522 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5523 	}
5524 	cfg->ref_cnt++;
5525 }
5526 
5527 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5528 					 struct hclge_fd_rule *rule)
5529 {
5530 	struct hclge_fd_user_def_cfg *cfg;
5531 
5532 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5533 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5534 		return;
5535 
5536 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5537 	if (!cfg->ref_cnt)
5538 		return;
5539 
5540 	cfg->ref_cnt--;
5541 	if (!cfg->ref_cnt) {
5542 		cfg->offset = 0;
5543 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5544 	}
5545 }
5546 
5547 static void hclge_update_fd_list(struct hclge_dev *hdev,
5548 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5549 				 struct hclge_fd_rule *new_rule)
5550 {
5551 	struct hlist_head *hlist = &hdev->fd_rule_list;
5552 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5553 
5554 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5555 	if (fd_rule) {
5556 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5557 		if (state == HCLGE_FD_ACTIVE)
5558 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5559 		hclge_sync_fd_user_def_cfg(hdev, true);
5560 
5561 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5562 		return;
5563 	}
5564 
	/* it's unlikely to fail here, because we have already checked that
	 * the rule exists.
	 */
5568 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5569 		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
5571 			 location);
5572 		return;
5573 	}
5574 
5575 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5576 	hclge_sync_fd_user_def_cfg(hdev, true);
5577 
5578 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5579 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5580 
5581 	if (state == HCLGE_FD_TO_ADD) {
5582 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5583 		hclge_task_schedule(hdev, 0);
5584 	}
5585 }
5586 
5587 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5588 {
5589 	struct hclge_get_fd_mode_cmd *req;
5590 	struct hclge_desc desc;
5591 	int ret;
5592 
5593 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5594 
5595 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5596 
5597 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5598 	if (ret) {
5599 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5600 		return ret;
5601 	}
5602 
5603 	*fd_mode = req->mode;
5604 
5605 	return ret;
5606 }
5607 
5608 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5609 				   u32 *stage1_entry_num,
5610 				   u32 *stage2_entry_num,
5611 				   u16 *stage1_counter_num,
5612 				   u16 *stage2_counter_num)
5613 {
5614 	struct hclge_get_fd_allocation_cmd *req;
5615 	struct hclge_desc desc;
5616 	int ret;
5617 
5618 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5619 
5620 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5621 
5622 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5623 	if (ret) {
5624 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5625 			ret);
5626 		return ret;
5627 	}
5628 
5629 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5630 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5631 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5632 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5633 
5634 	return ret;
5635 }
5636 
5637 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5638 				   enum HCLGE_FD_STAGE stage_num)
5639 {
5640 	struct hclge_set_fd_key_config_cmd *req;
5641 	struct hclge_fd_key_cfg *stage;
5642 	struct hclge_desc desc;
5643 	int ret;
5644 
5645 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5646 
5647 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5648 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5649 	req->stage = stage_num;
5650 	req->key_select = stage->key_sel;
5651 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5652 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5653 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5654 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5655 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5656 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5657 
5658 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5659 	if (ret)
5660 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5661 
5662 	return ret;
5663 }
5664 
5665 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5666 {
5667 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5668 
5669 	spin_lock_bh(&hdev->fd_rule_lock);
5670 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5671 	spin_unlock_bh(&hdev->fd_rule_lock);
5672 
5673 	hclge_fd_set_user_def_cmd(hdev, cfg);
5674 }
5675 
5676 static int hclge_init_fd_config(struct hclge_dev *hdev)
5677 {
5678 #define LOW_2_WORDS		0x03
5679 	struct hclge_fd_key_cfg *key_cfg;
5680 	int ret;
5681 
5682 	if (!hnae3_dev_fd_supported(hdev))
5683 		return 0;
5684 
5685 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5686 	if (ret)
5687 		return ret;
5688 
5689 	switch (hdev->fd_cfg.fd_mode) {
5690 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5691 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5692 		break;
5693 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5694 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5695 		break;
5696 	default:
5697 		dev_err(&hdev->pdev->dev,
5698 			"Unsupported flow director mode %u\n",
5699 			hdev->fd_cfg.fd_mode);
5700 		return -EOPNOTSUPP;
5701 	}
5702 
5703 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5704 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5705 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5706 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5707 	key_cfg->outer_sipv6_word_en = 0;
5708 	key_cfg->outer_dipv6_word_en = 0;
5709 
5710 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5711 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5712 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5713 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5714 
	/* If the max 400-bit key is used, ether type tuples are also supported */
5716 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5717 		key_cfg->tuple_active |=
5718 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5719 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5720 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5721 	}
5722 
5723 	/* roce_type is used to filter roce frames
5724 	 * dst_vport is used to specify the rule
5725 	 */
5726 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5727 
5728 	ret = hclge_get_fd_allocation(hdev,
5729 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5730 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5731 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5732 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5733 	if (ret)
5734 		return ret;
5735 
5736 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5737 }
5738 
5739 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5740 				int loc, u8 *key, bool is_add)
5741 {
5742 	struct hclge_fd_tcam_config_1_cmd *req1;
5743 	struct hclge_fd_tcam_config_2_cmd *req2;
5744 	struct hclge_fd_tcam_config_3_cmd *req3;
5745 	struct hclge_desc desc[3];
5746 	int ret;
5747 
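	/* the TCAM key spans three descriptors, which are chained together
	 * with the NEXT flag and sent as one command
	 */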
5748 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5749 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5750 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5751 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5752 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5753 
5754 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5755 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5756 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5757 
5758 	req1->stage = stage;
5759 	req1->xy_sel = sel_x ? 1 : 0;
5760 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5761 	req1->index = cpu_to_le32(loc);
5762 	req1->entry_vld = sel_x ? is_add : 0;
5763 
5764 	if (key) {
5765 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5766 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5767 		       sizeof(req2->tcam_data));
5768 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5769 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5770 	}
5771 
5772 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5773 	if (ret)
5774 		dev_err(&hdev->pdev->dev,
5775 			"config tcam key fail, ret=%d\n",
5776 			ret);
5777 
5778 	return ret;
5779 }
5780 
5781 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5782 			      struct hclge_fd_ad_data *action)
5783 {
5784 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5785 	struct hclge_fd_ad_config_cmd *req;
5786 	struct hclge_desc desc;
5787 	u64 ad_data = 0;
5788 	int ret;
5789 
5790 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5791 
5792 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5793 	req->index = cpu_to_le32(loc);
5794 	req->stage = stage;
5795 
5796 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5797 		      action->write_rule_id_to_bd);
5798 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5799 			action->rule_id);
5800 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5801 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5802 			      action->override_tc);
5803 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5804 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5805 	}
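	/* the rule id fields above occupy the high 32 bits of ad_data; shift
	 * them up before filling the action fields into the low 32 bits
	 */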
5806 	ad_data <<= 32;
5807 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5808 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5809 		      action->forward_to_direct_queue);
5810 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5811 			action->queue_id);
5812 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5813 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5814 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5815 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->next_input_key);
5818 
5819 	req->ad_data = cpu_to_le64(ad_data);
5820 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5821 	if (ret)
5822 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5823 
5824 	return ret;
5825 }
5826 
5827 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5828 				   struct hclge_fd_rule *rule)
5829 {
5830 	int offset, moffset, ip_offset;
5831 	enum HCLGE_FD_KEY_OPT key_opt;
5832 	u16 tmp_x_s, tmp_y_s;
5833 	u32 tmp_x_l, tmp_y_l;
5834 	u8 *p = (u8 *)rule;
5835 	int i;
5836 
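	/* an unused tuple is left as zero in the key, but still occupies its
	 * space, so report it as handled
	 */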
5837 	if (rule->unused_tuple & BIT(tuple_bit))
5838 		return true;
5839 
5840 	key_opt = tuple_key_info[tuple_bit].key_opt;
5841 	offset = tuple_key_info[tuple_bit].offset;
5842 	moffset = tuple_key_info[tuple_bit].moffset;
5843 
5844 	switch (key_opt) {
5845 	case KEY_OPT_U8:
5846 		calc_x(*key_x, p[offset], p[moffset]);
5847 		calc_y(*key_y, p[offset], p[moffset]);
5848 
5849 		return true;
5850 	case KEY_OPT_LE16:
5851 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5852 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5853 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5854 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5855 
5856 		return true;
5857 	case KEY_OPT_LE32:
5858 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5859 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5860 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5861 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5862 
5863 		return true;
5864 	case KEY_OPT_MAC:
5865 		for (i = 0; i < ETH_ALEN; i++) {
5866 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5867 			       p[moffset + i]);
5868 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5869 			       p[moffset + i]);
5870 		}
5871 
5872 		return true;
5873 	case KEY_OPT_IP:
5874 		ip_offset = IPV4_INDEX * sizeof(u32);
5875 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5876 		       *(u32 *)(&p[moffset + ip_offset]));
5877 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5878 		       *(u32 *)(&p[moffset + ip_offset]));
5879 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5880 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5881 
5882 		return true;
5883 	default:
5884 		return false;
5885 	}
5886 }
5887 
5888 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5889 				 u8 vf_id, u8 network_port_id)
5890 {
5891 	u32 port_number = 0;
5892 
5893 	if (port_type == HOST_PORT) {
5894 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5895 				pf_id);
5896 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5897 				vf_id);
5898 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5899 	} else {
5900 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5901 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5902 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5903 	}
5904 
5905 	return port_number;
5906 }
5907 
5908 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5909 				       __le32 *key_x, __le32 *key_y,
5910 				       struct hclge_fd_rule *rule)
5911 {
5912 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5913 	u8 cur_pos = 0, tuple_size, shift_bits;
5914 	unsigned int i;
5915 
5916 	for (i = 0; i < MAX_META_DATA; i++) {
5917 		tuple_size = meta_data_key_info[i].key_length;
5918 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5919 
5920 		switch (tuple_bit) {
5921 		case BIT(ROCE_TYPE):
5922 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5923 			cur_pos += tuple_size;
5924 			break;
5925 		case BIT(DST_VPORT):
5926 			port_number = hclge_get_port_number(HOST_PORT, 0,
5927 							    rule->vf_id, 0);
5928 			hnae3_set_field(meta_data,
5929 					GENMASK(cur_pos + tuple_size, cur_pos),
5930 					cur_pos, port_number);
5931 			cur_pos += tuple_size;
5932 			break;
5933 		default:
5934 			break;
5935 		}
5936 	}
5937 
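	/* X/Y are derived from the packed meta data with a full mask, then
	 * left-aligned so the meta data sits in the MSBs of the key word
	 */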
5938 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5939 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5940 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5941 
5942 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5943 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5944 }
5945 
/* A complete key consists of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region, the tuple key is stored
 * in the LSB region, and unused bits are filled with 0.
 */
5950 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5951 			    struct hclge_fd_rule *rule)
5952 {
5953 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5954 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5955 	u8 *cur_key_x, *cur_key_y;
5956 	u8 meta_data_region;
5957 	u8 tuple_size;
5958 	int ret;
5959 	u32 i;
5960 
5961 	memset(key_x, 0, sizeof(key_x));
5962 	memset(key_y, 0, sizeof(key_y));
5963 	cur_key_x = key_x;
5964 	cur_key_y = key_y;
5965 
5966 	for (i = 0; i < MAX_TUPLE; i++) {
5967 		bool tuple_valid;
5968 
5969 		tuple_size = tuple_key_info[i].key_length / 8;
5970 		if (!(key_cfg->tuple_active & BIT(i)))
5971 			continue;
5972 
5973 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5974 						     cur_key_y, rule);
5975 		if (tuple_valid) {
5976 			cur_key_x += tuple_size;
5977 			cur_key_y += tuple_size;
5978 		}
5979 	}
5980 
5981 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5982 			MAX_META_DATA_LENGTH / 8;
5983 
5984 	hclge_fd_convert_meta_data(key_cfg,
5985 				   (__le32 *)(key_x + meta_data_region),
5986 				   (__le32 *)(key_y + meta_data_region),
5987 				   rule);
5988 
5989 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5990 				   true);
5991 	if (ret) {
5992 		dev_err(&hdev->pdev->dev,
5993 			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5995 		return ret;
5996 	}
5997 
5998 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5999 				   true);
6000 	if (ret)
6001 		dev_err(&hdev->pdev->dev,
6002 			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
6004 	return ret;
6005 }
6006 
6007 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6008 			       struct hclge_fd_rule *rule)
6009 {
6010 	struct hclge_vport *vport = hdev->vport;
6011 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6012 	struct hclge_fd_ad_data ad_data;
6013 
6014 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6015 	ad_data.ad_id = rule->location;
6016 
6017 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6018 		ad_data.drop_packet = true;
6019 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6020 		ad_data.override_tc = true;
6021 		ad_data.queue_id =
6022 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6023 		ad_data.tc_size =
6024 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6025 	} else {
6026 		ad_data.forward_to_direct_queue = true;
6027 		ad_data.queue_id = rule->queue_id;
6028 	}
6029 
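	/* when stage-1 counters are available, the rule uses the counter
	 * indexed by its vf id (modulo the stage-1 counter number)
	 */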
6030 	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6031 		ad_data.use_counter = true;
6032 		ad_data.counter_id = rule->vf_id %
6033 				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6034 	} else {
6035 		ad_data.use_counter = false;
6036 		ad_data.counter_id = 0;
6037 	}
6038 
6039 	ad_data.use_next_stage = false;
6040 	ad_data.next_input_key = 0;
6041 
6042 	ad_data.write_rule_id_to_bd = true;
6043 	ad_data.rule_id = rule->location;
6044 
6045 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6046 }
6047 
6048 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6049 				       u32 *unused_tuple)
6050 {
6051 	if (!spec || !unused_tuple)
6052 		return -EINVAL;
6053 
6054 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6055 
6056 	if (!spec->ip4src)
6057 		*unused_tuple |= BIT(INNER_SRC_IP);
6058 
6059 	if (!spec->ip4dst)
6060 		*unused_tuple |= BIT(INNER_DST_IP);
6061 
6062 	if (!spec->psrc)
6063 		*unused_tuple |= BIT(INNER_SRC_PORT);
6064 
6065 	if (!spec->pdst)
6066 		*unused_tuple |= BIT(INNER_DST_PORT);
6067 
6068 	if (!spec->tos)
6069 		*unused_tuple |= BIT(INNER_IP_TOS);
6070 
6071 	return 0;
6072 }
6073 
6074 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6075 				    u32 *unused_tuple)
6076 {
6077 	if (!spec || !unused_tuple)
6078 		return -EINVAL;
6079 
6080 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6081 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6082 
6083 	if (!spec->ip4src)
6084 		*unused_tuple |= BIT(INNER_SRC_IP);
6085 
6086 	if (!spec->ip4dst)
6087 		*unused_tuple |= BIT(INNER_DST_IP);
6088 
6089 	if (!spec->tos)
6090 		*unused_tuple |= BIT(INNER_IP_TOS);
6091 
6092 	if (!spec->proto)
6093 		*unused_tuple |= BIT(INNER_IP_PROTO);
6094 
6095 	if (spec->l4_4_bytes)
6096 		return -EOPNOTSUPP;
6097 
6098 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6099 		return -EOPNOTSUPP;
6100 
6101 	return 0;
6102 }
6103 
6104 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6105 				       u32 *unused_tuple)
6106 {
6107 	if (!spec || !unused_tuple)
6108 		return -EINVAL;
6109 
6110 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6111 
	/* check whether the src/dst ip address is used */
6113 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6114 		*unused_tuple |= BIT(INNER_SRC_IP);
6115 
6116 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6117 		*unused_tuple |= BIT(INNER_DST_IP);
6118 
6119 	if (!spec->psrc)
6120 		*unused_tuple |= BIT(INNER_SRC_PORT);
6121 
6122 	if (!spec->pdst)
6123 		*unused_tuple |= BIT(INNER_DST_PORT);
6124 
6125 	if (!spec->tclass)
6126 		*unused_tuple |= BIT(INNER_IP_TOS);
6127 
6128 	return 0;
6129 }
6130 
6131 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6132 				    u32 *unused_tuple)
6133 {
6134 	if (!spec || !unused_tuple)
6135 		return -EINVAL;
6136 
6137 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6138 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6139 
	/* check whether the src/dst ip address is used */
6141 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6142 		*unused_tuple |= BIT(INNER_SRC_IP);
6143 
6144 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6145 		*unused_tuple |= BIT(INNER_DST_IP);
6146 
6147 	if (!spec->l4_proto)
6148 		*unused_tuple |= BIT(INNER_IP_PROTO);
6149 
6150 	if (!spec->tclass)
6151 		*unused_tuple |= BIT(INNER_IP_TOS);
6152 
6153 	if (spec->l4_4_bytes)
6154 		return -EOPNOTSUPP;
6155 
6156 	return 0;
6157 }
6158 
6159 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6160 {
6161 	if (!spec || !unused_tuple)
6162 		return -EINVAL;
6163 
6164 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6165 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6166 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6167 
6168 	if (is_zero_ether_addr(spec->h_source))
6169 		*unused_tuple |= BIT(INNER_SRC_MAC);
6170 
6171 	if (is_zero_ether_addr(spec->h_dest))
6172 		*unused_tuple |= BIT(INNER_DST_MAC);
6173 
6174 	if (!spec->h_proto)
6175 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6176 
6177 	return 0;
6178 }
6179 
6180 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6181 				    struct ethtool_rx_flow_spec *fs,
6182 				    u32 *unused_tuple)
6183 {
6184 	if (fs->flow_type & FLOW_EXT) {
6185 		if (fs->h_ext.vlan_etype) {
6186 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6187 			return -EOPNOTSUPP;
6188 		}
6189 
6190 		if (!fs->h_ext.vlan_tci)
6191 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6192 
6193 		if (fs->m_ext.vlan_tci &&
6194 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6195 			dev_err(&hdev->pdev->dev,
6196 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6197 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6198 			return -EINVAL;
6199 		}
6200 	} else {
6201 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6202 	}
6203 
6204 	if (fs->flow_type & FLOW_MAC_EXT) {
6205 		if (hdev->fd_cfg.fd_mode !=
6206 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6207 			dev_err(&hdev->pdev->dev,
6208 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6209 			return -EOPNOTSUPP;
6210 		}
6211 
6212 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6213 			*unused_tuple |= BIT(INNER_DST_MAC);
6214 		else
6215 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6216 	}
6217 
6218 	return 0;
6219 }
6220 
6221 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6222 				       struct hclge_fd_user_def_info *info)
6223 {
6224 	switch (flow_type) {
6225 	case ETHER_FLOW:
6226 		info->layer = HCLGE_FD_USER_DEF_L2;
6227 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6228 		break;
6229 	case IP_USER_FLOW:
6230 	case IPV6_USER_FLOW:
6231 		info->layer = HCLGE_FD_USER_DEF_L3;
6232 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6233 		break;
6234 	case TCP_V4_FLOW:
6235 	case UDP_V4_FLOW:
6236 	case TCP_V6_FLOW:
6237 	case UDP_V6_FLOW:
6238 		info->layer = HCLGE_FD_USER_DEF_L4;
6239 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6240 		break;
6241 	default:
6242 		return -EOPNOTSUPP;
6243 	}
6244 
6245 	return 0;
6246 }
6247 
6248 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6249 {
6250 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6251 }
6252 
6253 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6254 					 struct ethtool_rx_flow_spec *fs,
6255 					 u32 *unused_tuple,
6256 					 struct hclge_fd_user_def_info *info)
6257 {
6258 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6259 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6260 	u16 data, offset, data_mask, offset_mask;
6261 	int ret;
6262 
6263 	info->layer = HCLGE_FD_USER_DEF_NONE;
6264 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6265 
6266 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6267 		return 0;
6268 
	/* the user-def data from ethtool is a 64-bit value: bits 0~15 are
	 * used for data, and bits 32~47 are used for the offset.
	 */
6272 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6273 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6274 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6275 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6276 
6277 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6278 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6279 		return -EOPNOTSUPP;
6280 	}
6281 
6282 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6283 		dev_err(&hdev->pdev->dev,
6284 			"user-def offset[%u] should be no more than %u\n",
6285 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6286 		return -EINVAL;
6287 	}
6288 
6289 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6290 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6291 		return -EINVAL;
6292 	}
6293 
6294 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6295 	if (ret) {
6296 		dev_err(&hdev->pdev->dev,
6297 			"unsupported flow type for user-def bytes, ret = %d\n",
6298 			ret);
6299 		return ret;
6300 	}
6301 
6302 	info->data = data;
6303 	info->data_mask = data_mask;
6304 	info->offset = offset;
6305 
6306 	return 0;
6307 }
6308 
6309 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6310 			       struct ethtool_rx_flow_spec *fs,
6311 			       u32 *unused_tuple,
6312 			       struct hclge_fd_user_def_info *info)
6313 {
6314 	u32 flow_type;
6315 	int ret;
6316 
6317 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6318 		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u\n",
6320 			fs->location,
6321 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6322 		return -EINVAL;
6323 	}
6324 
6325 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6326 	if (ret)
6327 		return ret;
6328 
6329 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6330 	switch (flow_type) {
6331 	case SCTP_V4_FLOW:
6332 	case TCP_V4_FLOW:
6333 	case UDP_V4_FLOW:
6334 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6335 						  unused_tuple);
6336 		break;
6337 	case IP_USER_FLOW:
6338 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6339 					       unused_tuple);
6340 		break;
6341 	case SCTP_V6_FLOW:
6342 	case TCP_V6_FLOW:
6343 	case UDP_V6_FLOW:
6344 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6345 						  unused_tuple);
6346 		break;
6347 	case IPV6_USER_FLOW:
6348 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6349 					       unused_tuple);
6350 		break;
6351 	case ETHER_FLOW:
6352 		if (hdev->fd_cfg.fd_mode !=
6353 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6354 			dev_err(&hdev->pdev->dev,
6355 				"ETHER_FLOW is not supported in current fd mode!\n");
6356 			return -EOPNOTSUPP;
6357 		}
6358 
6359 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6360 						 unused_tuple);
6361 		break;
6362 	default:
6363 		dev_err(&hdev->pdev->dev,
6364 			"unsupported protocol type, protocol type = %#x\n",
6365 			flow_type);
6366 		return -EOPNOTSUPP;
6367 	}
6368 
6369 	if (ret) {
6370 		dev_err(&hdev->pdev->dev,
6371 			"failed to check flow union tuple, ret = %d\n",
6372 			ret);
6373 		return ret;
6374 	}
6375 
6376 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6377 }
6378 
6379 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6380 				      struct ethtool_rx_flow_spec *fs,
6381 				      struct hclge_fd_rule *rule, u8 ip_proto)
6382 {
6383 	rule->tuples.src_ip[IPV4_INDEX] =
6384 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6385 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6386 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6387 
6388 	rule->tuples.dst_ip[IPV4_INDEX] =
6389 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6390 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6391 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6392 
6393 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6394 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6395 
6396 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6397 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6398 
6399 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6400 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6401 
6402 	rule->tuples.ether_proto = ETH_P_IP;
6403 	rule->tuples_mask.ether_proto = 0xFFFF;
6404 
6405 	rule->tuples.ip_proto = ip_proto;
6406 	rule->tuples_mask.ip_proto = 0xFF;
6407 }
6408 
6409 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6410 				   struct ethtool_rx_flow_spec *fs,
6411 				   struct hclge_fd_rule *rule)
6412 {
6413 	rule->tuples.src_ip[IPV4_INDEX] =
6414 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6415 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6416 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6417 
6418 	rule->tuples.dst_ip[IPV4_INDEX] =
6419 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6420 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6421 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6422 
6423 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6424 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6425 
6426 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6427 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6428 
6429 	rule->tuples.ether_proto = ETH_P_IP;
6430 	rule->tuples_mask.ether_proto = 0xFFFF;
6431 }
6432 
6433 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6434 				      struct ethtool_rx_flow_spec *fs,
6435 				      struct hclge_fd_rule *rule, u8 ip_proto)
6436 {
6437 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6438 			  IPV6_SIZE);
6439 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6440 			  IPV6_SIZE);
6441 
6442 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6443 			  IPV6_SIZE);
6444 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6445 			  IPV6_SIZE);
6446 
6447 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6448 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6449 
6450 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6451 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6452 
6453 	rule->tuples.ether_proto = ETH_P_IPV6;
6454 	rule->tuples_mask.ether_proto = 0xFFFF;
6455 
6456 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6457 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6458 
6459 	rule->tuples.ip_proto = ip_proto;
6460 	rule->tuples_mask.ip_proto = 0xFF;
6461 }
6462 
6463 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6464 				   struct ethtool_rx_flow_spec *fs,
6465 				   struct hclge_fd_rule *rule)
6466 {
6467 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6468 			  IPV6_SIZE);
6469 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6470 			  IPV6_SIZE);
6471 
6472 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6473 			  IPV6_SIZE);
6474 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6475 			  IPV6_SIZE);
6476 
6477 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6478 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6479 
	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6482 
6483 	rule->tuples.ether_proto = ETH_P_IPV6;
6484 	rule->tuples_mask.ether_proto = 0xFFFF;
6485 }
6486 
6487 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6488 				     struct ethtool_rx_flow_spec *fs,
6489 				     struct hclge_fd_rule *rule)
6490 {
6491 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6492 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6493 
6494 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6495 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6496 
6497 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6498 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6499 }
6500 
6501 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6502 					struct hclge_fd_rule *rule)
6503 {
6504 	switch (info->layer) {
6505 	case HCLGE_FD_USER_DEF_L2:
6506 		rule->tuples.l2_user_def = info->data;
6507 		rule->tuples_mask.l2_user_def = info->data_mask;
6508 		break;
6509 	case HCLGE_FD_USER_DEF_L3:
6510 		rule->tuples.l3_user_def = info->data;
6511 		rule->tuples_mask.l3_user_def = info->data_mask;
6512 		break;
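	/* the L4 user-def data is matched against the high 16 bits of the
	 * 32-bit l4_user_def tuple
	 */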
6513 	case HCLGE_FD_USER_DEF_L4:
6514 		rule->tuples.l4_user_def = (u32)info->data << 16;
6515 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6516 		break;
6517 	default:
6518 		break;
6519 	}
6520 
6521 	rule->ep.user_def = *info;
6522 }
6523 
6524 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6525 			      struct ethtool_rx_flow_spec *fs,
6526 			      struct hclge_fd_rule *rule,
6527 			      struct hclge_fd_user_def_info *info)
6528 {
6529 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6530 
6531 	switch (flow_type) {
6532 	case SCTP_V4_FLOW:
6533 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6534 		break;
6535 	case TCP_V4_FLOW:
6536 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6537 		break;
6538 	case UDP_V4_FLOW:
6539 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6540 		break;
6541 	case IP_USER_FLOW:
6542 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6543 		break;
6544 	case SCTP_V6_FLOW:
6545 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6546 		break;
6547 	case TCP_V6_FLOW:
6548 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6549 		break;
6550 	case UDP_V6_FLOW:
6551 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6552 		break;
6553 	case IPV6_USER_FLOW:
6554 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6555 		break;
6556 	case ETHER_FLOW:
6557 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6558 		break;
6559 	default:
6560 		return -EOPNOTSUPP;
6561 	}
6562 
6563 	if (fs->flow_type & FLOW_EXT) {
6564 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6565 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6566 		hclge_fd_get_user_def_tuple(info, rule);
6567 	}
6568 
6569 	if (fs->flow_type & FLOW_MAC_EXT) {
6570 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6571 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6572 	}
6573 
6574 	return 0;
6575 }
6576 
6577 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6578 				struct hclge_fd_rule *rule)
6579 {
6580 	int ret;
6581 
6582 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6583 	if (ret)
6584 		return ret;
6585 
6586 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6587 }
6588 
6589 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6590 				     struct hclge_fd_rule *rule)
6591 {
6592 	int ret;
6593 
6594 	spin_lock_bh(&hdev->fd_rule_lock);
6595 
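	/* ethtool flow-director rules and tc flower rules cannot coexist:
	 * reject the new rule if rules of a different type (ethtool or tc
	 * flower) are already configured; arfs rules are cleared below instead
	 */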
6596 	if (hdev->fd_active_type != rule->rule_type &&
6597 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6598 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6599 		dev_err(&hdev->pdev->dev,
			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
6601 			rule->rule_type, hdev->fd_active_type);
6602 		spin_unlock_bh(&hdev->fd_rule_lock);
6603 		return -EINVAL;
6604 	}
6605 
6606 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6607 	if (ret)
6608 		goto out;
6609 
6610 	ret = hclge_clear_arfs_rules(hdev);
6611 	if (ret)
6612 		goto out;
6613 
6614 	ret = hclge_fd_config_rule(hdev, rule);
6615 	if (ret)
6616 		goto out;
6617 
6618 	rule->state = HCLGE_FD_ACTIVE;
6619 	hdev->fd_active_type = rule->rule_type;
6620 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6621 
6622 out:
6623 	spin_unlock_bh(&hdev->fd_rule_lock);
6624 	return ret;
6625 }
6626 
6627 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6628 {
6629 	struct hclge_vport *vport = hclge_get_vport(handle);
6630 	struct hclge_dev *hdev = vport->back;
6631 
6632 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6633 }
6634 
6635 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6636 				      u16 *vport_id, u8 *action, u16 *queue_id)
6637 {
6638 	struct hclge_vport *vport = hdev->vport;
6639 
6640 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6641 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6642 	} else {
6643 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6644 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6645 		u16 tqps;
6646 
		/* To keep consistent with the user's configuration, subtract 1
		 * when printing 'vf', because the vf id from ethtool is
		 * increased by 1 for vf.
		 */
6650 		if (vf > hdev->num_req_vfs) {
6651 			dev_err(&hdev->pdev->dev,
6652 				"Error: vf id (%u) should be less than %u\n",
6653 				vf - 1, hdev->num_req_vfs);
6654 			return -EINVAL;
6655 		}
6656 
6657 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6658 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6659 
6660 		if (ring >= tqps) {
6661 			dev_err(&hdev->pdev->dev,
				"Error: queue id (%u) > max queue id (%u)\n",
6663 				ring, tqps - 1);
6664 			return -EINVAL;
6665 		}
6666 
6667 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6668 		*queue_id = ring;
6669 	}
6670 
6671 	return 0;
6672 }
6673 
6674 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6675 			      struct ethtool_rxnfc *cmd)
6676 {
6677 	struct hclge_vport *vport = hclge_get_vport(handle);
6678 	struct hclge_dev *hdev = vport->back;
6679 	struct hclge_fd_user_def_info info;
6680 	u16 dst_vport_id = 0, q_index = 0;
6681 	struct ethtool_rx_flow_spec *fs;
6682 	struct hclge_fd_rule *rule;
6683 	u32 unused = 0;
6684 	u8 action;
6685 	int ret;
6686 
6687 	if (!hnae3_dev_fd_supported(hdev)) {
6688 		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
6690 		return -EOPNOTSUPP;
6691 	}
6692 
6693 	if (!hdev->fd_en) {
6694 		dev_err(&hdev->pdev->dev,
6695 			"please enable flow director first\n");
6696 		return -EOPNOTSUPP;
6697 	}
6698 
6699 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6700 
6701 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6702 	if (ret)
6703 		return ret;
6704 
6705 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6706 					 &action, &q_index);
6707 	if (ret)
6708 		return ret;
6709 
6710 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6711 	if (!rule)
6712 		return -ENOMEM;
6713 
6714 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6715 	if (ret) {
6716 		kfree(rule);
6717 		return ret;
6718 	}
6719 
6720 	rule->flow_type = fs->flow_type;
6721 	rule->location = fs->location;
6722 	rule->unused_tuple = unused;
6723 	rule->vf_id = dst_vport_id;
6724 	rule->queue_id = q_index;
6725 	rule->action = action;
6726 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6727 
6728 	ret = hclge_add_fd_entry_common(hdev, rule);
6729 	if (ret)
6730 		kfree(rule);
6731 
6732 	return ret;
6733 }
6734 
6735 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6736 			      struct ethtool_rxnfc *cmd)
6737 {
6738 	struct hclge_vport *vport = hclge_get_vport(handle);
6739 	struct hclge_dev *hdev = vport->back;
6740 	struct ethtool_rx_flow_spec *fs;
6741 	int ret;
6742 
6743 	if (!hnae3_dev_fd_supported(hdev))
6744 		return -EOPNOTSUPP;
6745 
6746 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6747 
6748 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6749 		return -EINVAL;
6750 
6751 	spin_lock_bh(&hdev->fd_rule_lock);
6752 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6753 	    !test_bit(fs->location, hdev->fd_bmap)) {
6754 		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
6756 		spin_unlock_bh(&hdev->fd_rule_lock);
6757 		return -ENOENT;
6758 	}
6759 
6760 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6761 				   NULL, false);
6762 	if (ret)
6763 		goto out;
6764 
6765 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6766 
6767 out:
6768 	spin_unlock_bh(&hdev->fd_rule_lock);
6769 	return ret;
6770 }
6771 
6772 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6773 					 bool clear_list)
6774 {
6775 	struct hclge_fd_rule *rule;
6776 	struct hlist_node *node;
6777 	u16 location;
6778 
6779 	if (!hnae3_dev_fd_supported(hdev))
6780 		return;
6781 
6782 	spin_lock_bh(&hdev->fd_rule_lock);
6783 
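	/* invalidate every rule configured to hardware; the software list,
	 * rule counter and bitmap are cleared only when requested
	 */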
6784 	for_each_set_bit(location, hdev->fd_bmap,
6785 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6786 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6787 				     NULL, false);
6788 
6789 	if (clear_list) {
6790 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6791 					  rule_node) {
6792 			hlist_del(&rule->rule_node);
6793 			kfree(rule);
6794 		}
6795 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6796 		hdev->hclge_fd_rule_num = 0;
6797 		bitmap_zero(hdev->fd_bmap,
6798 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6799 	}
6800 
6801 	spin_unlock_bh(&hdev->fd_rule_lock);
6802 }
6803 
6804 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6805 {
6806 	hclge_clear_fd_rules_in_list(hdev, true);
6807 	hclge_fd_disable_user_def(hdev);
6808 }
6809 
6810 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6811 {
6812 	struct hclge_vport *vport = hclge_get_vport(handle);
6813 	struct hclge_dev *hdev = vport->back;
6814 	struct hclge_fd_rule *rule;
6815 	struct hlist_node *node;
6816 
	/* Return ok here, because reset error handling will check this
	 * return value. If an error is returned here, the reset process
	 * will fail.
	 */
6821 	if (!hnae3_dev_fd_supported(hdev))
6822 		return 0;
6823 
	/* if fd is disabled, the rules should not be restored during reset */
6825 	if (!hdev->fd_en)
6826 		return 0;
6827 
6828 	spin_lock_bh(&hdev->fd_rule_lock);
6829 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6830 		if (rule->state == HCLGE_FD_ACTIVE)
6831 			rule->state = HCLGE_FD_TO_ADD;
6832 	}
6833 	spin_unlock_bh(&hdev->fd_rule_lock);
6834 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6835 
6836 	return 0;
6837 }
6838 
6839 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6840 				 struct ethtool_rxnfc *cmd)
6841 {
6842 	struct hclge_vport *vport = hclge_get_vport(handle);
6843 	struct hclge_dev *hdev = vport->back;
6844 
6845 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6846 		return -EOPNOTSUPP;
6847 
6848 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6849 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6850 
6851 	return 0;
6852 }
6853 
6854 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6855 				     struct ethtool_tcpip4_spec *spec,
6856 				     struct ethtool_tcpip4_spec *spec_mask)
6857 {
6858 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6859 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6860 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6861 
6862 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6863 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6864 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6865 
6866 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6867 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6868 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6869 
6870 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6871 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6872 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6873 
6874 	spec->tos = rule->tuples.ip_tos;
6875 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6876 			0 : rule->tuples_mask.ip_tos;
6877 }
6878 
6879 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6880 				  struct ethtool_usrip4_spec *spec,
6881 				  struct ethtool_usrip4_spec *spec_mask)
6882 {
6883 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6884 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6885 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6886 
6887 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6888 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6889 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6890 
6891 	spec->tos = rule->tuples.ip_tos;
6892 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6893 			0 : rule->tuples_mask.ip_tos;
6894 
6895 	spec->proto = rule->tuples.ip_proto;
6896 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6897 			0 : rule->tuples_mask.ip_proto;
6898 
6899 	spec->ip_ver = ETH_RX_NFC_IP4;
6900 }
6901 
6902 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6903 				     struct ethtool_tcpip6_spec *spec,
6904 				     struct ethtool_tcpip6_spec *spec_mask)
6905 {
6906 	cpu_to_be32_array(spec->ip6src,
6907 			  rule->tuples.src_ip, IPV6_SIZE);
6908 	cpu_to_be32_array(spec->ip6dst,
6909 			  rule->tuples.dst_ip, IPV6_SIZE);
6910 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6911 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6912 	else
6913 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6914 				  IPV6_SIZE);
6915 
6916 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6917 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6918 	else
6919 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6920 				  IPV6_SIZE);
6921 
6922 	spec->tclass = rule->tuples.ip_tos;
6923 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6924 			0 : rule->tuples_mask.ip_tos;
6925 
6926 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6927 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6928 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6929 
6930 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6931 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6932 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6933 }
6934 
6935 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6936 				  struct ethtool_usrip6_spec *spec,
6937 				  struct ethtool_usrip6_spec *spec_mask)
6938 {
6939 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6940 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6941 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6942 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6943 	else
6944 		cpu_to_be32_array(spec_mask->ip6src,
6945 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6946 
6947 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6948 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6949 	else
6950 		cpu_to_be32_array(spec_mask->ip6dst,
6951 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6952 
6953 	spec->tclass = rule->tuples.ip_tos;
6954 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6955 			0 : rule->tuples_mask.ip_tos;
6956 
6957 	spec->l4_proto = rule->tuples.ip_proto;
6958 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6959 			0 : rule->tuples_mask.ip_proto;
6960 }
6961 
6962 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6963 				    struct ethhdr *spec,
6964 				    struct ethhdr *spec_mask)
6965 {
6966 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6967 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6968 
6969 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6970 		eth_zero_addr(spec_mask->h_source);
6971 	else
6972 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6973 
6974 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6975 		eth_zero_addr(spec_mask->h_dest);
6976 	else
6977 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6978 
6979 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6980 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6981 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6982 }
6983 
6984 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6985 				       struct hclge_fd_rule *rule)
6986 {
6987 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6988 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6989 		fs->h_ext.data[0] = 0;
6990 		fs->h_ext.data[1] = 0;
6991 		fs->m_ext.data[0] = 0;
6992 		fs->m_ext.data[1] = 0;
6993 	} else {
6994 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6995 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6996 		fs->m_ext.data[0] =
6997 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6998 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6999 	}
7000 }
7001 
7002 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7003 				  struct hclge_fd_rule *rule)
7004 {
7005 	if (fs->flow_type & FLOW_EXT) {
7006 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7007 		fs->m_ext.vlan_tci =
7008 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7009 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7010 
7011 		hclge_fd_get_user_def_info(fs, rule);
7012 	}
7013 
7014 	if (fs->flow_type & FLOW_MAC_EXT) {
7015 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7016 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
7017 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
7018 		else
7019 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
7020 					rule->tuples_mask.dst_mac);
7021 	}
7022 }
7023 
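/* Return the flow director rule stored at fs->location in ethtool format,
 * servicing the ETHTOOL_GRXCLSRULE request. The lookup relies on the rule
 * list being kept sorted by location.
 */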
7024 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7025 				  struct ethtool_rxnfc *cmd)
7026 {
7027 	struct hclge_vport *vport = hclge_get_vport(handle);
7028 	struct hclge_fd_rule *rule = NULL;
7029 	struct hclge_dev *hdev = vport->back;
7030 	struct ethtool_rx_flow_spec *fs;
7031 	struct hlist_node *node2;
7032 
7033 	if (!hnae3_dev_fd_supported(hdev))
7034 		return -EOPNOTSUPP;
7035 
7036 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7037 
7038 	spin_lock_bh(&hdev->fd_rule_lock);
7039 
7040 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7041 		if (rule->location >= fs->location)
7042 			break;
7043 	}
7044 
7045 	if (!rule || fs->location != rule->location) {
7046 		spin_unlock_bh(&hdev->fd_rule_lock);
7047 
7048 		return -ENOENT;
7049 	}
7050 
7051 	fs->flow_type = rule->flow_type;
7052 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7053 	case SCTP_V4_FLOW:
7054 	case TCP_V4_FLOW:
7055 	case UDP_V4_FLOW:
7056 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7057 					 &fs->m_u.tcp_ip4_spec);
7058 		break;
7059 	case IP_USER_FLOW:
7060 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7061 				      &fs->m_u.usr_ip4_spec);
7062 		break;
7063 	case SCTP_V6_FLOW:
7064 	case TCP_V6_FLOW:
7065 	case UDP_V6_FLOW:
7066 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7067 					 &fs->m_u.tcp_ip6_spec);
7068 		break;
7069 	case IPV6_USER_FLOW:
7070 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7071 				      &fs->m_u.usr_ip6_spec);
7072 		break;
	/* The flow type of the fd rule was validated before it was added to
	 * the rule list. All other flow types are handled above, so the
	 * default case must be ETHER_FLOW.
	 */
7077 	default:
7078 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7079 					&fs->m_u.ether_spec);
7080 		break;
7081 	}
7082 
7083 	hclge_fd_get_ext_info(fs, rule);
7084 
7085 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7086 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7087 	} else {
7088 		u64 vf_id;
7089 
7090 		fs->ring_cookie = rule->queue_id;
7091 		vf_id = rule->vf_id;
7092 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7093 		fs->ring_cookie |= vf_id;
7094 	}
7095 
7096 	spin_unlock_bh(&hdev->fd_rule_lock);
7097 
7098 	return 0;
7099 }
7100 
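/* Fill rule_locs with the locations of the configured flow director rules
 * for the ETHTOOL_GRXCLSRLALL request. Rules pending deletion are skipped,
 * and -EMSGSIZE is returned if rule_locs cannot hold all of them.
 */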
7101 static int hclge_get_all_rules(struct hnae3_handle *handle,
7102 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7103 {
7104 	struct hclge_vport *vport = hclge_get_vport(handle);
7105 	struct hclge_dev *hdev = vport->back;
7106 	struct hclge_fd_rule *rule;
7107 	struct hlist_node *node2;
7108 	int cnt = 0;
7109 
7110 	if (!hnae3_dev_fd_supported(hdev))
7111 		return -EOPNOTSUPP;
7112 
7113 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7114 
7115 	spin_lock_bh(&hdev->fd_rule_lock);
7116 	hlist_for_each_entry_safe(rule, node2,
7117 				  &hdev->fd_rule_list, rule_node) {
7118 		if (cnt == cmd->rule_cnt) {
7119 			spin_unlock_bh(&hdev->fd_rule_lock);
7120 			return -EMSGSIZE;
7121 		}
7122 
7123 		if (rule->state == HCLGE_FD_TO_DEL)
7124 			continue;
7125 
7126 		rule_locs[cnt] = rule->location;
7127 		cnt++;
7128 	}
7129 
7130 	spin_unlock_bh(&hdev->fd_rule_lock);
7131 
7132 	cmd->rule_cnt = cnt;
7133 
7134 	return 0;
7135 }
7136 
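/* Extract the tuples used by aRFS (ether type, L4 protocol, destination
 * port and IP addresses) from the dissected flow keys. IPv4 addresses are
 * stored in the last word of the IPv6-sized address arrays.
 */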
7137 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7138 				     struct hclge_fd_rule_tuples *tuples)
7139 {
7140 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7141 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7142 
7143 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7144 	tuples->ip_proto = fkeys->basic.ip_proto;
7145 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7146 
7147 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7148 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7149 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7150 	} else {
7151 		int i;
7152 
7153 		for (i = 0; i < IPV6_SIZE; i++) {
7154 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7155 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7156 		}
7157 	}
7158 }
7159 
/* traverse all rules and check whether an existing rule has the same tuples */
7161 static struct hclge_fd_rule *
7162 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7163 			  const struct hclge_fd_rule_tuples *tuples)
7164 {
7165 	struct hclge_fd_rule *rule = NULL;
7166 	struct hlist_node *node;
7167 
7168 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7169 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7170 			return rule;
7171 	}
7172 
7173 	return NULL;
7174 }
7175 
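/* Build an aRFS flow director rule from the extracted tuples. Fields that
 * aRFS does not match on (MAC addresses, VLAN tag, TOS and source port) are
 * marked unused, and the remaining tuple masks are set to all ones for an
 * exact match.
 */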
7176 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7177 				     struct hclge_fd_rule *rule)
7178 {
7179 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7180 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7181 			     BIT(INNER_SRC_PORT);
7182 	rule->action = 0;
7183 	rule->vf_id = 0;
7184 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7185 	rule->state = HCLGE_FD_TO_ADD;
7186 	if (tuples->ether_proto == ETH_P_IP) {
7187 		if (tuples->ip_proto == IPPROTO_TCP)
7188 			rule->flow_type = TCP_V4_FLOW;
7189 		else
7190 			rule->flow_type = UDP_V4_FLOW;
7191 	} else {
7192 		if (tuples->ip_proto == IPPROTO_TCP)
7193 			rule->flow_type = TCP_V6_FLOW;
7194 		else
7195 			rule->flow_type = UDP_V6_FLOW;
7196 	}
7197 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7198 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7199 }
7200 
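/* Add or update an aRFS flow director rule for the given flow. This runs in
 * the aRFS receive path, so the rule node is allocated with GFP_ATOMIC and
 * the rule is queued in HCLGE_FD_TO_ADD state for the service task to
 * program into hardware. Returns the rule location used as the filter id.
 */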
7201 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7202 				      u16 flow_id, struct flow_keys *fkeys)
7203 {
7204 	struct hclge_vport *vport = hclge_get_vport(handle);
7205 	struct hclge_fd_rule_tuples new_tuples = {};
7206 	struct hclge_dev *hdev = vport->back;
7207 	struct hclge_fd_rule *rule;
7208 	u16 bit_id;
7209 
7210 	if (!hnae3_dev_fd_supported(hdev))
7211 		return -EOPNOTSUPP;
7212 
	/* aRFS must not take effect when the user has already added
	 * flow director rules
	 */
7216 	spin_lock_bh(&hdev->fd_rule_lock);
7217 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7218 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7219 		spin_unlock_bh(&hdev->fd_rule_lock);
7220 		return -EOPNOTSUPP;
7221 	}
7222 
7223 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7224 
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
7230 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7231 	if (!rule) {
7232 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7233 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7234 			spin_unlock_bh(&hdev->fd_rule_lock);
7235 			return -ENOSPC;
7236 		}
7237 
7238 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7239 		if (!rule) {
7240 			spin_unlock_bh(&hdev->fd_rule_lock);
7241 			return -ENOMEM;
7242 		}
7243 
7244 		rule->location = bit_id;
7245 		rule->arfs.flow_id = flow_id;
7246 		rule->queue_id = queue_id;
7247 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7248 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7249 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7250 	} else if (rule->queue_id != queue_id) {
7251 		rule->queue_id = queue_id;
7252 		rule->state = HCLGE_FD_TO_ADD;
7253 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7254 		hclge_task_schedule(hdev, 0);
7255 	}
7256 	spin_unlock_bh(&hdev->fd_rule_lock);
7257 	return rule->location;
7258 }
7259 
7260 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7261 {
7262 #ifdef CONFIG_RFS_ACCEL
7263 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7264 	struct hclge_fd_rule *rule;
7265 	struct hlist_node *node;
7266 
7267 	spin_lock_bh(&hdev->fd_rule_lock);
7268 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7269 		spin_unlock_bh(&hdev->fd_rule_lock);
7270 		return;
7271 	}
7272 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7273 		if (rule->state != HCLGE_FD_ACTIVE)
7274 			continue;
7275 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7276 					rule->arfs.flow_id, rule->location)) {
7277 			rule->state = HCLGE_FD_TO_DEL;
7278 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7279 		}
7280 	}
7281 	spin_unlock_bh(&hdev->fd_rule_lock);
7282 #endif
7283 }
7284 
/* the caller must hold hdev->fd_rule_lock */
7286 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7287 {
7288 #ifdef CONFIG_RFS_ACCEL
7289 	struct hclge_fd_rule *rule;
7290 	struct hlist_node *node;
7291 	int ret;
7292 
7293 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7294 		return 0;
7295 
7296 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7297 		switch (rule->state) {
7298 		case HCLGE_FD_TO_DEL:
7299 		case HCLGE_FD_ACTIVE:
7300 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7301 						   rule->location, NULL, false);
7302 			if (ret)
7303 				return ret;
7304 			fallthrough;
7305 		case HCLGE_FD_TO_ADD:
7306 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7307 			hlist_del(&rule->rule_node);
7308 			kfree(rule);
7309 			break;
7310 		default:
7311 			break;
7312 		}
7313 	}
7314 	hclge_sync_fd_state(hdev);
7315 
7316 #endif
7317 	return 0;
7318 }
7319 
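/* The hclge_get_cls_key_*() helpers below convert tc flower match keys into
 * flow director rule tuples. Keys that are absent from the flower match are
 * flagged in rule->unused_tuple so they are wildcarded.
 */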
7320 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7321 				    struct hclge_fd_rule *rule)
7322 {
7323 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7324 		struct flow_match_basic match;
7325 		u16 ethtype_key, ethtype_mask;
7326 
7327 		flow_rule_match_basic(flow, &match);
7328 		ethtype_key = ntohs(match.key->n_proto);
7329 		ethtype_mask = ntohs(match.mask->n_proto);
7330 
7331 		if (ethtype_key == ETH_P_ALL) {
7332 			ethtype_key = 0;
7333 			ethtype_mask = 0;
7334 		}
7335 		rule->tuples.ether_proto = ethtype_key;
7336 		rule->tuples_mask.ether_proto = ethtype_mask;
7337 		rule->tuples.ip_proto = match.key->ip_proto;
7338 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7339 	} else {
7340 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7341 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7342 	}
7343 }
7344 
7345 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7346 				  struct hclge_fd_rule *rule)
7347 {
7348 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7349 		struct flow_match_eth_addrs match;
7350 
7351 		flow_rule_match_eth_addrs(flow, &match);
7352 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7353 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7354 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7355 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7356 	} else {
7357 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7358 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7359 	}
7360 }
7361 
7362 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7363 				   struct hclge_fd_rule *rule)
7364 {
7365 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7366 		struct flow_match_vlan match;
7367 
7368 		flow_rule_match_vlan(flow, &match);
7369 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7370 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7371 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7372 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7373 	} else {
7374 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7375 	}
7376 }
7377 
7378 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7379 				 struct hclge_fd_rule *rule)
7380 {
7381 	u16 addr_type = 0;
7382 
7383 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7384 		struct flow_match_control match;
7385 
7386 		flow_rule_match_control(flow, &match);
7387 		addr_type = match.key->addr_type;
7388 	}
7389 
7390 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7391 		struct flow_match_ipv4_addrs match;
7392 
7393 		flow_rule_match_ipv4_addrs(flow, &match);
7394 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7395 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7396 						be32_to_cpu(match.mask->src);
7397 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7398 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7399 						be32_to_cpu(match.mask->dst);
7400 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7401 		struct flow_match_ipv6_addrs match;
7402 
7403 		flow_rule_match_ipv6_addrs(flow, &match);
7404 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7405 				  IPV6_SIZE);
7406 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7407 				  match.mask->src.s6_addr32, IPV6_SIZE);
7408 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7409 				  IPV6_SIZE);
7410 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7411 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7412 	} else {
7413 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7414 		rule->unused_tuple |= BIT(INNER_DST_IP);
7415 	}
7416 }
7417 
7418 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7419 				   struct hclge_fd_rule *rule)
7420 {
7421 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7422 		struct flow_match_ports match;
7423 
7424 		flow_rule_match_ports(flow, &match);
7425 
7426 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7427 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7428 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7429 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7430 	} else {
7431 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7432 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7433 	}
7434 }
7435 
7436 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7437 				  struct flow_cls_offload *cls_flower,
7438 				  struct hclge_fd_rule *rule)
7439 {
7440 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7441 	struct flow_dissector *dissector = flow->match.dissector;
7442 
7443 	if (dissector->used_keys &
7444 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7445 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7446 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7447 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7448 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7449 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7450 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7451 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7452 			dissector->used_keys);
7453 		return -EOPNOTSUPP;
7454 	}
7455 
7456 	hclge_get_cls_key_basic(flow, rule);
7457 	hclge_get_cls_key_mac(flow, rule);
7458 	hclge_get_cls_key_vlan(flow, rule);
7459 	hclge_get_cls_key_ip(flow, rule);
7460 	hclge_get_cls_key_port(flow, rule);
7461 
7462 	return 0;
7463 }
7464 
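/* Validate the tc flower parameters: the target tc must be within the
 * configured tc range, and the rule priority, which maps to TCAM location
 * prio - 1, must fit in the stage 1 rule space and not be in use already.
 */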
7465 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7466 				  struct flow_cls_offload *cls_flower, int tc)
7467 {
7468 	u32 prio = cls_flower->common.prio;
7469 
7470 	if (tc < 0 || tc > hdev->tc_max) {
7471 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7472 		return -EINVAL;
7473 	}
7474 
7475 	if (prio == 0 ||
7476 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7477 		dev_err(&hdev->pdev->dev,
7478 			"prio %u should be in range[1, %u]\n",
7479 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7480 		return -EINVAL;
7481 	}
7482 
7483 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7484 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7485 		return -EINVAL;
7486 	}
7487 	return 0;
7488 }
7489 
7490 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7491 				struct flow_cls_offload *cls_flower,
7492 				int tc)
7493 {
7494 	struct hclge_vport *vport = hclge_get_vport(handle);
7495 	struct hclge_dev *hdev = vport->back;
7496 	struct hclge_fd_rule *rule;
7497 	int ret;
7498 
7499 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7500 	if (ret) {
7501 		dev_err(&hdev->pdev->dev,
7502 			"failed to check cls flower params, ret = %d\n", ret);
7503 		return ret;
7504 	}
7505 
7506 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7507 	if (!rule)
7508 		return -ENOMEM;
7509 
7510 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7511 	if (ret) {
7512 		kfree(rule);
7513 		return ret;
7514 	}
7515 
7516 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7517 	rule->cls_flower.tc = tc;
7518 	rule->location = cls_flower->common.prio - 1;
7519 	rule->vf_id = 0;
7520 	rule->cls_flower.cookie = cls_flower->cookie;
7521 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7522 
7523 	ret = hclge_add_fd_entry_common(hdev, rule);
7524 	if (ret)
7525 		kfree(rule);
7526 
7527 	return ret;
7528 }
7529 
7530 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7531 						   unsigned long cookie)
7532 {
7533 	struct hclge_fd_rule *rule;
7534 	struct hlist_node *node;
7535 
7536 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7537 		if (rule->cls_flower.cookie == cookie)
7538 			return rule;
7539 	}
7540 
7541 	return NULL;
7542 }
7543 
7544 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7545 				struct flow_cls_offload *cls_flower)
7546 {
7547 	struct hclge_vport *vport = hclge_get_vport(handle);
7548 	struct hclge_dev *hdev = vport->back;
7549 	struct hclge_fd_rule *rule;
7550 	int ret;
7551 
7552 	spin_lock_bh(&hdev->fd_rule_lock);
7553 
7554 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7555 	if (!rule) {
7556 		spin_unlock_bh(&hdev->fd_rule_lock);
7557 		return -EINVAL;
7558 	}
7559 
7560 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7561 				   NULL, false);
7562 	if (ret) {
7563 		spin_unlock_bh(&hdev->fd_rule_lock);
7564 		return ret;
7565 	}
7566 
7567 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7568 	spin_unlock_bh(&hdev->fd_rule_lock);
7569 
7570 	return 0;
7571 }
7572 
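/* Flush pending flow director changes to hardware: program rules in the
 * TO_ADD state and remove rules in the TO_DEL state. On failure the
 * TBL_CHANGED flag is set again so the service task retries later.
 */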
7573 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7574 {
7575 	struct hclge_fd_rule *rule;
7576 	struct hlist_node *node;
7577 	int ret = 0;
7578 
7579 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7580 		return;
7581 
7582 	spin_lock_bh(&hdev->fd_rule_lock);
7583 
7584 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7585 		switch (rule->state) {
7586 		case HCLGE_FD_TO_ADD:
7587 			ret = hclge_fd_config_rule(hdev, rule);
7588 			if (ret)
7589 				goto out;
7590 			rule->state = HCLGE_FD_ACTIVE;
7591 			break;
7592 		case HCLGE_FD_TO_DEL:
7593 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7594 						   rule->location, NULL, false);
7595 			if (ret)
7596 				goto out;
7597 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7598 			hclge_fd_free_node(hdev, rule);
7599 			break;
7600 		default:
7601 			break;
7602 		}
7603 	}
7604 
7605 out:
7606 	if (ret)
7607 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7608 
7609 	spin_unlock_bh(&hdev->fd_rule_lock);
7610 }
7611 
7612 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7613 {
7614 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7615 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7616 
7617 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7618 	}
7619 
7620 	hclge_sync_fd_user_def_cfg(hdev, false);
7621 
7622 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7623 }
7624 
7625 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7626 {
7627 	struct hclge_vport *vport = hclge_get_vport(handle);
7628 	struct hclge_dev *hdev = vport->back;
7629 
7630 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7631 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7632 }
7633 
7634 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7635 {
7636 	struct hclge_vport *vport = hclge_get_vport(handle);
7637 	struct hclge_dev *hdev = vport->back;
7638 
7639 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7640 }
7641 
7642 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7643 {
7644 	struct hclge_vport *vport = hclge_get_vport(handle);
7645 	struct hclge_dev *hdev = vport->back;
7646 
7647 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7648 }
7649 
7650 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7651 {
7652 	struct hclge_vport *vport = hclge_get_vport(handle);
7653 	struct hclge_dev *hdev = vport->back;
7654 
7655 	return hdev->rst_stats.hw_reset_done_cnt;
7656 }
7657 
7658 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7659 {
7660 	struct hclge_vport *vport = hclge_get_vport(handle);
7661 	struct hclge_dev *hdev = vport->back;
7662 
7663 	hdev->fd_en = enable;
7664 
7665 	if (!enable)
7666 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7667 	else
7668 		hclge_restore_fd_entries(handle);
7669 
7670 	hclge_task_schedule(hdev, 0);
7671 }
7672 
7673 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7674 {
7675 	struct hclge_desc desc;
7676 	struct hclge_config_mac_mode_cmd *req =
7677 		(struct hclge_config_mac_mode_cmd *)desc.data;
7678 	u32 loop_en = 0;
7679 	int ret;
7680 
7681 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7682 
7683 	if (enable) {
7684 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7685 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7686 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7687 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7688 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7689 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7690 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7691 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7692 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7693 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7694 	}
7695 
7696 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7697 
7698 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7699 	if (ret)
7700 		dev_err(&hdev->pdev->dev,
7701 			"mac enable fail, ret =%d.\n", ret);
7702 }
7703 
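/* Read the current mac vlan switch parameter for the given function, merge
 * in the requested switch_param/param_mask bits and write the result back.
 */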
7704 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7705 				     u8 switch_param, u8 param_mask)
7706 {
7707 	struct hclge_mac_vlan_switch_cmd *req;
7708 	struct hclge_desc desc;
7709 	u32 func_id;
7710 	int ret;
7711 
7712 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7713 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7714 
7715 	/* read current config parameter */
7716 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7717 				   true);
7718 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7719 	req->func_id = cpu_to_le32(func_id);
7720 
7721 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7722 	if (ret) {
7723 		dev_err(&hdev->pdev->dev,
7724 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7725 		return ret;
7726 	}
7727 
7728 	/* modify and write new config parameter */
7729 	hclge_cmd_reuse_desc(&desc, false);
7730 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7731 	req->param_mask = param_mask;
7732 
7733 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7734 	if (ret)
7735 		dev_err(&hdev->pdev->dev,
7736 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7737 	return ret;
7738 }
7739 
7740 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7741 				       int link_ret)
7742 {
7743 #define HCLGE_PHY_LINK_STATUS_NUM  200
7744 
7745 	struct phy_device *phydev = hdev->hw.mac.phydev;
7746 	int i = 0;
7747 	int ret;
7748 
7749 	do {
7750 		ret = phy_read_status(phydev);
7751 		if (ret) {
7752 			dev_err(&hdev->pdev->dev,
7753 				"phy update link status fail, ret = %d\n", ret);
7754 			return;
7755 		}
7756 
7757 		if (phydev->link == link_ret)
7758 			break;
7759 
7760 		msleep(HCLGE_LINK_STATUS_MS);
7761 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7762 }
7763 
7764 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7765 {
7766 #define HCLGE_MAC_LINK_STATUS_NUM  100
7767 
7768 	int link_status;
7769 	int i = 0;
7770 	int ret;
7771 
7772 	do {
7773 		ret = hclge_get_mac_link_status(hdev, &link_status);
7774 		if (ret)
7775 			return ret;
7776 		if (link_status == link_ret)
7777 			return 0;
7778 
7779 		msleep(HCLGE_LINK_STATUS_MS);
7780 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7781 	return -EBUSY;
7782 }
7783 
7784 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7785 					  bool is_phy)
7786 {
7787 	int link_ret;
7788 
7789 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7790 
7791 	if (is_phy)
7792 		hclge_phy_link_status_wait(hdev, link_ret);
7793 
7794 	return hclge_mac_link_status_wait(hdev, link_ret);
7795 }
7796 
7797 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7798 {
7799 	struct hclge_config_mac_mode_cmd *req;
7800 	struct hclge_desc desc;
7801 	u32 loop_en;
7802 	int ret;
7803 
7804 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
7806 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7807 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7808 	if (ret) {
7809 		dev_err(&hdev->pdev->dev,
7810 			"mac loopback get fail, ret =%d.\n", ret);
7811 		return ret;
7812 	}
7813 
7814 	/* 2 Then setup the loopback flag */
7815 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7816 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7817 
7818 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7819 
	/* 3 Config mac work mode with the loopback flag
	 * and its original configuration parameters
	 */
7823 	hclge_cmd_reuse_desc(&desc, false);
7824 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7825 	if (ret)
7826 		dev_err(&hdev->pdev->dev,
7827 			"mac loopback set fail, ret =%d.\n", ret);
7828 	return ret;
7829 }
7830 
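/* Enable or disable one of the common loopback modes (serdes serial, serdes
 * parallel or GE PHY inner loopback). After sending the config command, the
 * result is polled until the firmware reports the setup as done, or until
 * HCLGE_COMMON_LB_RETRY_NUM retries have elapsed.
 */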
7831 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7832 				     enum hnae3_loop loop_mode)
7833 {
7834 #define HCLGE_COMMON_LB_RETRY_MS	10
7835 #define HCLGE_COMMON_LB_RETRY_NUM	100
7836 
7837 	struct hclge_common_lb_cmd *req;
7838 	struct hclge_desc desc;
7839 	int ret, i = 0;
7840 	u8 loop_mode_b;
7841 
7842 	req = (struct hclge_common_lb_cmd *)desc.data;
7843 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7844 
7845 	switch (loop_mode) {
7846 	case HNAE3_LOOP_SERIAL_SERDES:
7847 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7848 		break;
7849 	case HNAE3_LOOP_PARALLEL_SERDES:
7850 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7851 		break;
7852 	case HNAE3_LOOP_PHY:
7853 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7854 		break;
7855 	default:
7856 		dev_err(&hdev->pdev->dev,
7857 			"unsupported common loopback mode %d\n", loop_mode);
7858 		return -ENOTSUPP;
7859 	}
7860 
7861 	if (en) {
7862 		req->enable = loop_mode_b;
7863 		req->mask = loop_mode_b;
7864 	} else {
7865 		req->mask = loop_mode_b;
7866 	}
7867 
7868 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7869 	if (ret) {
7870 		dev_err(&hdev->pdev->dev,
7871 			"common loopback set fail, ret = %d\n", ret);
7872 		return ret;
7873 	}
7874 
7875 	do {
7876 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7877 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7878 					   true);
7879 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7880 		if (ret) {
7881 			dev_err(&hdev->pdev->dev,
7882 				"common loopback get, ret = %d\n", ret);
7883 			return ret;
7884 		}
7885 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7886 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7887 
7888 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7889 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7890 		return -EBUSY;
7891 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7892 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7893 		return -EIO;
7894 	}
7895 	return ret;
7896 }
7897 
7898 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7899 				     enum hnae3_loop loop_mode)
7900 {
7901 	int ret;
7902 
7903 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7904 	if (ret)
7905 		return ret;
7906 
7907 	hclge_cfg_mac_mode(hdev, en);
7908 
7909 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7910 	if (ret)
7911 		dev_err(&hdev->pdev->dev,
7912 			"serdes loopback config mac mode timeout\n");
7913 
7914 	return ret;
7915 }
7916 
7917 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7918 				     struct phy_device *phydev)
7919 {
7920 	int ret;
7921 
7922 	if (!phydev->suspended) {
7923 		ret = phy_suspend(phydev);
7924 		if (ret)
7925 			return ret;
7926 	}
7927 
7928 	ret = phy_resume(phydev);
7929 	if (ret)
7930 		return ret;
7931 
7932 	return phy_loopback(phydev, true);
7933 }
7934 
7935 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7936 				      struct phy_device *phydev)
7937 {
7938 	int ret;
7939 
7940 	ret = phy_loopback(phydev, false);
7941 	if (ret)
7942 		return ret;
7943 
7944 	return phy_suspend(phydev);
7945 }
7946 
7947 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7948 {
7949 	struct phy_device *phydev = hdev->hw.mac.phydev;
7950 	int ret;
7951 
7952 	if (!phydev) {
7953 		if (hnae3_dev_phy_imp_supported(hdev))
7954 			return hclge_set_common_loopback(hdev, en,
7955 							 HNAE3_LOOP_PHY);
7956 		return -ENOTSUPP;
7957 	}
7958 
7959 	if (en)
7960 		ret = hclge_enable_phy_loopback(hdev, phydev);
7961 	else
7962 		ret = hclge_disable_phy_loopback(hdev, phydev);
7963 	if (ret) {
7964 		dev_err(&hdev->pdev->dev,
7965 			"set phy loopback fail, ret = %d\n", ret);
7966 		return ret;
7967 	}
7968 
7969 	hclge_cfg_mac_mode(hdev, en);
7970 
7971 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7972 	if (ret)
7973 		dev_err(&hdev->pdev->dev,
7974 			"phy loopback config mac mode timeout\n");
7975 
7976 	return ret;
7977 }
7978 
7979 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7980 				     u16 stream_id, bool enable)
7981 {
7982 	struct hclge_desc desc;
7983 	struct hclge_cfg_com_tqp_queue_cmd *req =
7984 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7985 
7986 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7987 	req->tqp_id = cpu_to_le16(tqp_id);
7988 	req->stream_id = cpu_to_le16(stream_id);
7989 	if (enable)
7990 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7991 
7992 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7993 }
7994 
7995 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7996 {
7997 	struct hclge_vport *vport = hclge_get_vport(handle);
7998 	struct hclge_dev *hdev = vport->back;
7999 	int ret;
8000 	u16 i;
8001 
8002 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
8003 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
8004 		if (ret)
8005 			return ret;
8006 	}
8007 	return 0;
8008 }
8009 
8010 static int hclge_set_loopback(struct hnae3_handle *handle,
8011 			      enum hnae3_loop loop_mode, bool en)
8012 {
8013 	struct hclge_vport *vport = hclge_get_vport(handle);
8014 	struct hclge_dev *hdev = vport->back;
8015 	int ret;
8016 
8017 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8018 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8019 	 * the same, the packets are looped back in the SSU. If SSU loopback
8020 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8021 	 */
8022 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8023 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8024 
8025 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8026 						HCLGE_SWITCH_ALW_LPBK_MASK);
8027 		if (ret)
8028 			return ret;
8029 	}
8030 
8031 	switch (loop_mode) {
8032 	case HNAE3_LOOP_APP:
8033 		ret = hclge_set_app_loopback(hdev, en);
8034 		break;
8035 	case HNAE3_LOOP_SERIAL_SERDES:
8036 	case HNAE3_LOOP_PARALLEL_SERDES:
8037 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
8038 		break;
8039 	case HNAE3_LOOP_PHY:
8040 		ret = hclge_set_phy_loopback(hdev, en);
8041 		break;
8042 	default:
8043 		ret = -ENOTSUPP;
8044 		dev_err(&hdev->pdev->dev,
8045 			"loop_mode %d is not supported\n", loop_mode);
8046 		break;
8047 	}
8048 
8049 	if (ret)
8050 		return ret;
8051 
8052 	ret = hclge_tqp_enable(handle, en);
8053 	if (ret)
8054 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8055 			en ? "enable" : "disable", ret);
8056 
8057 	return ret;
8058 }
8059 
8060 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8061 {
8062 	int ret;
8063 
8064 	ret = hclge_set_app_loopback(hdev, false);
8065 	if (ret)
8066 		return ret;
8067 
8068 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8069 	if (ret)
8070 		return ret;
8071 
8072 	return hclge_cfg_common_loopback(hdev, false,
8073 					 HNAE3_LOOP_PARALLEL_SERDES);
8074 }
8075 
8076 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8077 {
8078 	struct hclge_vport *vport = hclge_get_vport(handle);
8079 	struct hnae3_knic_private_info *kinfo;
8080 	struct hnae3_queue *queue;
8081 	struct hclge_tqp *tqp;
8082 	int i;
8083 
8084 	kinfo = &vport->nic.kinfo;
8085 	for (i = 0; i < kinfo->num_tqps; i++) {
8086 		queue = handle->kinfo.tqp[i];
8087 		tqp = container_of(queue, struct hclge_tqp, q);
8088 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8089 	}
8090 }
8091 
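/* Wait for an in-flight link status update in the service task to finish,
 * so that the link state is not changed after the DOWN flag has been set.
 */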
8092 static void hclge_flush_link_update(struct hclge_dev *hdev)
8093 {
8094 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8095 
8096 	unsigned long last = hdev->serv_processed_cnt;
8097 	int i = 0;
8098 
8099 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8100 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8101 	       last == hdev->serv_processed_cnt)
8102 		usleep_range(1, 1);
8103 }
8104 
8105 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8106 {
8107 	struct hclge_vport *vport = hclge_get_vport(handle);
8108 	struct hclge_dev *hdev = vport->back;
8109 
8110 	if (enable) {
8111 		hclge_task_schedule(hdev, 0);
8112 	} else {
8113 		/* Set the DOWN flag here to disable link updating */
8114 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8115 
8116 		/* flush memory to make sure DOWN is seen by service task */
8117 		smp_mb__before_atomic();
8118 		hclge_flush_link_update(hdev);
8119 	}
8120 }
8121 
8122 static int hclge_ae_start(struct hnae3_handle *handle)
8123 {
8124 	struct hclge_vport *vport = hclge_get_vport(handle);
8125 	struct hclge_dev *hdev = vport->back;
8126 
8127 	/* mac enable */
8128 	hclge_cfg_mac_mode(hdev, true);
8129 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8130 	hdev->hw.mac.link = 0;
8131 
8132 	/* reset tqp stats */
8133 	hclge_reset_tqp_stats(handle);
8134 
8135 	hclge_mac_start_phy(hdev);
8136 
8137 	return 0;
8138 }
8139 
8140 static void hclge_ae_stop(struct hnae3_handle *handle)
8141 {
8142 	struct hclge_vport *vport = hclge_get_vport(handle);
8143 	struct hclge_dev *hdev = vport->back;
8144 
8145 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8146 	spin_lock_bh(&hdev->fd_rule_lock);
8147 	hclge_clear_arfs_rules(hdev);
8148 	spin_unlock_bh(&hdev->fd_rule_lock);
8149 
	/* If it is not a PF reset or FLR, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
8153 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8154 	    hdev->reset_type != HNAE3_FUNC_RESET &&
8155 	    hdev->reset_type != HNAE3_FLR_RESET) {
8156 		hclge_mac_stop_phy(hdev);
8157 		hclge_update_link_status(hdev);
8158 		return;
8159 	}
8160 
8161 	hclge_reset_tqp(handle);
8162 
8163 	hclge_config_mac_tnl_int(hdev, false);
8164 
8165 	/* Mac disable */
8166 	hclge_cfg_mac_mode(hdev, false);
8167 
8168 	hclge_mac_stop_phy(hdev);
8169 
8170 	/* reset tqp stats */
8171 	hclge_reset_tqp_stats(handle);
8172 	hclge_update_link_status(hdev);
8173 }
8174 
8175 int hclge_vport_start(struct hclge_vport *vport)
8176 {
8177 	struct hclge_dev *hdev = vport->back;
8178 
8179 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8180 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8181 	vport->last_active_jiffies = jiffies;
8182 
8183 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8184 		if (vport->vport_id) {
8185 			hclge_restore_mac_table_common(vport);
8186 			hclge_restore_vport_vlan_table(vport);
8187 		} else {
8188 			hclge_restore_hw_table(hdev);
8189 		}
8190 	}
8191 
8192 	clear_bit(vport->vport_id, hdev->vport_config_block);
8193 
8194 	return 0;
8195 }
8196 
8197 void hclge_vport_stop(struct hclge_vport *vport)
8198 {
8199 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8200 }
8201 
8202 static int hclge_client_start(struct hnae3_handle *handle)
8203 {
8204 	struct hclge_vport *vport = hclge_get_vport(handle);
8205 
8206 	return hclge_vport_start(vport);
8207 }
8208 
8209 static void hclge_client_stop(struct hnae3_handle *handle)
8210 {
8211 	struct hclge_vport *vport = hclge_get_vport(handle);
8212 
8213 	hclge_vport_stop(vport);
8214 }
8215 
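/* Translate the mac_vlan table command response code into an errno, with
 * the interpretation depending on which operation (add, remove or lookup)
 * was issued.
 */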
8216 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8217 					 u16 cmdq_resp, u8  resp_code,
8218 					 enum hclge_mac_vlan_tbl_opcode op)
8219 {
8220 	struct hclge_dev *hdev = vport->back;
8221 
8222 	if (cmdq_resp) {
8223 		dev_err(&hdev->pdev->dev,
8224 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8225 			cmdq_resp);
8226 		return -EIO;
8227 	}
8228 
8229 	if (op == HCLGE_MAC_VLAN_ADD) {
8230 		if (!resp_code || resp_code == 1)
8231 			return 0;
8232 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8233 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8234 			return -ENOSPC;
8235 
8236 		dev_err(&hdev->pdev->dev,
8237 			"add mac addr failed for undefined, code=%u.\n",
8238 			resp_code);
8239 		return -EIO;
8240 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8241 		if (!resp_code) {
8242 			return 0;
8243 		} else if (resp_code == 1) {
8244 			dev_dbg(&hdev->pdev->dev,
8245 				"remove mac addr failed for miss.\n");
8246 			return -ENOENT;
8247 		}
8248 
8249 		dev_err(&hdev->pdev->dev,
8250 			"remove mac addr failed for undefined, code=%u.\n",
8251 			resp_code);
8252 		return -EIO;
8253 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8254 		if (!resp_code) {
8255 			return 0;
8256 		} else if (resp_code == 1) {
8257 			dev_dbg(&hdev->pdev->dev,
8258 				"lookup mac addr failed for miss.\n");
8259 			return -ENOENT;
8260 		}
8261 
8262 		dev_err(&hdev->pdev->dev,
8263 			"lookup mac addr failed for undefined, code=%u.\n",
8264 			resp_code);
8265 		return -EIO;
8266 	}
8267 
8268 	dev_err(&hdev->pdev->dev,
8269 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8270 
8271 	return -EINVAL;
8272 }
8273 
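/* Set or clear the bit for the given vfid in the function bitmap carried by
 * a multicast mac_vlan table entry. The first 192 function ids live in
 * desc[1] and the rest in desc[2], packed 32 ids per data word.
 */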
8274 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8275 {
8276 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8277 
8278 	unsigned int word_num;
8279 	unsigned int bit_num;
8280 
8281 	if (vfid > 255 || vfid < 0)
8282 		return -EIO;
8283 
8284 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8285 		word_num = vfid / 32;
8286 		bit_num  = vfid % 32;
8287 		if (clr)
8288 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8289 		else
8290 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8291 	} else {
8292 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8293 		bit_num  = vfid % 32;
8294 		if (clr)
8295 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8296 		else
8297 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8298 	}
8299 
8300 	return 0;
8301 }
8302 
8303 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8304 {
8305 #define HCLGE_DESC_NUMBER 3
8306 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8307 	int i, j;
8308 
8309 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8310 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8311 			if (desc[i].data[j])
8312 				return false;
8313 
8314 	return true;
8315 }
8316 
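/* Pack the 6-byte mac address into the hi32/lo16 fields of a mac_vlan table
 * entry and set the entry flags; multicast entries additionally get the mc
 * entry type and mc_mac_en bits set.
 */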
8317 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8318 				   const u8 *addr, bool is_mc)
8319 {
8320 	const unsigned char *mac_addr = addr;
8321 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8322 		       (mac_addr[0]) | (mac_addr[1] << 8);
8323 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8324 
8325 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8326 	if (is_mc) {
8327 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8328 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8329 	}
8330 
8331 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8332 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8333 }
8334 
8335 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8336 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8337 {
8338 	struct hclge_dev *hdev = vport->back;
8339 	struct hclge_desc desc;
8340 	u8 resp_code;
8341 	u16 retval;
8342 	int ret;
8343 
8344 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8345 
8346 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8347 
8348 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8349 	if (ret) {
8350 		dev_err(&hdev->pdev->dev,
8351 			"del mac addr failed for cmd_send, ret =%d.\n",
8352 			ret);
8353 		return ret;
8354 	}
8355 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8356 	retval = le16_to_cpu(desc.retval);
8357 
8358 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8359 					     HCLGE_MAC_VLAN_REMOVE);
8360 }
8361 
8362 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8363 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8364 				     struct hclge_desc *desc,
8365 				     bool is_mc)
8366 {
8367 	struct hclge_dev *hdev = vport->back;
8368 	u8 resp_code;
8369 	u16 retval;
8370 	int ret;
8371 
8372 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8373 	if (is_mc) {
8374 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8375 		memcpy(desc[0].data,
8376 		       req,
8377 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8378 		hclge_cmd_setup_basic_desc(&desc[1],
8379 					   HCLGE_OPC_MAC_VLAN_ADD,
8380 					   true);
8381 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8382 		hclge_cmd_setup_basic_desc(&desc[2],
8383 					   HCLGE_OPC_MAC_VLAN_ADD,
8384 					   true);
8385 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8386 	} else {
8387 		memcpy(desc[0].data,
8388 		       req,
8389 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8390 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8391 	}
8392 	if (ret) {
8393 		dev_err(&hdev->pdev->dev,
8394 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8395 			ret);
8396 		return ret;
8397 	}
8398 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8399 	retval = le16_to_cpu(desc[0].retval);
8400 
8401 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8402 					     HCLGE_MAC_VLAN_LKUP);
8403 }
8404 
8405 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8406 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8407 				  struct hclge_desc *mc_desc)
8408 {
8409 	struct hclge_dev *hdev = vport->back;
8410 	int cfg_status;
8411 	u8 resp_code;
8412 	u16 retval;
8413 	int ret;
8414 
8415 	if (!mc_desc) {
8416 		struct hclge_desc desc;
8417 
8418 		hclge_cmd_setup_basic_desc(&desc,
8419 					   HCLGE_OPC_MAC_VLAN_ADD,
8420 					   false);
8421 		memcpy(desc.data, req,
8422 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8423 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8424 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8425 		retval = le16_to_cpu(desc.retval);
8426 
8427 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8428 							   resp_code,
8429 							   HCLGE_MAC_VLAN_ADD);
8430 	} else {
8431 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8432 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8433 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8434 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8435 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8436 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8437 		memcpy(mc_desc[0].data, req,
8438 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8439 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8440 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8441 		retval = le16_to_cpu(mc_desc[0].retval);
8442 
8443 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8444 							   resp_code,
8445 							   HCLGE_MAC_VLAN_ADD);
8446 	}
8447 
8448 	if (ret) {
8449 		dev_err(&hdev->pdev->dev,
8450 			"add mac addr failed for cmd_send, ret =%d.\n",
8451 			ret);
8452 		return ret;
8453 	}
8454 
8455 	return cfg_status;
8456 }
8457 
8458 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8459 			       u16 *allocated_size)
8460 {
8461 	struct hclge_umv_spc_alc_cmd *req;
8462 	struct hclge_desc desc;
8463 	int ret;
8464 
8465 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8466 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8467 
8468 	req->space_size = cpu_to_le32(space_size);
8469 
8470 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8471 	if (ret) {
8472 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8473 			ret);
8474 		return ret;
8475 	}
8476 
8477 	*allocated_size = le32_to_cpu(desc.data[1]);
8478 
8479 	return 0;
8480 }
8481 
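/* Request the unicast mac vlan (UMV) table space from the firmware and split
 * it between the vports: each vport gets a private quota of priv_umv_size
 * entries, and one extra quota plus the division remainder forms the shared
 * pool.
 */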
8482 static int hclge_init_umv_space(struct hclge_dev *hdev)
8483 {
8484 	u16 allocated_size = 0;
8485 	int ret;
8486 
8487 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8488 	if (ret)
8489 		return ret;
8490 
8491 	if (allocated_size < hdev->wanted_umv_size)
8492 		dev_warn(&hdev->pdev->dev,
8493 			 "failed to alloc umv space, want %u, get %u\n",
8494 			 hdev->wanted_umv_size, allocated_size);
8495 
8496 	hdev->max_umv_size = allocated_size;
8497 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8498 	hdev->share_umv_size = hdev->priv_umv_size +
8499 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8500 
8501 	return 0;
8502 }
8503 
8504 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8505 {
8506 	struct hclge_vport *vport;
8507 	int i;
8508 
8509 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8510 		vport = &hdev->vport[i];
8511 		vport->used_umv_num = 0;
8512 	}
8513 
8514 	mutex_lock(&hdev->vport_lock);
8515 	hdev->share_umv_size = hdev->priv_umv_size +
8516 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8517 	mutex_unlock(&hdev->vport_lock);
8518 }
8519 
8520 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8521 {
8522 	struct hclge_dev *hdev = vport->back;
8523 	bool is_full;
8524 
8525 	if (need_lock)
8526 		mutex_lock(&hdev->vport_lock);
8527 
8528 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8529 		   hdev->share_umv_size == 0);
8530 
8531 	if (need_lock)
8532 		mutex_unlock(&hdev->vport_lock);
8533 
8534 	return is_full;
8535 }
8536 
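/* Account a unicast mac entry against the vport's private UMV quota,
 * spilling into (or releasing back to) the shared pool once the private
 * quota is exhausted. The callers hold hdev->vport_lock.
 */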
8537 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8538 {
8539 	struct hclge_dev *hdev = vport->back;
8540 
8541 	if (is_free) {
8542 		if (vport->used_umv_num > hdev->priv_umv_size)
8543 			hdev->share_umv_size++;
8544 
8545 		if (vport->used_umv_num > 0)
8546 			vport->used_umv_num--;
8547 	} else {
8548 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8549 		    hdev->share_umv_size > 0)
8550 			hdev->share_umv_size--;
8551 		vport->used_umv_num++;
8552 	}
8553 }
8554 
8555 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8556 						  const u8 *mac_addr)
8557 {
8558 	struct hclge_mac_node *mac_node, *tmp;
8559 
8560 	list_for_each_entry_safe(mac_node, tmp, list, node)
8561 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8562 			return mac_node;
8563 
8564 	return NULL;
8565 }
8566 
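/* Update the state of an existing mac node for the requested transition:
 * TO_ADD on a TO_DEL node revives it as ACTIVE, TO_DEL on a TO_ADD node
 * frees the node (it was never written to hardware), and ACTIVE marks a
 * TO_ADD node as successfully programmed.
 */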
8567 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8568 				  enum HCLGE_MAC_NODE_STATE state)
8569 {
8570 	switch (state) {
8571 	/* from set_rx_mode or tmp_add_list */
8572 	case HCLGE_MAC_TO_ADD:
8573 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8574 			mac_node->state = HCLGE_MAC_ACTIVE;
8575 		break;
8576 	/* only from set_rx_mode */
8577 	case HCLGE_MAC_TO_DEL:
8578 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8579 			list_del(&mac_node->node);
8580 			kfree(mac_node);
8581 		} else {
8582 			mac_node->state = HCLGE_MAC_TO_DEL;
8583 		}
8584 		break;
	/* only from tmp_add_list; in that case the mac_node->state
	 * cannot already be ACTIVE.
	 */
8588 	case HCLGE_MAC_ACTIVE:
8589 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8590 			mac_node->state = HCLGE_MAC_ACTIVE;
8591 
8592 		break;
8593 	}
8594 }
8595 
8596 int hclge_update_mac_list(struct hclge_vport *vport,
8597 			  enum HCLGE_MAC_NODE_STATE state,
8598 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8599 			  const unsigned char *addr)
8600 {
8601 	struct hclge_dev *hdev = vport->back;
8602 	struct hclge_mac_node *mac_node;
8603 	struct list_head *list;
8604 
8605 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8606 		&vport->uc_mac_list : &vport->mc_mac_list;
8607 
8608 	spin_lock_bh(&vport->mac_list_lock);
8609 
	/* If the mac addr is already in the mac list, there is no need to add
	 * a new node for it; just update the existing node's state: convert
	 * it to a new state, remove it, or leave it unchanged.
	 */
8614 	mac_node = hclge_find_mac_node(list, addr);
8615 	if (mac_node) {
8616 		hclge_update_mac_node(mac_node, state);
8617 		spin_unlock_bh(&vport->mac_list_lock);
8618 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8619 		return 0;
8620 	}
8621 
	/* if this address has never been added, there is nothing to delete */
8623 	if (state == HCLGE_MAC_TO_DEL) {
8624 		spin_unlock_bh(&vport->mac_list_lock);
8625 		dev_err(&hdev->pdev->dev,
8626 			"failed to delete address %pM from mac list\n",
8627 			addr);
8628 		return -ENOENT;
8629 	}
8630 
8631 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8632 	if (!mac_node) {
8633 		spin_unlock_bh(&vport->mac_list_lock);
8634 		return -ENOMEM;
8635 	}
8636 
8637 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8638 
8639 	mac_node->state = state;
8640 	ether_addr_copy(mac_node->mac_addr, addr);
8641 	list_add_tail(&mac_node->node, list);
8642 
8643 	spin_unlock_bh(&vport->mac_list_lock);
8644 
8645 	return 0;
8646 }
8647 
8648 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8649 			     const unsigned char *addr)
8650 {
8651 	struct hclge_vport *vport = hclge_get_vport(handle);
8652 
8653 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8654 				     addr);
8655 }
8656 
8657 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8658 			     const unsigned char *addr)
8659 {
8660 	struct hclge_dev *hdev = vport->back;
8661 	struct hclge_mac_vlan_tbl_entry_cmd req;
8662 	struct hclge_desc desc;
8663 	u16 egress_port = 0;
8664 	int ret;
8665 
8666 	/* mac addr check */
8667 	if (is_zero_ether_addr(addr) ||
8668 	    is_broadcast_ether_addr(addr) ||
8669 	    is_multicast_ether_addr(addr)) {
8670 		dev_err(&hdev->pdev->dev,
8671 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8672 			 addr, is_zero_ether_addr(addr),
8673 			 is_broadcast_ether_addr(addr),
8674 			 is_multicast_ether_addr(addr));
8675 		return -EINVAL;
8676 	}
8677 
8678 	memset(&req, 0, sizeof(req));
8679 
8680 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8681 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8682 
8683 	req.egress_port = cpu_to_le16(egress_port);
8684 
8685 	hclge_prepare_mac_addr(&req, addr, false);
8686 
	/* Look up the mac address in the mac_vlan table, and add it if the
	 * entry does not exist. Duplicate unicast entries are not allowed in
	 * the mac_vlan table.
	 */
8691 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8692 	if (ret == -ENOENT) {
8693 		mutex_lock(&hdev->vport_lock);
8694 		if (!hclge_is_umv_space_full(vport, false)) {
8695 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8696 			if (!ret)
8697 				hclge_update_umv_space(vport, false);
8698 			mutex_unlock(&hdev->vport_lock);
8699 			return ret;
8700 		}
8701 		mutex_unlock(&hdev->vport_lock);
8702 
8703 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8704 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8705 				hdev->priv_umv_size);
8706 
8707 		return -ENOSPC;
8708 	}
8709 
	/* check if we just hit a duplicate entry */
8711 	if (!ret) {
8712 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8713 			 vport->vport_id, addr);
8714 		return 0;
8715 	}
8716 
8717 	dev_err(&hdev->pdev->dev,
8718 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8719 		addr);
8720 
8721 	return ret;
8722 }
8723 
8724 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8725 			    const unsigned char *addr)
8726 {
8727 	struct hclge_vport *vport = hclge_get_vport(handle);
8728 
8729 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8730 				     addr);
8731 }
8732 
8733 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8734 			    const unsigned char *addr)
8735 {
8736 	struct hclge_dev *hdev = vport->back;
8737 	struct hclge_mac_vlan_tbl_entry_cmd req;
8738 	int ret;
8739 
8740 	/* mac addr check */
8741 	if (is_zero_ether_addr(addr) ||
8742 	    is_broadcast_ether_addr(addr) ||
8743 	    is_multicast_ether_addr(addr)) {
8744 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8745 			addr);
8746 		return -EINVAL;
8747 	}
8748 
8749 	memset(&req, 0, sizeof(req));
8750 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8751 	hclge_prepare_mac_addr(&req, addr, false);
8752 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8753 	if (!ret) {
8754 		mutex_lock(&hdev->vport_lock);
8755 		hclge_update_umv_space(vport, true);
8756 		mutex_unlock(&hdev->vport_lock);
8757 	} else if (ret == -ENOENT) {
8758 		ret = 0;
8759 	}
8760 
8761 	return ret;
8762 }
8763 
8764 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8765 			     const unsigned char *addr)
8766 {
8767 	struct hclge_vport *vport = hclge_get_vport(handle);
8768 
8769 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8770 				     addr);
8771 }
8772 
8773 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8774 			     const unsigned char *addr)
8775 {
8776 	struct hclge_dev *hdev = vport->back;
8777 	struct hclge_mac_vlan_tbl_entry_cmd req;
8778 	struct hclge_desc desc[3];
8779 	int status;
8780 
8781 	/* mac addr check */
8782 	if (!is_multicast_ether_addr(addr)) {
8783 		dev_err(&hdev->pdev->dev,
8784 			"Add mc mac err! invalid mac:%pM.\n",
8785 			 addr);
8786 		return -EINVAL;
8787 	}
8788 	memset(&req, 0, sizeof(req));
8789 	hclge_prepare_mac_addr(&req, addr, true);
8790 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8791 	if (status) {
		/* This mac addr does not exist, add a new entry for it */
8793 		memset(desc[0].data, 0, sizeof(desc[0].data));
8794 		memset(desc[1].data, 0, sizeof(desc[0].data));
8795 		memset(desc[2].data, 0, sizeof(desc[0].data));
8796 	}
8797 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8798 	if (status)
8799 		return status;
8800 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	/* if the table has already overflowed, do not print every time */
8802 	if (status == -ENOSPC &&
8803 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8804 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8805 
8806 	return status;
8807 }
8808 
8809 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8810 			    const unsigned char *addr)
8811 {
8812 	struct hclge_vport *vport = hclge_get_vport(handle);
8813 
8814 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8815 				     addr);
8816 }
8817 
8818 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8819 			    const unsigned char *addr)
8820 {
8821 	struct hclge_dev *hdev = vport->back;
8822 	struct hclge_mac_vlan_tbl_entry_cmd req;
8823 	enum hclge_cmd_status status;
8824 	struct hclge_desc desc[3];
8825 
8826 	/* mac addr check */
8827 	if (!is_multicast_ether_addr(addr)) {
8828 		dev_dbg(&hdev->pdev->dev,
8829 			"Remove mc mac err! invalid mac:%pM.\n",
8830 			 addr);
8831 		return -EINVAL;
8832 	}
8833 
8834 	memset(&req, 0, sizeof(req));
8835 	hclge_prepare_mac_addr(&req, addr, true);
8836 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8837 	if (!status) {
		/* This mac addr exists, remove this handle's VFID from it */
8839 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8840 		if (status)
8841 			return status;
8842 
8843 		if (hclge_is_all_function_id_zero(desc))
			/* All the vfids are zero, so delete this entry */
8845 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8846 		else
			/* Not all the vfids are zero, so update the vfids */
8848 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8849 	} else if (status == -ENOENT) {
8850 		status = 0;
8851 	}
8852 
8853 	return status;
8854 }
8855 
8856 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8857 				      struct list_head *list,
8858 				      int (*sync)(struct hclge_vport *,
8859 						  const unsigned char *))
8860 {
8861 	struct hclge_mac_node *mac_node, *tmp;
8862 	int ret;
8863 
8864 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8865 		ret = sync(vport, mac_node->mac_addr);
8866 		if (!ret) {
8867 			mac_node->state = HCLGE_MAC_ACTIVE;
8868 		} else {
8869 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8870 				&vport->state);
8871 			break;
8872 		}
8873 	}
8874 }
8875 
8876 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8877 					struct list_head *list,
8878 					int (*unsync)(struct hclge_vport *,
8879 						      const unsigned char *))
8880 {
8881 	struct hclge_mac_node *mac_node, *tmp;
8882 	int ret;
8883 
8884 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8885 		ret = unsync(vport, mac_node->mac_addr);
8886 		if (!ret || ret == -ENOENT) {
8887 			list_del(&mac_node->node);
8888 			kfree(mac_node);
8889 		} else {
8890 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8891 				&vport->state);
8892 			break;
8893 		}
8894 	}
8895 }
8896 
8897 static bool hclge_sync_from_add_list(struct list_head *add_list,
8898 				     struct list_head *mac_list)
8899 {
8900 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8901 	bool all_added = true;
8902 
8903 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8904 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8905 			all_added = false;
8906 
		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, it means a TO_DEL request was received
		 * during the time window of adding the mac address into the
		 * mac table. If the mac_node state is ACTIVE, change it to
		 * TO_DEL so it will be removed next time; otherwise it must
		 * be TO_ADD, meaning this address hasn't been added into the
		 * mac table yet, so just remove the mac node.
		 */
8915 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8916 		if (new_node) {
8917 			hclge_update_mac_node(new_node, mac_node->state);
8918 			list_del(&mac_node->node);
8919 			kfree(mac_node);
8920 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8921 			mac_node->state = HCLGE_MAC_TO_DEL;
8922 			list_move_tail(&mac_node->node, mac_list);
8923 		} else {
8924 			list_del(&mac_node->node);
8925 			kfree(mac_node);
8926 		}
8927 	}
8928 
8929 	return all_added;
8930 }
8931 
8932 static void hclge_sync_from_del_list(struct list_head *del_list,
8933 				     struct list_head *mac_list)
8934 {
8935 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8936 
8937 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8938 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8939 		if (new_node) {
			/* If the mac addr exists in the mac list, it means a
			 * new TO_ADD request was received during the time
			 * window of configuring the mac address. Since the
			 * mac node state is TO_ADD and the address is already
			 * in the hardware (because the delete failed), we
			 * just need to change the mac node state to ACTIVE.
			 */
8947 			new_node->state = HCLGE_MAC_ACTIVE;
8948 			list_del(&mac_node->node);
8949 			kfree(mac_node);
8950 		} else {
8951 			list_move_tail(&mac_node->node, mac_list);
8952 		}
8953 	}
8954 }
8955 
8956 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8957 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8958 					bool is_all_added)
8959 {
8960 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8961 		if (is_all_added)
8962 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8963 		else
8964 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8965 	} else {
8966 		if (is_all_added)
8967 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8968 		else
8969 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8970 	}
8971 }
8972 
8973 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8974 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8975 {
8976 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8977 	struct list_head tmp_add_list, tmp_del_list;
8978 	struct list_head *list;
8979 	bool all_added;
8980 
8981 	INIT_LIST_HEAD(&tmp_add_list);
8982 	INIT_LIST_HEAD(&tmp_del_list);
8983 
	/* move the mac addrs to the tmp_add_list and tmp_del_list, so
	 * we can add/delete them outside the spin lock
	 */
8987 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8988 		&vport->uc_mac_list : &vport->mc_mac_list;
8989 
8990 	spin_lock_bh(&vport->mac_list_lock);
8991 
8992 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8993 		switch (mac_node->state) {
8994 		case HCLGE_MAC_TO_DEL:
8995 			list_move_tail(&mac_node->node, &tmp_del_list);
8996 			break;
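		/* TO_ADD entries are copied rather than moved, so the
		 * original node stays on the list and can still be switched
		 * to TO_DEL while the address is being programmed
		 */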
8997 		case HCLGE_MAC_TO_ADD:
8998 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8999 			if (!new_node)
9000 				goto stop_traverse;
9001 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9002 			new_node->state = mac_node->state;
9003 			list_add_tail(&new_node->node, &tmp_add_list);
9004 			break;
9005 		default:
9006 			break;
9007 		}
9008 	}
9009 
9010 stop_traverse:
9011 	spin_unlock_bh(&vport->mac_list_lock);
9012 
9013 	/* delete first, in order to get max mac table space for adding */
9014 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9015 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9016 					    hclge_rm_uc_addr_common);
9017 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
9018 					  hclge_add_uc_addr_common);
9019 	} else {
9020 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9021 					    hclge_rm_mc_addr_common);
9022 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
9023 					  hclge_add_mc_addr_common);
9024 	}
9025 
	/* if adding/deleting some mac addresses failed, move them back to
	 * the mac_list and retry next time.
	 */
9029 	spin_lock_bh(&vport->mac_list_lock);
9030 
9031 	hclge_sync_from_del_list(&tmp_del_list, list);
9032 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9033 
9034 	spin_unlock_bh(&vport->mac_list_lock);
9035 
9036 	hclge_update_overflow_flags(vport, mac_type, all_added);
9037 }
9038 
9039 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9040 {
9041 	struct hclge_dev *hdev = vport->back;
9042 
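	/* skip vports whose mac table updates are blocked via
	 * vport_config_block, e.g. while a VF's table is cleared for reset
	 */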
9043 	if (test_bit(vport->vport_id, hdev->vport_config_block))
9044 		return false;
9045 
9046 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9047 		return true;
9048 
9049 	return false;
9050 }
9051 
9052 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9053 {
9054 	int i;
9055 
9056 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9057 		struct hclge_vport *vport = &hdev->vport[i];
9058 
9059 		if (!hclge_need_sync_mac_table(vport))
9060 			continue;
9061 
9062 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9063 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9064 	}
9065 }
9066 
9067 static void hclge_build_del_list(struct list_head *list,
9068 				 bool is_del_list,
9069 				 struct list_head *tmp_del_list)
9070 {
9071 	struct hclge_mac_node *mac_cfg, *tmp;
9072 
9073 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9074 		switch (mac_cfg->state) {
9075 		case HCLGE_MAC_TO_DEL:
9076 		case HCLGE_MAC_ACTIVE:
9077 			list_move_tail(&mac_cfg->node, tmp_del_list);
9078 			break;
9079 		case HCLGE_MAC_TO_ADD:
9080 			if (is_del_list) {
9081 				list_del(&mac_cfg->node);
9082 				kfree(mac_cfg);
9083 			}
9084 			break;
9085 		}
9086 	}
9087 }
9088 
9089 static void hclge_unsync_del_list(struct hclge_vport *vport,
9090 				  int (*unsync)(struct hclge_vport *vport,
9091 						const unsigned char *addr),
9092 				  bool is_del_list,
9093 				  struct list_head *tmp_del_list)
9094 {
9095 	struct hclge_mac_node *mac_cfg, *tmp;
9096 	int ret;
9097 
9098 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9099 		ret = unsync(vport, mac_cfg->mac_addr);
9100 		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware, but keep these
			 * mac addrs in the mac list so they can be restored
			 * after the vf reset finishes.
			 */
9105 			if (!is_del_list &&
9106 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9107 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9108 			} else {
9109 				list_del(&mac_cfg->node);
9110 				kfree(mac_cfg);
9111 			}
9112 		} else if (is_del_list) {
9113 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9114 		}
9115 	}
9116 }
9117 
9118 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9119 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9120 {
9121 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9122 	struct hclge_dev *hdev = vport->back;
9123 	struct list_head tmp_del_list, *list;
9124 
9125 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9126 		list = &vport->uc_mac_list;
9127 		unsync = hclge_rm_uc_addr_common;
9128 	} else {
9129 		list = &vport->mc_mac_list;
9130 		unsync = hclge_rm_mc_addr_common;
9131 	}
9132 
9133 	INIT_LIST_HEAD(&tmp_del_list);
9134 
9135 	if (!is_del_list)
9136 		set_bit(vport->vport_id, hdev->vport_config_block);
9137 
9138 	spin_lock_bh(&vport->mac_list_lock);
9139 
9140 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9141 
9142 	spin_unlock_bh(&vport->mac_list_lock);
9143 
9144 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9145 
9146 	spin_lock_bh(&vport->mac_list_lock);
9147 
9148 	hclge_sync_from_del_list(&tmp_del_list, list);
9149 
9150 	spin_unlock_bh(&vport->mac_list_lock);
9151 }
9152 
/* remove all mac addresses when uninitializing */
9154 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9155 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9156 {
9157 	struct hclge_mac_node *mac_node, *tmp;
9158 	struct hclge_dev *hdev = vport->back;
9159 	struct list_head tmp_del_list, *list;
9160 
9161 	INIT_LIST_HEAD(&tmp_del_list);
9162 
9163 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9164 		&vport->uc_mac_list : &vport->mc_mac_list;
9165 
9166 	spin_lock_bh(&vport->mac_list_lock);
9167 
9168 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9169 		switch (mac_node->state) {
9170 		case HCLGE_MAC_TO_DEL:
9171 		case HCLGE_MAC_ACTIVE:
9172 			list_move_tail(&mac_node->node, &tmp_del_list);
9173 			break;
9174 		case HCLGE_MAC_TO_ADD:
9175 			list_del(&mac_node->node);
9176 			kfree(mac_node);
9177 			break;
9178 		}
9179 	}
9180 
9181 	spin_unlock_bh(&vport->mac_list_lock);
9182 
9183 	if (mac_type == HCLGE_MAC_ADDR_UC)
9184 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9185 					    hclge_rm_uc_addr_common);
9186 	else
9187 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9188 					    hclge_rm_mc_addr_common);
9189 
9190 	if (!list_empty(&tmp_del_list))
9191 		dev_warn(&hdev->pdev->dev,
9192 			 "uninit %s mac list for vport %u not completely.\n",
9193 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9194 			 vport->vport_id);
9195 
9196 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9197 		list_del(&mac_node->node);
9198 		kfree(mac_node);
9199 	}
9200 }
9201 
9202 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9203 {
9204 	struct hclge_vport *vport;
9205 	int i;
9206 
9207 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9208 		vport = &hdev->vport[i];
9209 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9210 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9211 	}
9212 }
9213 
9214 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9215 					      u16 cmdq_resp, u8 resp_code)
9216 {
9217 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9218 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9219 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9220 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9221 
9222 	int return_status;
9223 
9224 	if (cmdq_resp) {
9225 		dev_err(&hdev->pdev->dev,
9226 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9227 			cmdq_resp);
9228 		return -EIO;
9229 	}
9230 
9231 	switch (resp_code) {
9232 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9233 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9234 		return_status = 0;
9235 		break;
9236 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9237 		dev_err(&hdev->pdev->dev,
9238 			"add mac ethertype failed for manager table overflow.\n");
9239 		return_status = -EIO;
9240 		break;
9241 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9242 		dev_err(&hdev->pdev->dev,
9243 			"add mac ethertype failed for key conflict.\n");
9244 		return_status = -EIO;
9245 		break;
9246 	default:
9247 		dev_err(&hdev->pdev->dev,
9248 			"add mac ethertype failed for undefined, code=%u.\n",
9249 			resp_code);
9250 		return_status = -EIO;
9251 	}
9252 
9253 	return return_status;
9254 }
9255 
9256 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9257 				     u8 *mac_addr)
9258 {
9259 	struct hclge_mac_vlan_tbl_entry_cmd req;
9260 	struct hclge_dev *hdev = vport->back;
9261 	struct hclge_desc desc;
9262 	u16 egress_port = 0;
9263 	int i;
9264 
9265 	if (is_zero_ether_addr(mac_addr))
9266 		return false;
9267 
9268 	memset(&req, 0, sizeof(req));
9269 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9270 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9271 	req.egress_port = cpu_to_le16(egress_port);
9272 	hclge_prepare_mac_addr(&req, mac_addr, false);
9273 
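	/* any lookup result other than -ENOENT (including failures) is
	 * treated as the address already existing in the mac table
	 */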
9274 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9275 		return true;
9276 
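	/* also reject the address if another VF already has it configured;
	 * vf_idx is converted to a vport index for the comparison
	 */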
9277 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9278 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9279 		if (i != vf_idx &&
9280 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9281 			return true;
9282 
9283 	return false;
9284 }
9285 
9286 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9287 			    u8 *mac_addr)
9288 {
9289 	struct hclge_vport *vport = hclge_get_vport(handle);
9290 	struct hclge_dev *hdev = vport->back;
9291 
9292 	vport = hclge_get_vf_vport(hdev, vf);
9293 	if (!vport)
9294 		return -EINVAL;
9295 
9296 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9297 		dev_info(&hdev->pdev->dev,
9298 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9299 			 mac_addr);
9300 		return 0;
9301 	}
9302 
9303 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9304 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9305 			mac_addr);
9306 		return -EEXIST;
9307 	}
9308 
9309 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9310 
9311 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9312 		dev_info(&hdev->pdev->dev,
9313 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9314 			 vf, mac_addr);
9315 		return hclge_inform_reset_assert_to_vf(vport);
9316 	}
9317 
9318 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9319 		 vf, mac_addr);
9320 	return 0;
9321 }
9322 
9323 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9324 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9325 {
9326 	struct hclge_desc desc;
9327 	u8 resp_code;
9328 	u16 retval;
9329 	int ret;
9330 
9331 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9332 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9333 
9334 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9335 	if (ret) {
9336 		dev_err(&hdev->pdev->dev,
9337 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9338 			ret);
9339 		return ret;
9340 	}
9341 
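	/* the response code is carried in bits 15:8 of the first data word */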
9342 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9343 	retval = le16_to_cpu(desc.retval);
9344 
9345 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9346 }
9347 
9348 static int init_mgr_tbl(struct hclge_dev *hdev)
9349 {
9350 	int ret;
9351 	int i;
9352 
9353 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9354 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9355 		if (ret) {
9356 			dev_err(&hdev->pdev->dev,
9357 				"add mac ethertype failed, ret =%d.\n",
9358 				ret);
9359 			return ret;
9360 		}
9361 	}
9362 
9363 	return 0;
9364 }
9365 
9366 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9367 {
9368 	struct hclge_vport *vport = hclge_get_vport(handle);
9369 	struct hclge_dev *hdev = vport->back;
9370 
9371 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9372 }
9373 
9374 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9375 				       const u8 *old_addr, const u8 *new_addr)
9376 {
9377 	struct list_head *list = &vport->uc_mac_list;
9378 	struct hclge_mac_node *old_node, *new_node;
9379 
9380 	new_node = hclge_find_mac_node(list, new_addr);
9381 	if (!new_node) {
9382 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9383 		if (!new_node)
9384 			return -ENOMEM;
9385 
9386 		new_node->state = HCLGE_MAC_TO_ADD;
9387 		ether_addr_copy(new_node->mac_addr, new_addr);
9388 		list_add(&new_node->node, list);
9389 	} else {
9390 		if (new_node->state == HCLGE_MAC_TO_DEL)
9391 			new_node->state = HCLGE_MAC_ACTIVE;
9392 
		/* make sure the new addr is at the list head, to avoid the
		 * dev addr not being re-added into the mac table due to the
		 * umv space limitation after a global/imp reset, which clears
		 * the mac table in hardware.
		 */
9398 		list_move(&new_node->node, list);
9399 	}
9400 
9401 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9402 		old_node = hclge_find_mac_node(list, old_addr);
9403 		if (old_node) {
9404 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9405 				list_del(&old_node->node);
9406 				kfree(old_node);
9407 			} else {
9408 				old_node->state = HCLGE_MAC_TO_DEL;
9409 			}
9410 		}
9411 	}
9412 
9413 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9414 
9415 	return 0;
9416 }
9417 
9418 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9419 			      bool is_first)
9420 {
9421 	const unsigned char *new_addr = (const unsigned char *)p;
9422 	struct hclge_vport *vport = hclge_get_vport(handle);
9423 	struct hclge_dev *hdev = vport->back;
9424 	unsigned char *old_addr = NULL;
9425 	int ret;
9426 
9427 	/* mac addr check */
9428 	if (is_zero_ether_addr(new_addr) ||
9429 	    is_broadcast_ether_addr(new_addr) ||
9430 	    is_multicast_ether_addr(new_addr)) {
9431 		dev_err(&hdev->pdev->dev,
9432 			"change uc mac err! invalid mac: %pM.\n",
9433 			 new_addr);
9434 		return -EINVAL;
9435 	}
9436 
9437 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9438 	if (ret) {
9439 		dev_err(&hdev->pdev->dev,
9440 			"failed to configure mac pause address, ret = %d\n",
9441 			ret);
9442 		return ret;
9443 	}
9444 
9445 	if (!is_first)
9446 		old_addr = hdev->hw.mac.mac_addr;
9447 
9448 	spin_lock_bh(&vport->mac_list_lock);
9449 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9450 	if (ret) {
9451 		dev_err(&hdev->pdev->dev,
9452 			"failed to change the mac addr:%pM, ret = %d\n",
9453 			new_addr, ret);
9454 		spin_unlock_bh(&vport->mac_list_lock);
9455 
9456 		if (!is_first)
9457 			hclge_pause_addr_cfg(hdev, old_addr);
9458 
9459 		return ret;
9460 	}
	/* we must update the dev addr with the spin lock held, to prevent
	 * the dev addr from being removed by the set_rx_mode path.
	 */
9464 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9465 	spin_unlock_bh(&vport->mac_list_lock);
9466 
9467 	hclge_task_schedule(hdev, 0);
9468 
9469 	return 0;
9470 }
9471 
9472 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9473 {
9474 	struct mii_ioctl_data *data = if_mii(ifr);
9475 
9476 	if (!hnae3_dev_phy_imp_supported(hdev))
9477 		return -EOPNOTSUPP;
9478 
9479 	switch (cmd) {
9480 	case SIOCGMIIPHY:
9481 		data->phy_id = hdev->hw.mac.phy_addr;
9482 		/* this command reads phy id and register at the same time */
9483 		fallthrough;
9484 	case SIOCGMIIREG:
9485 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9486 		return 0;
9487 
9488 	case SIOCSMIIREG:
9489 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9490 	default:
9491 		return -EOPNOTSUPP;
9492 	}
9493 }
9494 
9495 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9496 			  int cmd)
9497 {
9498 	struct hclge_vport *vport = hclge_get_vport(handle);
9499 	struct hclge_dev *hdev = vport->back;
9500 
9501 	switch (cmd) {
9502 	case SIOCGHWTSTAMP:
9503 		return hclge_ptp_get_cfg(hdev, ifr);
9504 	case SIOCSHWTSTAMP:
9505 		return hclge_ptp_set_cfg(hdev, ifr);
9506 	default:
9507 		if (!hdev->hw.mac.phydev)
9508 			return hclge_mii_ioctl(hdev, ifr, cmd);
9509 	}
9510 
9511 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9512 }
9513 
9514 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9515 					     bool bypass_en)
9516 {
9517 	struct hclge_port_vlan_filter_bypass_cmd *req;
9518 	struct hclge_desc desc;
9519 	int ret;
9520 
9521 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9522 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9523 	req->vf_id = vf_id;
9524 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9525 		      bypass_en ? 1 : 0);
9526 
9527 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9528 	if (ret)
9529 		dev_err(&hdev->pdev->dev,
9530 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9531 			vf_id, ret);
9532 
9533 	return ret;
9534 }
9535 
9536 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9537 				      u8 fe_type, bool filter_en, u8 vf_id)
9538 {
9539 	struct hclge_vlan_filter_ctrl_cmd *req;
9540 	struct hclge_desc desc;
9541 	int ret;
9542 
9543 	/* read current vlan filter parameter */
9544 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9545 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9546 	req->vlan_type = vlan_type;
9547 	req->vf_id = vf_id;
9548 
9549 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9550 	if (ret) {
9551 		dev_err(&hdev->pdev->dev,
9552 			"failed to get vlan filter config, ret = %d.\n", ret);
9553 		return ret;
9554 	}
9555 
9556 	/* modify and write new config parameter */
9557 	hclge_cmd_reuse_desc(&desc, false);
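	/* set or clear only the requested filter enable bits, keeping the
	 * other bits read back above untouched
	 */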
9558 	req->vlan_fe = filter_en ?
9559 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9560 
9561 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9562 	if (ret)
9563 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9564 			ret);
9565 
9566 	return ret;
9567 }
9568 
9569 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9570 {
9571 	struct hclge_dev *hdev = vport->back;
9572 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9573 	int ret;
9574 
9575 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9576 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9577 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9578 						  enable, vport->vport_id);
9579 
9580 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9581 					 HCLGE_FILTER_FE_EGRESS, enable,
9582 					 vport->vport_id);
9583 	if (ret)
9584 		return ret;
9585 
9586 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9587 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9588 							!enable);
9589 	} else if (!vport->vport_id) {
9590 		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9591 			enable = false;
9592 
9593 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9594 						 HCLGE_FILTER_FE_INGRESS,
9595 						 enable, 0);
9596 	}
9597 
9598 	return ret;
9599 }
9600 
9601 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9602 {
9603 	struct hnae3_handle *handle = &vport->nic;
9604 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9605 	struct hclge_dev *hdev = vport->back;
9606 
9607 	if (vport->vport_id) {
9608 		if (vport->port_base_vlan_cfg.state !=
9609 			HNAE3_PORT_BASE_VLAN_DISABLE)
9610 			return true;
9611 
9612 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9613 			return false;
9614 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9615 		return false;
9616 	}
9617 
9618 	if (!vport->req_vlan_fltr_en)
9619 		return false;
9620 
	/* for compatibility with older devices, always enable vlan filter */
9622 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9623 		return true;
9624 
9625 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9626 		if (vlan->vlan_id != 0)
9627 			return true;
9628 
9629 	return false;
9630 }
9631 
9632 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9633 {
9634 	struct hclge_dev *hdev = vport->back;
9635 	bool need_en;
9636 	int ret;
9637 
9638 	mutex_lock(&hdev->vport_lock);
9639 
9640 	vport->req_vlan_fltr_en = request_en;
9641 
9642 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9643 	if (need_en == vport->cur_vlan_fltr_en) {
9644 		mutex_unlock(&hdev->vport_lock);
9645 		return 0;
9646 	}
9647 
9648 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9649 	if (ret) {
9650 		mutex_unlock(&hdev->vport_lock);
9651 		return ret;
9652 	}
9653 
9654 	vport->cur_vlan_fltr_en = need_en;
9655 
9656 	mutex_unlock(&hdev->vport_lock);
9657 
9658 	return 0;
9659 }
9660 
9661 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9662 {
9663 	struct hclge_vport *vport = hclge_get_vport(handle);
9664 
9665 	return hclge_enable_vport_vlan_filter(vport, enable);
9666 }
9667 
9668 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9669 					bool is_kill, u16 vlan,
9670 					struct hclge_desc *desc)
9671 {
9672 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9673 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9674 	u8 vf_byte_val;
9675 	u8 vf_byte_off;
9676 	int ret;
9677 
9678 	hclge_cmd_setup_basic_desc(&desc[0],
9679 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9680 	hclge_cmd_setup_basic_desc(&desc[1],
9681 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9682 
9683 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9684 
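	/* each function owns one bit in the vf bitmap, which spans both
	 * descriptors: locate its byte offset and the bit within that byte
	 */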
9685 	vf_byte_off = vfid / 8;
9686 	vf_byte_val = 1 << (vfid % 8);
9687 
9688 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9689 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9690 
9691 	req0->vlan_id  = cpu_to_le16(vlan);
9692 	req0->vlan_cfg = is_kill;
9693 
9694 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9695 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9696 	else
9697 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9698 
9699 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9700 	if (ret) {
9701 		dev_err(&hdev->pdev->dev,
9702 			"Send vf vlan command fail, ret =%d.\n",
9703 			ret);
9704 		return ret;
9705 	}
9706 
9707 	return 0;
9708 }
9709 
9710 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9711 					  bool is_kill, struct hclge_desc *desc)
9712 {
9713 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9714 
9715 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9716 
9717 	if (!is_kill) {
9718 #define HCLGE_VF_VLAN_NO_ENTRY	2
9719 		if (!req->resp_code || req->resp_code == 1)
9720 			return 0;
9721 
9722 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9723 			set_bit(vfid, hdev->vf_vlan_full);
9724 			dev_warn(&hdev->pdev->dev,
9725 				 "vf vlan table is full, vf vlan filter is disabled\n");
9726 			return 0;
9727 		}
9728 
9729 		dev_err(&hdev->pdev->dev,
9730 			"Add vf vlan filter fail, ret =%u.\n",
9731 			req->resp_code);
9732 	} else {
9733 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9734 		if (!req->resp_code)
9735 			return 0;
9736 
		/* the vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are not added into the vf vlan table.
		 * Just return 0 without a warning, to avoid massive verbose
		 * logs when unloading.
		 */
9742 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9743 			return 0;
9744 
9745 		dev_err(&hdev->pdev->dev,
9746 			"Kill vf vlan filter fail, ret =%u.\n",
9747 			req->resp_code);
9748 	}
9749 
9750 	return -EIO;
9751 }
9752 
9753 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9754 				    bool is_kill, u16 vlan)
9755 {
9756 	struct hclge_vport *vport = &hdev->vport[vfid];
9757 	struct hclge_desc desc[2];
9758 	int ret;
9759 
	/* if the vf vlan table is full, firmware will close the vf vlan
	 * filter, so it is neither possible nor necessary to add new vlan
	 * ids to it. If spoof check is enabled and the vf vlan table is
	 * full, new vlans shouldn't be added either, because tx packets
	 * with these vlan ids would be dropped.
	 */
9765 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9766 		if (vport->vf_info.spoofchk && vlan) {
9767 			dev_err(&hdev->pdev->dev,
9768 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9769 			return -EPERM;
9770 		}
9771 		return 0;
9772 	}
9773 
9774 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9775 	if (ret)
9776 		return ret;
9777 
9778 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9779 }
9780 
9781 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9782 				      u16 vlan_id, bool is_kill)
9783 {
9784 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9785 	struct hclge_desc desc;
9786 	u8 vlan_offset_byte_val;
9787 	u8 vlan_offset_byte;
9788 	u8 vlan_offset_160;
9789 	int ret;
9790 
9791 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9792 
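	/* the PF vlan filter table is written in blocks of
	 * HCLGE_VLAN_ID_OFFSET_STEP vlan ids: locate the block, then the
	 * byte and bit of this vlan id within the block's bitmap
	 */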
9793 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9794 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9795 			   HCLGE_VLAN_BYTE_SIZE;
9796 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9797 
9798 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9799 	req->vlan_offset = vlan_offset_160;
9800 	req->vlan_cfg = is_kill;
9801 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9802 
9803 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9804 	if (ret)
9805 		dev_err(&hdev->pdev->dev,
9806 			"port vlan command, send fail, ret =%d.\n", ret);
9807 	return ret;
9808 }
9809 
9810 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9811 				    u16 vport_id, u16 vlan_id,
9812 				    bool is_kill)
9813 {
9814 	u16 vport_idx, vport_num = 0;
9815 	int ret;
9816 
9817 	if (is_kill && !vlan_id)
9818 		return 0;
9819 
9820 	if (vlan_id >= VLAN_N_VID)
9821 		return -EINVAL;
9822 
9823 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9824 	if (ret) {
9825 		dev_err(&hdev->pdev->dev,
9826 			"Set %u vport vlan filter config fail, ret =%d.\n",
9827 			vport_id, ret);
9828 		return ret;
9829 	}
9830 
9831 	/* vlan 0 may be added twice when 8021q module is enabled */
9832 	if (!is_kill && !vlan_id &&
9833 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9834 		return 0;
9835 
9836 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9837 		dev_err(&hdev->pdev->dev,
9838 			"Add port vlan failed, vport %u is already in vlan %u\n",
9839 			vport_id, vlan_id);
9840 		return -EINVAL;
9841 	}
9842 
9843 	if (is_kill &&
9844 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9845 		dev_err(&hdev->pdev->dev,
9846 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9847 			vport_id, vlan_id);
9848 		return -EINVAL;
9849 	}
9850 
9851 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9852 		vport_num++;
9853 
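	/* only touch the port level vlan filter when the first vport joins
	 * the vlan or the last one leaves it
	 */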
9854 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9855 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9856 						 is_kill);
9857 
9858 	return ret;
9859 }
9860 
9861 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9862 {
9863 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9864 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9865 	struct hclge_dev *hdev = vport->back;
9866 	struct hclge_desc desc;
9867 	u16 bmap_index;
9868 	int status;
9869 
9870 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9871 
9872 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9873 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9874 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9875 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9876 		      vcfg->accept_tag1 ? 1 : 0);
9877 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9878 		      vcfg->accept_untag1 ? 1 : 0);
9879 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9880 		      vcfg->accept_tag2 ? 1 : 0);
9881 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9882 		      vcfg->accept_untag2 ? 1 : 0);
9883 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9884 		      vcfg->insert_tag1_en ? 1 : 0);
9885 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9886 		      vcfg->insert_tag2_en ? 1 : 0);
9887 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9888 		      vcfg->tag_shift_mode_en ? 1 : 0);
9889 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9890 
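	/* each command covers HCLGE_VF_NUM_PER_CMD vports: vf_offset selects
	 * the block, then the vport's byte and bit in the vf bitmap are set
	 */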
9891 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9892 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9893 			HCLGE_VF_NUM_PER_BYTE;
9894 	req->vf_bitmap[bmap_index] =
9895 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9896 
9897 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9898 	if (status)
9899 		dev_err(&hdev->pdev->dev,
9900 			"Send port txvlan cfg command fail, ret =%d\n",
9901 			status);
9902 
9903 	return status;
9904 }
9905 
9906 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9907 {
9908 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9909 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9910 	struct hclge_dev *hdev = vport->back;
9911 	struct hclge_desc desc;
9912 	u16 bmap_index;
9913 	int status;
9914 
9915 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9916 
9917 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9918 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9919 		      vcfg->strip_tag1_en ? 1 : 0);
9920 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9921 		      vcfg->strip_tag2_en ? 1 : 0);
9922 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9923 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9924 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9925 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9926 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9927 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9928 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9929 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9930 
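	/* same vf bitmap addressing as for the tx vtag config above */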
9931 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9932 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9933 			HCLGE_VF_NUM_PER_BYTE;
9934 	req->vf_bitmap[bmap_index] =
9935 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9936 
9937 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9938 	if (status)
9939 		dev_err(&hdev->pdev->dev,
9940 			"Send port rxvlan cfg command fail, ret =%d\n",
9941 			status);
9942 
9943 	return status;
9944 }
9945 
9946 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9947 				  u16 port_base_vlan_state,
9948 				  u16 vlan_tag, u8 qos)
9949 {
9950 	int ret;
9951 
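	/* without a port based vlan, tx packets keep their own tag1;
	 * otherwise the port based vlan (qos + tag) is inserted as tag1,
	 * and only device version V3 and later still accept a user tag1
	 */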
9952 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9953 		vport->txvlan_cfg.accept_tag1 = true;
9954 		vport->txvlan_cfg.insert_tag1_en = false;
9955 		vport->txvlan_cfg.default_tag1 = 0;
9956 	} else {
9957 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9958 
9959 		vport->txvlan_cfg.accept_tag1 =
9960 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9961 		vport->txvlan_cfg.insert_tag1_en = true;
9962 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9963 						 vlan_tag;
9964 	}
9965 
9966 	vport->txvlan_cfg.accept_untag1 = true;
9967 
	/* accept_tag2 and accept_untag2 are not supported on pdev
	 * revision 0x20; newer revisions support them, but these two
	 * fields cannot be configured by the user.
	 */
9972 	vport->txvlan_cfg.accept_tag2 = true;
9973 	vport->txvlan_cfg.accept_untag2 = true;
9974 	vport->txvlan_cfg.insert_tag2_en = false;
9975 	vport->txvlan_cfg.default_tag2 = 0;
9976 	vport->txvlan_cfg.tag_shift_mode_en = true;
9977 
9978 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9979 		vport->rxvlan_cfg.strip_tag1_en = false;
9980 		vport->rxvlan_cfg.strip_tag2_en =
9981 				vport->rxvlan_cfg.rx_vlan_offload_en;
9982 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9983 	} else {
9984 		vport->rxvlan_cfg.strip_tag1_en =
9985 				vport->rxvlan_cfg.rx_vlan_offload_en;
9986 		vport->rxvlan_cfg.strip_tag2_en = true;
9987 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9988 	}
9989 
9990 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9991 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9992 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9993 
9994 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9995 	if (ret)
9996 		return ret;
9997 
9998 	return hclge_set_vlan_rx_offload_cfg(vport);
9999 }
10000 
10001 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10002 {
10003 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10004 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10005 	struct hclge_desc desc;
10006 	int status;
10007 
10008 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10009 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10010 	rx_req->ot_fst_vlan_type =
10011 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10012 	rx_req->ot_sec_vlan_type =
10013 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10014 	rx_req->in_fst_vlan_type =
10015 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10016 	rx_req->in_sec_vlan_type =
10017 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10018 
10019 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10020 	if (status) {
10021 		dev_err(&hdev->pdev->dev,
10022 			"Send rxvlan protocol type command fail, ret =%d\n",
10023 			status);
10024 		return status;
10025 	}
10026 
10027 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10028 
10029 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10030 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10031 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10032 
10033 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10034 	if (status)
10035 		dev_err(&hdev->pdev->dev,
10036 			"Send txvlan protocol type command fail, ret =%d\n",
10037 			status);
10038 
10039 	return status;
10040 }
10041 
10042 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10043 {
10044 #define HCLGE_DEF_VLAN_TYPE		0x8100
10045 
10046 	struct hnae3_handle *handle = &hdev->vport[0].nic;
10047 	struct hclge_vport *vport;
10048 	int ret;
10049 	int i;
10050 
10051 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10052 		/* for revision 0x21, vf vlan filter is per function */
10053 		for (i = 0; i < hdev->num_alloc_vport; i++) {
10054 			vport = &hdev->vport[i];
10055 			ret = hclge_set_vlan_filter_ctrl(hdev,
10056 							 HCLGE_FILTER_TYPE_VF,
10057 							 HCLGE_FILTER_FE_EGRESS,
10058 							 true,
10059 							 vport->vport_id);
10060 			if (ret)
10061 				return ret;
10062 			vport->cur_vlan_fltr_en = true;
10063 		}
10064 
10065 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10066 						 HCLGE_FILTER_FE_INGRESS, true,
10067 						 0);
10068 		if (ret)
10069 			return ret;
10070 	} else {
10071 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10072 						 HCLGE_FILTER_FE_EGRESS_V1_B,
10073 						 true, 0);
10074 		if (ret)
10075 			return ret;
10076 	}
10077 
10078 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10079 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10080 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10081 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10082 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10083 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10084 
10085 	ret = hclge_set_vlan_protocol_type(hdev);
10086 	if (ret)
10087 		return ret;
10088 
10089 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10090 		u16 vlan_tag;
10091 		u8 qos;
10092 
10093 		vport = &hdev->vport[i];
10094 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10095 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10096 
10097 		ret = hclge_vlan_offload_cfg(vport,
10098 					     vport->port_base_vlan_cfg.state,
10099 					     vlan_tag, qos);
10100 		if (ret)
10101 			return ret;
10102 	}
10103 
10104 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10105 }
10106 
10107 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10108 				       bool writen_to_tbl)
10109 {
10110 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10111 
10112 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10113 		if (vlan->vlan_id == vlan_id)
10114 			return;
10115 
10116 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10117 	if (!vlan)
10118 		return;
10119 
10120 	vlan->hd_tbl_status = writen_to_tbl;
10121 	vlan->vlan_id = vlan_id;
10122 
10123 	list_add_tail(&vlan->node, &vport->vlan_list);
10124 }
10125 
10126 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10127 {
10128 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10129 	struct hclge_dev *hdev = vport->back;
10130 	int ret;
10131 
10132 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10133 		if (!vlan->hd_tbl_status) {
10134 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10135 						       vport->vport_id,
10136 						       vlan->vlan_id, false);
10137 			if (ret) {
10138 				dev_err(&hdev->pdev->dev,
10139 					"restore vport vlan list failed, ret=%d\n",
10140 					ret);
10141 				return ret;
10142 			}
10143 		}
10144 		vlan->hd_tbl_status = true;
10145 	}
10146 
10147 	return 0;
10148 }
10149 
10150 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10151 				      bool is_write_tbl)
10152 {
10153 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10154 	struct hclge_dev *hdev = vport->back;
10155 
10156 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10157 		if (vlan->vlan_id == vlan_id) {
10158 			if (is_write_tbl && vlan->hd_tbl_status)
10159 				hclge_set_vlan_filter_hw(hdev,
10160 							 htons(ETH_P_8021Q),
10161 							 vport->vport_id,
10162 							 vlan_id,
10163 							 true);
10164 
10165 			list_del(&vlan->node);
10166 			kfree(vlan);
10167 			break;
10168 		}
10169 	}
10170 }
10171 
10172 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10173 {
10174 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10175 	struct hclge_dev *hdev = vport->back;
10176 
10177 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10178 		if (vlan->hd_tbl_status)
10179 			hclge_set_vlan_filter_hw(hdev,
10180 						 htons(ETH_P_8021Q),
10181 						 vport->vport_id,
10182 						 vlan->vlan_id,
10183 						 true);
10184 
10185 		vlan->hd_tbl_status = false;
10186 		if (is_del_list) {
10187 			list_del(&vlan->node);
10188 			kfree(vlan);
10189 		}
10190 	}
10191 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10192 }
10193 
10194 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10195 {
10196 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10197 	struct hclge_vport *vport;
10198 	int i;
10199 
10200 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10201 		vport = &hdev->vport[i];
10202 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10203 			list_del(&vlan->node);
10204 			kfree(vlan);
10205 		}
10206 	}
10207 }
10208 
10209 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10210 {
10211 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10212 	struct hclge_dev *hdev = vport->back;
10213 	u16 vlan_proto;
10214 	u16 vlan_id;
10215 	u16 state;
10216 	int ret;
10217 
10218 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10219 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10220 	state = vport->port_base_vlan_cfg.state;
10221 
10222 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10223 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10224 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10225 					 vport->vport_id, vlan_id,
10226 					 false);
10227 		return;
10228 	}
10229 
10230 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10231 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10232 					       vport->vport_id,
10233 					       vlan->vlan_id, false);
10234 		if (ret)
10235 			break;
10236 		vlan->hd_tbl_status = true;
10237 	}
10238 }
10239 
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after the reset completes.
 * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
 * to be restored after reset, so just remove these mac nodes from mac_list.
 */
10246 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10247 {
10248 	struct hclge_mac_node *mac_node, *tmp;
10249 
10250 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10251 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10252 			mac_node->state = HCLGE_MAC_TO_ADD;
10253 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10254 			list_del(&mac_node->node);
10255 			kfree(mac_node);
10256 		}
10257 	}
10258 }
10259 
10260 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10261 {
10262 	spin_lock_bh(&vport->mac_list_lock);
10263 
10264 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10265 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10266 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10267 
10268 	spin_unlock_bh(&vport->mac_list_lock);
10269 }
10270 
10271 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10272 {
10273 	struct hclge_vport *vport = &hdev->vport[0];
10274 	struct hnae3_handle *handle = &vport->nic;
10275 
10276 	hclge_restore_mac_table_common(vport);
10277 	hclge_restore_vport_vlan_table(vport);
10278 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10279 	hclge_restore_fd_entries(handle);
10280 }
10281 
10282 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10283 {
10284 	struct hclge_vport *vport = hclge_get_vport(handle);
10285 
10286 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10287 		vport->rxvlan_cfg.strip_tag1_en = false;
10288 		vport->rxvlan_cfg.strip_tag2_en = enable;
10289 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10290 	} else {
10291 		vport->rxvlan_cfg.strip_tag1_en = enable;
10292 		vport->rxvlan_cfg.strip_tag2_en = true;
10293 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10294 	}
10295 
10296 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10297 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10298 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10299 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10300 
10301 	return hclge_set_vlan_rx_offload_cfg(vport);
10302 }
10303 
10304 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10305 {
10306 	struct hclge_dev *hdev = vport->back;
10307 
10308 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10309 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10310 }
10311 
10312 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10313 					    u16 port_base_vlan_state,
10314 					    struct hclge_vlan_info *new_info,
10315 					    struct hclge_vlan_info *old_info)
10316 {
10317 	struct hclge_dev *hdev = vport->back;
10318 	int ret;
10319 
10320 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10321 		hclge_rm_vport_all_vlan_table(vport, false);
10322 		/* force clear VLAN 0 */
10323 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10324 		if (ret)
10325 			return ret;
10326 		return hclge_set_vlan_filter_hw(hdev,
10327 						 htons(new_info->vlan_proto),
10328 						 vport->vport_id,
10329 						 new_info->vlan_tag,
10330 						 false);
10331 	}
10332 
10333 	/* force add VLAN 0 */
10334 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10335 	if (ret)
10336 		return ret;
10337 
10338 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10339 				       vport->vport_id, old_info->vlan_tag,
10340 				       true);
10341 	if (ret)
10342 		return ret;
10343 
10344 	return hclge_add_vport_all_vlan_table(vport);
10345 }
10346 
10347 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10348 					  const struct hclge_vlan_info *old_cfg)
10349 {
10350 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10351 		return true;
10352 
10353 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10354 		return true;
10355 
10356 	return false;
10357 }
10358 
10359 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10360 				    struct hclge_vlan_info *vlan_info)
10361 {
10362 	struct hnae3_handle *nic = &vport->nic;
10363 	struct hclge_vlan_info *old_vlan_info;
10364 	struct hclge_dev *hdev = vport->back;
10365 	int ret;
10366 
10367 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10368 
10369 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10370 				     vlan_info->qos);
10371 	if (ret)
10372 		return ret;
10373 
10374 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10375 		goto out;
10376 
10377 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10378 		/* add new VLAN tag */
10379 		ret = hclge_set_vlan_filter_hw(hdev,
10380 					       htons(vlan_info->vlan_proto),
10381 					       vport->vport_id,
10382 					       vlan_info->vlan_tag,
10383 					       false);
10384 		if (ret)
10385 			return ret;
10386 
10387 		/* remove old VLAN tag */
10388 		if (old_vlan_info->vlan_tag == 0)
10389 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10390 						       true, 0);
10391 		else
10392 			ret = hclge_set_vlan_filter_hw(hdev,
10393 						       htons(ETH_P_8021Q),
10394 						       vport->vport_id,
10395 						       old_vlan_info->vlan_tag,
10396 						       true);
10397 		if (ret) {
10398 			dev_err(&hdev->pdev->dev,
10399 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10400 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10401 			return ret;
10402 		}
10403 
10404 		goto out;
10405 	}
10406 
10407 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10408 					       old_vlan_info);
10409 	if (ret)
10410 		return ret;
10411 
10412 out:
10413 	vport->port_base_vlan_cfg.state = state;
10414 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10415 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10416 	else
10417 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10418 
10419 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10420 	hclge_set_vport_vlan_fltr_change(vport);
10421 
10422 	return 0;
10423 }
10424 
10425 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10426 					  enum hnae3_port_base_vlan_state state,
10427 					  u16 vlan, u8 qos)
10428 {
10429 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10430 		if (!vlan && !qos)
10431 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10432 
10433 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10434 	}
10435 
10436 	if (!vlan && !qos)
10437 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10438 
10439 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10440 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10441 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10442 
10443 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10444 }
10445 
10446 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10447 				    u16 vlan, u8 qos, __be16 proto)
10448 {
10449 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10450 	struct hclge_vport *vport = hclge_get_vport(handle);
10451 	struct hclge_dev *hdev = vport->back;
10452 	struct hclge_vlan_info vlan_info;
10453 	u16 state;
10454 	int ret;
10455 
10456 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10457 		return -EOPNOTSUPP;
10458 
10459 	vport = hclge_get_vf_vport(hdev, vfid);
10460 	if (!vport)
10461 		return -EINVAL;
10462 
	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10464 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10465 		return -EINVAL;
10466 	if (proto != htons(ETH_P_8021Q))
10467 		return -EPROTONOSUPPORT;
10468 
10469 	state = hclge_get_port_base_vlan_state(vport,
10470 					       vport->port_base_vlan_cfg.state,
10471 					       vlan, qos);
10472 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10473 		return 0;
10474 
10475 	vlan_info.vlan_tag = vlan;
10476 	vlan_info.qos = qos;
10477 	vlan_info.vlan_proto = ntohs(proto);
10478 
10479 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10480 	if (ret) {
10481 		dev_err(&hdev->pdev->dev,
10482 			"failed to update port base vlan for vf %d, ret = %d\n",
10483 			vfid, ret);
10484 		return ret;
10485 	}
10486 
10487 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10488 	 * VLAN state.
10489 	 */
10490 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10491 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10492 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10493 						  vport->vport_id, state,
10494 						  &vlan_info);
10495 
10496 	return 0;
10497 }
10498 
10499 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10500 {
10501 	struct hclge_vlan_info *vlan_info;
10502 	struct hclge_vport *vport;
10503 	int ret;
10504 	int vf;
10505 
	/* clear the port based vlan for all vfs */
10507 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10508 		vport = &hdev->vport[vf];
10509 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10510 
10511 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10512 					       vport->vport_id,
10513 					       vlan_info->vlan_tag, true);
10514 		if (ret)
10515 			dev_err(&hdev->pdev->dev,
10516 				"failed to clear vf vlan for vf%d, ret = %d\n",
10517 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10518 	}
10519 }
10520 
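/* Add or remove (is_kill) a vlan filter entry for the vport behind @handle,
 * keeping the vport vlan list in sync with the hardware table.
 */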
10521 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10522 			  u16 vlan_id, bool is_kill)
10523 {
10524 	struct hclge_vport *vport = hclge_get_vport(handle);
10525 	struct hclge_dev *hdev = vport->back;
10526 	bool writen_to_tbl = false;
10527 	int ret = 0;
10528 
10529 	/* When the device is resetting or the reset has failed, the firmware
10530 	 * is unable to handle the mailbox. Just record the vlan id and
10531 	 * remove it after the reset finishes.
10532 	 */
10533 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10534 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10535 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10536 		return -EBUSY;
10537 	}
10538 
10539 	/* When port based vlan is enabled, the port based vlan is used as the
10540 	 * vlan filter entry. In this case, the vlan filter table is not updated
10541 	 * when the user adds or removes a vlan; only the vport vlan list is
10542 	 * updated. The vlan ids in the vlan list are written to the vlan
10543 	 * filter table once port based vlan is disabled.
10544 	 */
10545 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10546 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10547 					       vlan_id, is_kill);
10548 		writen_to_tbl = true;
10549 	}
10550 
10551 	if (!ret) {
10552 		if (is_kill)
10553 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10554 		else
10555 			hclge_add_vport_vlan_table(vport, vlan_id,
10556 						   writen_to_tbl);
10557 	} else if (is_kill) {
10558 		/* When removing the hw vlan filter failed, record the vlan id
10559 		 * and try to remove it from hw later, to stay consistent
10560 		 * with the stack.
10561 		 */
10562 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10563 	}
10564 
10565 	hclge_set_vport_vlan_fltr_change(vport);
10566 
10567 	return ret;
10568 }
10569 
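/* Apply the requested vlan filter enable state to every vport whose
 * state has been marked as changed.
 */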
10570 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10571 {
10572 	struct hclge_vport *vport;
10573 	int ret;
10574 	u16 i;
10575 
10576 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10577 		vport = &hdev->vport[i];
10578 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10579 					&vport->state))
10580 			continue;
10581 
10582 		ret = hclge_enable_vport_vlan_filter(vport,
10583 						     vport->req_vlan_fltr_en);
10584 		if (ret) {
10585 			dev_err(&hdev->pdev->dev,
10586 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10587 				vport->vport_id, ret);
10588 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10589 				&vport->state);
10590 			return;
10591 		}
10592 	}
10593 }
10594 
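/* Retry the vlan deletions that previously failed in hardware and then
 * sync the vlan filter enable state.
 */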
10595 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10596 {
10597 #define HCLGE_MAX_SYNC_COUNT	60
10598 
10599 	int i, ret, sync_cnt = 0;
10600 	u16 vlan_id;
10601 
10602 	/* walk every vport, including the PF's vport 0 */
10603 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10604 		struct hclge_vport *vport = &hdev->vport[i];
10605 
10606 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10607 					 VLAN_N_VID);
10608 		while (vlan_id != VLAN_N_VID) {
10609 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10610 						       vport->vport_id, vlan_id,
10611 						       true);
10612 			if (ret && ret != -EINVAL)
10613 				return;
10614 
10615 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10616 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10617 			hclge_set_vport_vlan_fltr_change(vport);
10618 
10619 			sync_cnt++;
10620 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10621 				return;
10622 
10623 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10624 						 VLAN_N_VID);
10625 		}
10626 	}
10627 
10628 	hclge_sync_vlan_fltr_state(hdev);
10629 }
10630 
10631 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10632 {
10633 	struct hclge_config_max_frm_size_cmd *req;
10634 	struct hclge_desc desc;
10635 
10636 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10637 
10638 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10639 	req->max_frm_size = cpu_to_le16(new_mps);
10640 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10641 
10642 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10643 }
10644 
10645 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10646 {
10647 	struct hclge_vport *vport = hclge_get_vport(handle);
10648 
10649 	return hclge_set_vport_mtu(vport, new_mtu);
10650 }
10651 
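/* Update the MTU of a vport. For a VF vport only the per-vport mps is
 * recorded; for the PF the MAC max frame size and the packet buffers are
 * reconfigured as well.
 */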
10652 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10653 {
10654 	struct hclge_dev *hdev = vport->back;
10655 	int i, max_frm_size, ret;
10656 
10657 	/* HW supports 2 layers of vlan tags */
10658 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10659 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10660 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10661 		return -EINVAL;
10662 
10663 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10664 	mutex_lock(&hdev->vport_lock);
10665 	/* VF's mps must fit within hdev->mps */
10666 	if (vport->vport_id && max_frm_size > hdev->mps) {
10667 		mutex_unlock(&hdev->vport_lock);
10668 		return -EINVAL;
10669 	} else if (vport->vport_id) {
10670 		vport->mps = max_frm_size;
10671 		mutex_unlock(&hdev->vport_lock);
10672 		return 0;
10673 	}
10674 
10675 	/* PF's mps must be no less than any VF's mps */
10676 	for (i = 1; i < hdev->num_alloc_vport; i++)
10677 		if (max_frm_size < hdev->vport[i].mps) {
10678 			mutex_unlock(&hdev->vport_lock);
10679 			return -EINVAL;
10680 		}
10681 
10682 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10683 
10684 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10685 	if (ret) {
10686 		dev_err(&hdev->pdev->dev,
10687 			"Change mtu fail, ret =%d\n", ret);
10688 		goto out;
10689 	}
10690 
10691 	hdev->mps = max_frm_size;
10692 	vport->mps = max_frm_size;
10693 
10694 	ret = hclge_buffer_alloc(hdev);
10695 	if (ret)
10696 		dev_err(&hdev->pdev->dev,
10697 			"Allocate buffer fail, ret =%d\n", ret);
10698 
10699 out:
10700 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10701 	mutex_unlock(&hdev->vport_lock);
10702 	return ret;
10703 }
10704 
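/* Assert (enable = true) or deassert the reset of a single tqp via a
 * firmware command.
 */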
10705 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10706 				    bool enable)
10707 {
10708 	struct hclge_reset_tqp_queue_cmd *req;
10709 	struct hclge_desc desc;
10710 	int ret;
10711 
10712 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10713 
10714 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10715 	req->tqp_id = cpu_to_le16(queue_id);
10716 	if (enable)
10717 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10718 
10719 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10720 	if (ret) {
10721 		dev_err(&hdev->pdev->dev,
10722 			"Send tqp reset cmd error, status =%d\n", ret);
10723 		return ret;
10724 	}
10725 
10726 	return 0;
10727 }
10728 
10729 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10730 				  u8 *reset_status)
10731 {
10732 	struct hclge_reset_tqp_queue_cmd *req;
10733 	struct hclge_desc desc;
10734 	int ret;
10735 
10736 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10737 
10738 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10739 	req->tqp_id = cpu_to_le16(queue_id);
10740 
10741 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10742 	if (ret) {
10743 		dev_err(&hdev->pdev->dev,
10744 			"Get reset status error, status =%d\n", ret);
10745 		return ret;
10746 	}
10747 
10748 	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10749 
10750 	return 0;
10751 }
10752 
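/* Convert a handle-local queue id to the global tqp index. */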
10753 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10754 {
10755 	struct hnae3_queue *queue;
10756 	struct hclge_tqp *tqp;
10757 
10758 	queue = handle->kinfo.tqp[queue_id];
10759 	tqp = container_of(queue, struct hclge_tqp, q);
10760 
10761 	return tqp->index;
10762 }
10763 
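/* Reset every tqp of the handle one by one: assert the reset, poll until
 * the hardware finishes the reset, then deassert it.
 */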
10764 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10765 {
10766 	struct hclge_vport *vport = hclge_get_vport(handle);
10767 	struct hclge_dev *hdev = vport->back;
10768 	u16 reset_try_times = 0;
10769 	u8 reset_status;
10770 	u16 queue_gid;
10771 	int ret;
10772 	u16 i;
10773 
10774 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10775 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10776 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10777 		if (ret) {
10778 			dev_err(&hdev->pdev->dev,
10779 				"failed to send reset tqp cmd, ret = %d\n",
10780 				ret);
10781 			return ret;
10782 		}
10783 
10784 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10785 			ret = hclge_get_reset_status(hdev, queue_gid,
10786 						     &reset_status);
10787 			if (ret)
10788 				return ret;
10789 
10790 			if (reset_status)
10791 				break;
10792 
10793 			/* Wait for tqp hw reset */
10794 			usleep_range(1000, 1200);
10795 		}
10796 
10797 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10798 			dev_err(&hdev->pdev->dev,
10799 				"wait for tqp hw reset timeout\n");
10800 			return -ETIME;
10801 		}
10802 
10803 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10804 		if (ret) {
10805 			dev_err(&hdev->pdev->dev,
10806 				"failed to deassert soft reset, ret = %d\n",
10807 				ret);
10808 			return ret;
10809 		}
10810 		reset_try_times = 0;
10811 	}
10812 	return 0;
10813 }
10814 
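/* Reset the RCB of all the handle's tqps with a single command; if the
 * firmware does not support it, fall back to per-tqp reset.
 */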
10815 static int hclge_reset_rcb(struct hnae3_handle *handle)
10816 {
10817 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10818 #define HCLGE_RESET_RCB_SUCCESS		1U
10819 
10820 	struct hclge_vport *vport = hclge_get_vport(handle);
10821 	struct hclge_dev *hdev = vport->back;
10822 	struct hclge_reset_cmd *req;
10823 	struct hclge_desc desc;
10824 	u8 return_status;
10825 	u16 queue_gid;
10826 	int ret;
10827 
10828 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10829 
10830 	req = (struct hclge_reset_cmd *)desc.data;
10831 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10832 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10833 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10834 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10835 
10836 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10837 	if (ret) {
10838 		dev_err(&hdev->pdev->dev,
10839 			"failed to send rcb reset cmd, ret = %d\n", ret);
10840 		return ret;
10841 	}
10842 
10843 	return_status = req->fun_reset_rcb_return_status;
10844 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10845 		return 0;
10846 
10847 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10848 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10849 			return_status);
10850 		return -EIO;
10851 	}
10852 
10853 	/* if the reset rcb cmd is not supported, fall back to resetting
10854 	 * all tqps with the reset tqp cmd
10855 	 */
10856 	return hclge_reset_tqp_cmd(handle);
10857 }
10858 
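/* Disable the tqps of the PF's own vport and reset the RCB for this handle. */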
10859 int hclge_reset_tqp(struct hnae3_handle *handle)
10860 {
10861 	struct hclge_vport *vport = hclge_get_vport(handle);
10862 	struct hclge_dev *hdev = vport->back;
10863 	int ret;
10864 
10865 	/* only need to disable PF's tqp */
10866 	if (!vport->vport_id) {
10867 		ret = hclge_tqp_enable(handle, false);
10868 		if (ret) {
10869 			dev_err(&hdev->pdev->dev,
10870 				"failed to disable tqp, ret = %d\n", ret);
10871 			return ret;
10872 		}
10873 	}
10874 
10875 	return hclge_reset_rcb(handle);
10876 }
10877 
10878 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10879 {
10880 	struct hclge_vport *vport = hclge_get_vport(handle);
10881 	struct hclge_dev *hdev = vport->back;
10882 
10883 	return hdev->fw_version;
10884 }
10885 
10886 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10887 {
10888 	struct phy_device *phydev = hdev->hw.mac.phydev;
10889 
10890 	if (!phydev)
10891 		return;
10892 
10893 	phy_set_asym_pause(phydev, rx_en, tx_en);
10894 }
10895 
10896 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10897 {
10898 	int ret;
10899 
10900 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10901 		return 0;
10902 
10903 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10904 	if (ret)
10905 		dev_err(&hdev->pdev->dev,
10906 			"configure pauseparam error, ret = %d.\n", ret);
10907 
10908 	return ret;
10909 }
10910 
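/* Resolve the pause configuration from the local and link partner
 * autoneg advertisements and apply it to the MAC.
 */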
10911 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10912 {
10913 	struct phy_device *phydev = hdev->hw.mac.phydev;
10914 	u16 remote_advertising = 0;
10915 	u16 local_advertising;
10916 	u32 rx_pause, tx_pause;
10917 	u8 flowctl;
10918 
10919 	if (!phydev->link || !phydev->autoneg)
10920 		return 0;
10921 
10922 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10923 
10924 	if (phydev->pause)
10925 		remote_advertising = LPA_PAUSE_CAP;
10926 
10927 	if (phydev->asym_pause)
10928 		remote_advertising |= LPA_PAUSE_ASYM;
10929 
10930 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10931 					   remote_advertising);
10932 	tx_pause = flowctl & FLOW_CTRL_TX;
10933 	rx_pause = flowctl & FLOW_CTRL_RX;
10934 
10935 	if (phydev->duplex == HCLGE_MAC_HALF) {
10936 		tx_pause = 0;
10937 		rx_pause = 0;
10938 	}
10939 
10940 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10941 }
10942 
10943 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10944 				 u32 *rx_en, u32 *tx_en)
10945 {
10946 	struct hclge_vport *vport = hclge_get_vport(handle);
10947 	struct hclge_dev *hdev = vport->back;
10948 	u8 media_type = hdev->hw.mac.media_type;
10949 
10950 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10951 		    hclge_get_autoneg(handle) : 0;
10952 
10953 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10954 		*rx_en = 0;
10955 		*tx_en = 0;
10956 		return;
10957 	}
10958 
10959 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10960 		*rx_en = 1;
10961 		*tx_en = 0;
10962 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10963 		*tx_en = 1;
10964 		*rx_en = 0;
10965 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10966 		*rx_en = 1;
10967 		*tx_en = 1;
10968 	} else {
10969 		*rx_en = 0;
10970 		*tx_en = 0;
10971 	}
10972 }
10973 
10974 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10975 					 u32 rx_en, u32 tx_en)
10976 {
10977 	if (rx_en && tx_en)
10978 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10979 	else if (rx_en && !tx_en)
10980 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10981 	else if (!rx_en && tx_en)
10982 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10983 	else
10984 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10985 
10986 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10987 }
10988 
10989 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10990 				u32 rx_en, u32 tx_en)
10991 {
10992 	struct hclge_vport *vport = hclge_get_vport(handle);
10993 	struct hclge_dev *hdev = vport->back;
10994 	struct phy_device *phydev = hdev->hw.mac.phydev;
10995 	u32 fc_autoneg;
10996 
10997 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10998 		fc_autoneg = hclge_get_autoneg(handle);
10999 		if (auto_neg != fc_autoneg) {
11000 			dev_info(&hdev->pdev->dev,
11001 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11002 			return -EOPNOTSUPP;
11003 		}
11004 	}
11005 
11006 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11007 		dev_info(&hdev->pdev->dev,
11008 			 "Priority flow control enabled. Cannot set link flow control.\n");
11009 		return -EOPNOTSUPP;
11010 	}
11011 
11012 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11013 
11014 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11015 
11016 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11017 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11018 
11019 	if (phydev)
11020 		return phy_start_aneg(phydev);
11021 
11022 	return -EOPNOTSUPP;
11023 }
11024 
11025 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11026 					  u8 *auto_neg, u32 *speed, u8 *duplex)
11027 {
11028 	struct hclge_vport *vport = hclge_get_vport(handle);
11029 	struct hclge_dev *hdev = vport->back;
11030 
11031 	if (speed)
11032 		*speed = hdev->hw.mac.speed;
11033 	if (duplex)
11034 		*duplex = hdev->hw.mac.duplex;
11035 	if (auto_neg)
11036 		*auto_neg = hdev->hw.mac.autoneg;
11037 }
11038 
11039 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11040 				 u8 *module_type)
11041 {
11042 	struct hclge_vport *vport = hclge_get_vport(handle);
11043 	struct hclge_dev *hdev = vport->back;
11044 
11045 	/* When the nic is down, the service task is not running and does not
11046 	 * update the port information every second. Query the port information
11047 	 * before returning the media type to ensure it is up to date.
11048 	 */
11049 	hclge_update_port_info(hdev);
11050 
11051 	if (media_type)
11052 		*media_type = hdev->hw.mac.media_type;
11053 
11054 	if (module_type)
11055 		*module_type = hdev->hw.mac.module_type;
11056 }
11057 
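/* Read the MDI/MDI-X control and status from the PHY registers. */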
11058 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11059 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11060 {
11061 	struct hclge_vport *vport = hclge_get_vport(handle);
11062 	struct hclge_dev *hdev = vport->back;
11063 	struct phy_device *phydev = hdev->hw.mac.phydev;
11064 	int mdix_ctrl, mdix, is_resolved;
11065 	unsigned int retval;
11066 
11067 	if (!phydev) {
11068 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11069 		*tp_mdix = ETH_TP_MDI_INVALID;
11070 		return;
11071 	}
11072 
11073 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11074 
11075 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11076 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11077 				    HCLGE_PHY_MDIX_CTRL_S);
11078 
11079 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11080 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11081 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11082 
11083 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11084 
11085 	switch (mdix_ctrl) {
11086 	case 0x0:
11087 		*tp_mdix_ctrl = ETH_TP_MDI;
11088 		break;
11089 	case 0x1:
11090 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11091 		break;
11092 	case 0x3:
11093 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11094 		break;
11095 	default:
11096 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11097 		break;
11098 	}
11099 
11100 	if (!is_resolved)
11101 		*tp_mdix = ETH_TP_MDI_INVALID;
11102 	else if (mdix)
11103 		*tp_mdix = ETH_TP_MDI_X;
11104 	else
11105 		*tp_mdix = ETH_TP_MDI;
11106 }
11107 
11108 static void hclge_info_show(struct hclge_dev *hdev)
11109 {
11110 	struct device *dev = &hdev->pdev->dev;
11111 
11112 	dev_info(dev, "PF info begin:\n");
11113 
11114 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11115 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11116 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11117 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11118 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11119 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11120 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11121 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11122 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11123 	dev_info(dev, "This is %s PF\n",
11124 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11125 	dev_info(dev, "DCB %s\n",
11126 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11127 	dev_info(dev, "MQPRIO %s\n",
11128 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11129 	dev_info(dev, "Default tx spare buffer size: %u\n",
11130 		 hdev->tx_spare_buf_size);
11131 
11132 	dev_info(dev, "PF info end.\n");
11133 }
11134 
11135 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11136 					  struct hclge_vport *vport)
11137 {
11138 	struct hnae3_client *client = vport->nic.client;
11139 	struct hclge_dev *hdev = ae_dev->priv;
11140 	int rst_cnt = hdev->rst_stats.reset_cnt;
11141 	int ret;
11142 
11143 	ret = client->ops->init_instance(&vport->nic);
11144 	if (ret)
11145 		return ret;
11146 
11147 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11148 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11149 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11150 		ret = -EBUSY;
11151 		goto init_nic_err;
11152 	}
11153 
11154 	/* Enable nic hw error interrupts */
11155 	ret = hclge_config_nic_hw_error(hdev, true);
11156 	if (ret) {
11157 		dev_err(&ae_dev->pdev->dev,
11158 			"fail(%d) to enable hw error interrupts\n", ret);
11159 		goto init_nic_err;
11160 	}
11161 
11162 	hnae3_set_client_init_flag(client, ae_dev, 1);
11163 
11164 	if (netif_msg_drv(&hdev->vport->nic))
11165 		hclge_info_show(hdev);
11166 
11167 	return ret;
11168 
11169 init_nic_err:
11170 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11171 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11172 		msleep(HCLGE_WAIT_RESET_DONE);
11173 
11174 	client->ops->uninit_instance(&vport->nic, 0);
11175 
11176 	return ret;
11177 }
11178 
11179 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11180 					   struct hclge_vport *vport)
11181 {
11182 	struct hclge_dev *hdev = ae_dev->priv;
11183 	struct hnae3_client *client;
11184 	int rst_cnt;
11185 	int ret;
11186 
11187 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11188 	    !hdev->nic_client)
11189 		return 0;
11190 
11191 	client = hdev->roce_client;
11192 	ret = hclge_init_roce_base_info(vport);
11193 	if (ret)
11194 		return ret;
11195 
11196 	rst_cnt = hdev->rst_stats.reset_cnt;
11197 	ret = client->ops->init_instance(&vport->roce);
11198 	if (ret)
11199 		return ret;
11200 
11201 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11202 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11203 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11204 		ret = -EBUSY;
11205 		goto init_roce_err;
11206 	}
11207 
11208 	/* Enable roce ras interrupts */
11209 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11210 	if (ret) {
11211 		dev_err(&ae_dev->pdev->dev,
11212 			"fail(%d) to enable roce ras interrupts\n", ret);
11213 		goto init_roce_err;
11214 	}
11215 
11216 	hnae3_set_client_init_flag(client, ae_dev, 1);
11217 
11218 	return 0;
11219 
11220 init_roce_err:
11221 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11222 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11223 		msleep(HCLGE_WAIT_RESET_DONE);
11224 
11225 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11226 
11227 	return ret;
11228 }
11229 
11230 static int hclge_init_client_instance(struct hnae3_client *client,
11231 				      struct hnae3_ae_dev *ae_dev)
11232 {
11233 	struct hclge_dev *hdev = ae_dev->priv;
11234 	struct hclge_vport *vport = &hdev->vport[0];
11235 	int ret;
11236 
11237 	switch (client->type) {
11238 	case HNAE3_CLIENT_KNIC:
11239 		hdev->nic_client = client;
11240 		vport->nic.client = client;
11241 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11242 		if (ret)
11243 			goto clear_nic;
11244 
11245 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11246 		if (ret)
11247 			goto clear_roce;
11248 
11249 		break;
11250 	case HNAE3_CLIENT_ROCE:
11251 		if (hnae3_dev_roce_supported(hdev)) {
11252 			hdev->roce_client = client;
11253 			vport->roce.client = client;
11254 		}
11255 
11256 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11257 		if (ret)
11258 			goto clear_roce;
11259 
11260 		break;
11261 	default:
11262 		return -EINVAL;
11263 	}
11264 
11265 	return 0;
11266 
11267 clear_nic:
11268 	hdev->nic_client = NULL;
11269 	vport->nic.client = NULL;
11270 	return ret;
11271 clear_roce:
11272 	hdev->roce_client = NULL;
11273 	vport->roce.client = NULL;
11274 	return ret;
11275 }
11276 
11277 static void hclge_uninit_client_instance(struct hnae3_client *client,
11278 					 struct hnae3_ae_dev *ae_dev)
11279 {
11280 	struct hclge_dev *hdev = ae_dev->priv;
11281 	struct hclge_vport *vport = &hdev->vport[0];
11282 
11283 	if (hdev->roce_client) {
11284 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11285 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11286 			msleep(HCLGE_WAIT_RESET_DONE);
11287 
11288 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11289 		hdev->roce_client = NULL;
11290 		vport->roce.client = NULL;
11291 	}
11292 	if (client->type == HNAE3_CLIENT_ROCE)
11293 		return;
11294 	if (hdev->nic_client && client->ops->uninit_instance) {
11295 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11296 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11297 			msleep(HCLGE_WAIT_RESET_DONE);
11298 
11299 		client->ops->uninit_instance(&vport->nic, 0);
11300 		hdev->nic_client = NULL;
11301 		vport->nic.client = NULL;
11302 	}
11303 }
11304 
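/* Map the optional device memory BAR if the device exposes one. */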
11305 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11306 {
11307 #define HCLGE_MEM_BAR		4
11308 
11309 	struct pci_dev *pdev = hdev->pdev;
11310 	struct hclge_hw *hw = &hdev->hw;
11311 
11312 	/* if the device does not have device memory, return directly */
11313 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11314 		return 0;
11315 
11316 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11317 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11318 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11319 	if (!hw->mem_base) {
11320 		dev_err(&pdev->dev, "failed to map device memory\n");
11321 		return -EFAULT;
11322 	}
11323 
11324 	return 0;
11325 }
11326 
11327 static int hclge_pci_init(struct hclge_dev *hdev)
11328 {
11329 	struct pci_dev *pdev = hdev->pdev;
11330 	struct hclge_hw *hw;
11331 	int ret;
11332 
11333 	ret = pci_enable_device(pdev);
11334 	if (ret) {
11335 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11336 		return ret;
11337 	}
11338 
11339 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11340 	if (ret) {
11341 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11342 		if (ret) {
11343 			dev_err(&pdev->dev,
11344 				"can't set consistent PCI DMA");
11345 			goto err_disable_device;
11346 		}
11347 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11348 	}
11349 
11350 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11351 	if (ret) {
11352 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11353 		goto err_disable_device;
11354 	}
11355 
11356 	pci_set_master(pdev);
11357 	hw = &hdev->hw;
11358 	hw->io_base = pcim_iomap(pdev, 2, 0);
11359 	if (!hw->io_base) {
11360 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11361 		ret = -ENOMEM;
11362 		goto err_clr_master;
11363 	}
11364 
11365 	ret = hclge_dev_mem_map(hdev);
11366 	if (ret)
11367 		goto err_unmap_io_base;
11368 
11369 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11370 
11371 	return 0;
11372 
11373 err_unmap_io_base:
11374 	pcim_iounmap(pdev, hdev->hw.io_base);
11375 err_clr_master:
11376 	pci_clear_master(pdev);
11377 	pci_release_regions(pdev);
11378 err_disable_device:
11379 	pci_disable_device(pdev);
11380 
11381 	return ret;
11382 }
11383 
11384 static void hclge_pci_uninit(struct hclge_dev *hdev)
11385 {
11386 	struct pci_dev *pdev = hdev->pdev;
11387 
11388 	if (hdev->hw.mem_base)
11389 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11390 
11391 	pcim_iounmap(pdev, hdev->hw.io_base);
11392 	pci_free_irq_vectors(pdev);
11393 	pci_clear_master(pdev);
11394 	pci_release_mem_regions(pdev);
11395 	pci_disable_device(pdev);
11396 }
11397 
11398 static void hclge_state_init(struct hclge_dev *hdev)
11399 {
11400 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11401 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11402 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11403 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11404 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11405 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11406 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11407 }
11408 
11409 static void hclge_state_uninit(struct hclge_dev *hdev)
11410 {
11411 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11412 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11413 
11414 	if (hdev->reset_timer.function)
11415 		del_timer_sync(&hdev->reset_timer);
11416 	if (hdev->service_task.work.func)
11417 		cancel_delayed_work_sync(&hdev->service_task);
11418 }
11419 
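/* Prepare the device for a reset of the given type, retrying the
 * preparation a limited number of times if it fails, and disable the
 * misc vector until the reset is done.
 */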
11420 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11421 					enum hnae3_reset_type rst_type)
11422 {
11423 #define HCLGE_RESET_RETRY_WAIT_MS	500
11424 #define HCLGE_RESET_RETRY_CNT	5
11425 
11426 	struct hclge_dev *hdev = ae_dev->priv;
11427 	int retry_cnt = 0;
11428 	int ret;
11429 
11430 retry:
11431 	down(&hdev->reset_sem);
11432 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11433 	hdev->reset_type = rst_type;
11434 	ret = hclge_reset_prepare(hdev);
11435 	if (ret || hdev->reset_pending) {
11436 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11437 			ret);
11438 		if (hdev->reset_pending ||
11439 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11440 			dev_err(&hdev->pdev->dev,
11441 				"reset_pending:0x%lx, retry_cnt:%d\n",
11442 				hdev->reset_pending, retry_cnt);
11443 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11444 			up(&hdev->reset_sem);
11445 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11446 			goto retry;
11447 		}
11448 	}
11449 
11450 	/* disable misc vector before reset done */
11451 	hclge_enable_vector(&hdev->misc_vector, false);
11452 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11453 
11454 	if (hdev->reset_type == HNAE3_FLR_RESET)
11455 		hdev->rst_stats.flr_rst_cnt++;
11456 }
11457 
11458 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11459 {
11460 	struct hclge_dev *hdev = ae_dev->priv;
11461 	int ret;
11462 
11463 	hclge_enable_vector(&hdev->misc_vector, true);
11464 
11465 	ret = hclge_reset_rebuild(hdev);
11466 	if (ret)
11467 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11468 
11469 	hdev->reset_type = HNAE3_NONE_RESET;
11470 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11471 	up(&hdev->reset_sem);
11472 }
11473 
11474 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11475 {
11476 	u16 i;
11477 
11478 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11479 		struct hclge_vport *vport = &hdev->vport[i];
11480 		int ret;
11481 
11482 		/* Send cmd to clear vport's FUNC_RST_ING */
11483 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11484 		if (ret)
11485 			dev_warn(&hdev->pdev->dev,
11486 				 "clear vport(%u) rst failed %d!\n",
11487 				 vport->vport_id, ret);
11488 	}
11489 }
11490 
11491 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11492 {
11493 	struct hclge_desc desc;
11494 	int ret;
11495 
11496 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11497 
11498 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11499 	/* This new command is only supported by new firmware and will
11500 	 * fail with older firmware. The error value -EOPNOTSUPP can only
11501 	 * be returned by older firmware running this command, so to keep
11502 	 * the code backward compatible we override this value and return
11503 	 * success.
11504 	 */
11505 	if (ret && ret != -EOPNOTSUPP) {
11506 		dev_err(&hdev->pdev->dev,
11507 			"failed to clear hw resource, ret = %d\n", ret);
11508 		return ret;
11509 	}
11510 	return 0;
11511 }
11512 
11513 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11514 {
11515 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11516 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11517 }
11518 
11519 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11520 {
11521 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11522 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11523 }
11524 
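/* Initialize the PF ae device: PCI resources, firmware command queue,
 * hardware configuration, vports and the background service task.
 */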
11525 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11526 {
11527 	struct pci_dev *pdev = ae_dev->pdev;
11528 	struct hclge_dev *hdev;
11529 	int ret;
11530 
11531 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11532 	if (!hdev)
11533 		return -ENOMEM;
11534 
11535 	hdev->pdev = pdev;
11536 	hdev->ae_dev = ae_dev;
11537 	hdev->reset_type = HNAE3_NONE_RESET;
11538 	hdev->reset_level = HNAE3_FUNC_RESET;
11539 	ae_dev->priv = hdev;
11540 
11541 	/* HW supports 2 layers of vlan tags */
11542 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11543 
11544 	mutex_init(&hdev->vport_lock);
11545 	spin_lock_init(&hdev->fd_rule_lock);
11546 	sema_init(&hdev->reset_sem, 1);
11547 
11548 	ret = hclge_pci_init(hdev);
11549 	if (ret)
11550 		goto out;
11551 
11552 	ret = hclge_devlink_init(hdev);
11553 	if (ret)
11554 		goto err_pci_uninit;
11555 
11556 	/* Firmware command queue initialize */
11557 	ret = hclge_cmd_queue_init(hdev);
11558 	if (ret)
11559 		goto err_devlink_uninit;
11560 
11561 	/* Firmware command initialize */
11562 	ret = hclge_cmd_init(hdev);
11563 	if (ret)
11564 		goto err_cmd_uninit;
11565 
11566 	ret = hclge_clear_hw_resource(hdev);
11567 	if (ret)
11568 		goto err_cmd_uninit;
11569 
11570 	ret = hclge_get_cap(hdev);
11571 	if (ret)
11572 		goto err_cmd_uninit;
11573 
11574 	ret = hclge_query_dev_specs(hdev);
11575 	if (ret) {
11576 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11577 			ret);
11578 		goto err_cmd_uninit;
11579 	}
11580 
11581 	ret = hclge_configure(hdev);
11582 	if (ret) {
11583 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11584 		goto err_cmd_uninit;
11585 	}
11586 
11587 	ret = hclge_init_msi(hdev);
11588 	if (ret) {
11589 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11590 		goto err_cmd_uninit;
11591 	}
11592 
11593 	ret = hclge_misc_irq_init(hdev);
11594 	if (ret)
11595 		goto err_msi_uninit;
11596 
11597 	ret = hclge_alloc_tqps(hdev);
11598 	if (ret) {
11599 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11600 		goto err_msi_irq_uninit;
11601 	}
11602 
11603 	ret = hclge_alloc_vport(hdev);
11604 	if (ret)
11605 		goto err_msi_irq_uninit;
11606 
11607 	ret = hclge_map_tqp(hdev);
11608 	if (ret)
11609 		goto err_msi_irq_uninit;
11610 
11611 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11612 	    !hnae3_dev_phy_imp_supported(hdev)) {
11613 		ret = hclge_mac_mdio_config(hdev);
11614 		if (ret)
11615 			goto err_msi_irq_uninit;
11616 	}
11617 
11618 	ret = hclge_init_umv_space(hdev);
11619 	if (ret)
11620 		goto err_mdiobus_unreg;
11621 
11622 	ret = hclge_mac_init(hdev);
11623 	if (ret) {
11624 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11625 		goto err_mdiobus_unreg;
11626 	}
11627 
11628 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11629 	if (ret) {
11630 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11631 		goto err_mdiobus_unreg;
11632 	}
11633 
11634 	ret = hclge_config_gro(hdev);
11635 	if (ret)
11636 		goto err_mdiobus_unreg;
11637 
11638 	ret = hclge_init_vlan_config(hdev);
11639 	if (ret) {
11640 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11641 		goto err_mdiobus_unreg;
11642 	}
11643 
11644 	ret = hclge_tm_schd_init(hdev);
11645 	if (ret) {
11646 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11647 		goto err_mdiobus_unreg;
11648 	}
11649 
11650 	ret = hclge_rss_init_cfg(hdev);
11651 	if (ret) {
11652 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11653 		goto err_mdiobus_unreg;
11654 	}
11655 
11656 	ret = hclge_rss_init_hw(hdev);
11657 	if (ret) {
11658 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11659 		goto err_mdiobus_unreg;
11660 	}
11661 
11662 	ret = init_mgr_tbl(hdev);
11663 	if (ret) {
11664 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11665 		goto err_mdiobus_unreg;
11666 	}
11667 
11668 	ret = hclge_init_fd_config(hdev);
11669 	if (ret) {
11670 		dev_err(&pdev->dev,
11671 			"fd table init fail, ret=%d\n", ret);
11672 		goto err_mdiobus_unreg;
11673 	}
11674 
11675 	ret = hclge_ptp_init(hdev);
11676 	if (ret)
11677 		goto err_mdiobus_unreg;
11678 
11679 	INIT_KFIFO(hdev->mac_tnl_log);
11680 
11681 	hclge_dcb_ops_set(hdev);
11682 
11683 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11684 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11685 
11686 	/* Set up affinity after the service timer setup because add_timer_on
11687 	 * is called in the affinity notify callback.
11688 	 */
11689 	hclge_misc_affinity_setup(hdev);
11690 
11691 	hclge_clear_all_event_cause(hdev);
11692 	hclge_clear_resetting_state(hdev);
11693 
11694 	/* Log and clear the hw errors that have already occurred */
11695 	if (hnae3_dev_ras_imp_supported(hdev))
11696 		hclge_handle_occurred_error(hdev);
11697 	else
11698 		hclge_handle_all_hns_hw_errors(ae_dev);
11699 
11700 	/* Request a delayed reset for error recovery, since an immediate global
11701 	 * reset on this PF could affect the pending initialization of other PFs.
11702 	 */
11703 	if (ae_dev->hw_err_reset_req) {
11704 		enum hnae3_reset_type reset_level;
11705 
11706 		reset_level = hclge_get_reset_level(ae_dev,
11707 						    &ae_dev->hw_err_reset_req);
11708 		hclge_set_def_reset_request(ae_dev, reset_level);
11709 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11710 	}
11711 
11712 	hclge_init_rxd_adv_layout(hdev);
11713 
11714 	/* Enable MISC vector(vector0) */
11715 	hclge_enable_vector(&hdev->misc_vector, true);
11716 
11717 	hclge_state_init(hdev);
11718 	hdev->last_reset_time = jiffies;
11719 
11720 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11721 		 HCLGE_DRIVER_NAME);
11722 
11723 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11724 
11725 	return 0;
11726 
11727 err_mdiobus_unreg:
11728 	if (hdev->hw.mac.phydev)
11729 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11730 err_msi_irq_uninit:
11731 	hclge_misc_irq_uninit(hdev);
11732 err_msi_uninit:
11733 	pci_free_irq_vectors(pdev);
11734 err_cmd_uninit:
11735 	hclge_cmd_uninit(hdev);
11736 err_devlink_uninit:
11737 	hclge_devlink_uninit(hdev);
11738 err_pci_uninit:
11739 	pcim_iounmap(pdev, hdev->hw.io_base);
11740 	pci_clear_master(pdev);
11741 	pci_release_regions(pdev);
11742 	pci_disable_device(pdev);
11743 out:
11744 	mutex_destroy(&hdev->vport_lock);
11745 	return ret;
11746 }
11747 
11748 static void hclge_stats_clear(struct hclge_dev *hdev)
11749 {
11750 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11751 }
11752 
11753 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11754 {
11755 	return hclge_config_switch_param(hdev, vf, enable,
11756 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11757 }
11758 
11759 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11760 {
11761 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11762 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11763 					  enable, vf);
11764 }
11765 
11766 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11767 {
11768 	int ret;
11769 
11770 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11771 	if (ret) {
11772 		dev_err(&hdev->pdev->dev,
11773 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11774 			vf, enable ? "on" : "off", ret);
11775 		return ret;
11776 	}
11777 
11778 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11779 	if (ret)
11780 		dev_err(&hdev->pdev->dev,
11781 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11782 			vf, enable ? "on" : "off", ret);
11783 
11784 	return ret;
11785 }
11786 
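/* Enable or disable MAC and vlan spoof checking for the given VF. */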
11787 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11788 				 bool enable)
11789 {
11790 	struct hclge_vport *vport = hclge_get_vport(handle);
11791 	struct hclge_dev *hdev = vport->back;
11792 	u32 new_spoofchk = enable ? 1 : 0;
11793 	int ret;
11794 
11795 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11796 		return -EOPNOTSUPP;
11797 
11798 	vport = hclge_get_vf_vport(hdev, vf);
11799 	if (!vport)
11800 		return -EINVAL;
11801 
11802 	if (vport->vf_info.spoofchk == new_spoofchk)
11803 		return 0;
11804 
11805 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11806 		dev_warn(&hdev->pdev->dev,
11807 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11808 			 vf);
11809 	else if (enable && hclge_is_umv_space_full(vport, true))
11810 		dev_warn(&hdev->pdev->dev,
11811 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11812 			 vf);
11813 
11814 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11815 	if (ret)
11816 		return ret;
11817 
11818 	vport->vf_info.spoofchk = new_spoofchk;
11819 	return 0;
11820 }
11821 
11822 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11823 {
11824 	struct hclge_vport *vport = hdev->vport;
11825 	int ret;
11826 	int i;
11827 
11828 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11829 		return 0;
11830 
11831 	/* resume the vf spoof check state after reset */
11832 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11833 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11834 					       vport->vf_info.spoofchk);
11835 		if (ret)
11836 			return ret;
11837 
11838 		vport++;
11839 	}
11840 
11841 	return 0;
11842 }
11843 
11844 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11845 {
11846 	struct hclge_vport *vport = hclge_get_vport(handle);
11847 	struct hclge_dev *hdev = vport->back;
11848 	u32 new_trusted = enable ? 1 : 0;
11849 
11850 	vport = hclge_get_vf_vport(hdev, vf);
11851 	if (!vport)
11852 		return -EINVAL;
11853 
11854 	if (vport->vf_info.trusted == new_trusted)
11855 		return 0;
11856 
11857 	vport->vf_info.trusted = new_trusted;
11858 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11859 	hclge_task_schedule(hdev, 0);
11860 
11861 	return 0;
11862 }
11863 
11864 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11865 {
11866 	int ret;
11867 	int vf;
11868 
11869 	/* reset vf rate to default value */
11870 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11871 		struct hclge_vport *vport = &hdev->vport[vf];
11872 
11873 		vport->vf_info.max_tx_rate = 0;
11874 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11875 		if (ret)
11876 			dev_err(&hdev->pdev->dev,
11877 				"vf%d failed to reset to default, ret=%d\n",
11878 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11879 	}
11880 }
11881 
11882 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11883 				     int min_tx_rate, int max_tx_rate)
11884 {
11885 	if (min_tx_rate != 0 ||
11886 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11887 		dev_err(&hdev->pdev->dev,
11888 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11889 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11890 		return -EINVAL;
11891 	}
11892 
11893 	return 0;
11894 }
11895 
11896 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11897 			     int min_tx_rate, int max_tx_rate, bool force)
11898 {
11899 	struct hclge_vport *vport = hclge_get_vport(handle);
11900 	struct hclge_dev *hdev = vport->back;
11901 	int ret;
11902 
11903 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11904 	if (ret)
11905 		return ret;
11906 
11907 	vport = hclge_get_vf_vport(hdev, vf);
11908 	if (!vport)
11909 		return -EINVAL;
11910 
11911 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11912 		return 0;
11913 
11914 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11915 	if (ret)
11916 		return ret;
11917 
11918 	vport->vf_info.max_tx_rate = max_tx_rate;
11919 
11920 	return 0;
11921 }
11922 
11923 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11924 {
11925 	struct hnae3_handle *handle = &hdev->vport->nic;
11926 	struct hclge_vport *vport;
11927 	int ret;
11928 	int vf;
11929 
11930 	/* resume the vf max_tx_rate after reset */
11931 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11932 		vport = hclge_get_vf_vport(hdev, vf);
11933 		if (!vport)
11934 			return -EINVAL;
11935 
11936 		/* A rate of zero means max rate; after reset the firmware has
11937 		 * already set it to max rate, so just continue.
11938 		 */
11939 		if (!vport->vf_info.max_tx_rate)
11940 			continue;
11941 
11942 		ret = hclge_set_vf_rate(handle, vf, 0,
11943 					vport->vf_info.max_tx_rate, true);
11944 		if (ret) {
11945 			dev_err(&hdev->pdev->dev,
11946 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11947 				vf, vport->vf_info.max_tx_rate, ret);
11948 			return ret;
11949 		}
11950 	}
11951 
11952 	return 0;
11953 }
11954 
11955 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11956 {
11957 	struct hclge_vport *vport = hdev->vport;
11958 	int i;
11959 
11960 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11961 		hclge_vport_stop(vport);
11962 		vport++;
11963 	}
11964 }
11965 
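/* Restore the hardware configuration of the PF after a reset. */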
11966 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11967 {
11968 	struct hclge_dev *hdev = ae_dev->priv;
11969 	struct pci_dev *pdev = ae_dev->pdev;
11970 	int ret;
11971 
11972 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11973 
11974 	hclge_stats_clear(hdev);
11975 	/* NOTE: a pf reset does not need to clear or restore pf and vf table
11976 	 * entries, so the tables in memory should not be cleaned here.
11977 	 */
11978 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11979 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11980 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11981 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11982 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11983 		hclge_reset_umv_space(hdev);
11984 	}
11985 
11986 	ret = hclge_cmd_init(hdev);
11987 	if (ret) {
11988 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11989 		return ret;
11990 	}
11991 
11992 	ret = hclge_map_tqp(hdev);
11993 	if (ret) {
11994 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11995 		return ret;
11996 	}
11997 
11998 	ret = hclge_mac_init(hdev);
11999 	if (ret) {
12000 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12001 		return ret;
12002 	}
12003 
12004 	ret = hclge_tp_port_init(hdev);
12005 	if (ret) {
12006 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12007 			ret);
12008 		return ret;
12009 	}
12010 
12011 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12012 	if (ret) {
12013 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12014 		return ret;
12015 	}
12016 
12017 	ret = hclge_config_gro(hdev);
12018 	if (ret)
12019 		return ret;
12020 
12021 	ret = hclge_init_vlan_config(hdev);
12022 	if (ret) {
12023 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12024 		return ret;
12025 	}
12026 
12027 	ret = hclge_tm_init_hw(hdev, true);
12028 	if (ret) {
12029 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12030 		return ret;
12031 	}
12032 
12033 	ret = hclge_rss_init_hw(hdev);
12034 	if (ret) {
12035 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12036 		return ret;
12037 	}
12038 
12039 	ret = init_mgr_tbl(hdev);
12040 	if (ret) {
12041 		dev_err(&pdev->dev,
12042 			"failed to reinit manager table, ret = %d\n", ret);
12043 		return ret;
12044 	}
12045 
12046 	ret = hclge_init_fd_config(hdev);
12047 	if (ret) {
12048 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12049 		return ret;
12050 	}
12051 
12052 	ret = hclge_ptp_init(hdev);
12053 	if (ret)
12054 		return ret;
12055 
12056 	/* Log and clear the hw errors that have already occurred */
12057 	if (hnae3_dev_ras_imp_supported(hdev))
12058 		hclge_handle_occurred_error(hdev);
12059 	else
12060 		hclge_handle_all_hns_hw_errors(ae_dev);
12061 
12062 	/* Re-enable the hw error interrupts because
12063 	 * the interrupts get disabled on global reset.
12064 	 */
12065 	ret = hclge_config_nic_hw_error(hdev, true);
12066 	if (ret) {
12067 		dev_err(&pdev->dev,
12068 			"fail(%d) to re-enable NIC hw error interrupts\n",
12069 			ret);
12070 		return ret;
12071 	}
12072 
12073 	if (hdev->roce_client) {
12074 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
12075 		if (ret) {
12076 			dev_err(&pdev->dev,
12077 				"fail(%d) to re-enable roce ras interrupts\n",
12078 				ret);
12079 			return ret;
12080 		}
12081 	}
12082 
12083 	hclge_reset_vport_state(hdev);
12084 	ret = hclge_reset_vport_spoofchk(hdev);
12085 	if (ret)
12086 		return ret;
12087 
12088 	ret = hclge_resume_vf_rate(hdev);
12089 	if (ret)
12090 		return ret;
12091 
12092 	hclge_init_rxd_adv_layout(hdev);
12093 
12094 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12095 		 HCLGE_DRIVER_NAME);
12096 
12097 	return 0;
12098 }
12099 
12100 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12101 {
12102 	struct hclge_dev *hdev = ae_dev->priv;
12103 	struct hclge_mac *mac = &hdev->hw.mac;
12104 
12105 	hclge_reset_vf_rate(hdev);
12106 	hclge_clear_vf_vlan(hdev);
12107 	hclge_misc_affinity_teardown(hdev);
12108 	hclge_state_uninit(hdev);
12109 	hclge_ptp_uninit(hdev);
12110 	hclge_uninit_rxd_adv_layout(hdev);
12111 	hclge_uninit_mac_table(hdev);
12112 	hclge_del_all_fd_entries(hdev);
12113 
12114 	if (mac->phydev)
12115 		mdiobus_unregister(mac->mdio_bus);
12116 
12117 	/* Disable MISC vector(vector0) */
12118 	hclge_enable_vector(&hdev->misc_vector, false);
12119 	synchronize_irq(hdev->misc_vector.vector_irq);
12120 
12121 	/* Disable all hw interrupts */
12122 	hclge_config_mac_tnl_int(hdev, false);
12123 	hclge_config_nic_hw_error(hdev, false);
12124 	hclge_config_rocee_ras_interrupt(hdev, false);
12125 
12126 	hclge_cmd_uninit(hdev);
12127 	hclge_misc_irq_uninit(hdev);
12128 	hclge_devlink_uninit(hdev);
12129 	hclge_pci_uninit(hdev);
12130 	mutex_destroy(&hdev->vport_lock);
12131 	hclge_uninit_vport_vlan_table(hdev);
12132 	ae_dev->priv = NULL;
12133 }
12134 
12135 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12136 {
12137 	struct hclge_vport *vport = hclge_get_vport(handle);
12138 	struct hclge_dev *hdev = vport->back;
12139 
12140 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12141 }
12142 
12143 static void hclge_get_channels(struct hnae3_handle *handle,
12144 			       struct ethtool_channels *ch)
12145 {
12146 	ch->max_combined = hclge_get_max_channels(handle);
12147 	ch->other_count = 1;
12148 	ch->max_other = 1;
12149 	ch->combined_count = handle->kinfo.rss_size;
12150 }
12151 
12152 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12153 					u16 *alloc_tqps, u16 *max_rss_size)
12154 {
12155 	struct hclge_vport *vport = hclge_get_vport(handle);
12156 	struct hclge_dev *hdev = vport->back;
12157 
12158 	*alloc_tqps = vport->alloc_tqps;
12159 	*max_rss_size = hdev->pf_rss_size_max;
12160 }
12161 
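/* Change the number of enabled queue pairs and rebuild the RSS TC mode
 * and, unless configured by the user, the RSS indirection table.
 */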
12162 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12163 			      bool rxfh_configured)
12164 {
12165 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12166 	struct hclge_vport *vport = hclge_get_vport(handle);
12167 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12168 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12169 	struct hclge_dev *hdev = vport->back;
12170 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12171 	u16 cur_rss_size = kinfo->rss_size;
12172 	u16 cur_tqps = kinfo->num_tqps;
12173 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12174 	u16 roundup_size;
12175 	u32 *rss_indir;
12176 	unsigned int i;
12177 	int ret;
12178 
12179 	kinfo->req_rss_size = new_tqps_num;
12180 
12181 	ret = hclge_tm_vport_map_update(hdev);
12182 	if (ret) {
12183 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12184 		return ret;
12185 	}
12186 
12187 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12188 	roundup_size = ilog2(roundup_size);
12189 	/* Set the RSS TC mode according to the new RSS size */
12190 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12191 		tc_valid[i] = 0;
12192 
12193 		if (!(hdev->hw_tc_map & BIT(i)))
12194 			continue;
12195 
12196 		tc_valid[i] = 1;
12197 		tc_size[i] = roundup_size;
12198 		tc_offset[i] = kinfo->rss_size * i;
12199 	}
12200 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12201 	if (ret)
12202 		return ret;
12203 
12204 	/* RSS indirection table has been configured by user */
12205 	if (rxfh_configured)
12206 		goto out;
12207 
12208 	/* Reinitialize the rss indirection table according to the new RSS size */
12209 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12210 			    GFP_KERNEL);
12211 	if (!rss_indir)
12212 		return -ENOMEM;
12213 
12214 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12215 		rss_indir[i] = i % kinfo->rss_size;
12216 
12217 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12218 	if (ret)
12219 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12220 			ret);
12221 
12222 	kfree(rss_indir);
12223 
12224 out:
12225 	if (!ret)
12226 		dev_info(&hdev->pdev->dev,
12227 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12228 			 cur_rss_size, kinfo->rss_size,
12229 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12230 
12231 	return ret;
12232 }
12233 
12234 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12235 			      u32 *regs_num_64_bit)
12236 {
12237 	struct hclge_desc desc;
12238 	u32 total_num;
12239 	int ret;
12240 
12241 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12242 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12243 	if (ret) {
12244 		dev_err(&hdev->pdev->dev,
12245 			"Query register number cmd failed, ret = %d.\n", ret);
12246 		return ret;
12247 	}
12248 
12249 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12250 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12251 
12252 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12253 	if (!total_num)
12254 		return -EINVAL;
12255 
12256 	return 0;
12257 }
12258 
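/* Read @regs_num 32-bit registers from the firmware into @data. */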
12259 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12260 				 void *data)
12261 {
12262 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12263 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12264 
12265 	struct hclge_desc *desc;
12266 	u32 *reg_val = data;
12267 	__le32 *desc_data;
12268 	int nodata_num;
12269 	int cmd_num;
12270 	int i, k, n;
12271 	int ret;
12272 
12273 	if (regs_num == 0)
12274 		return 0;
12275 
12276 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12277 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12278 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12279 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12280 	if (!desc)
12281 		return -ENOMEM;
12282 
12283 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12284 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12285 	if (ret) {
12286 		dev_err(&hdev->pdev->dev,
12287 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12288 		kfree(desc);
12289 		return ret;
12290 	}
12291 
12292 	for (i = 0; i < cmd_num; i++) {
12293 		if (i == 0) {
12294 			desc_data = (__le32 *)(&desc[i].data[0]);
12295 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12296 		} else {
12297 			desc_data = (__le32 *)(&desc[i]);
12298 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12299 		}
12300 		for (k = 0; k < n; k++) {
12301 			*reg_val++ = le32_to_cpu(*desc_data++);
12302 
12303 			regs_num--;
12304 			if (!regs_num)
12305 				break;
12306 		}
12307 	}
12308 
12309 	kfree(desc);
12310 	return 0;
12311 }
12312 
12313 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12314 				 void *data)
12315 {
12316 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12317 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12318 
12319 	struct hclge_desc *desc;
12320 	u64 *reg_val = data;
12321 	__le64 *desc_data;
12322 	int nodata_len;
12323 	int cmd_num;
12324 	int i, k, n;
12325 	int ret;
12326 
12327 	if (regs_num == 0)
12328 		return 0;
12329 
12330 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12331 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12332 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12333 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12334 	if (!desc)
12335 		return -ENOMEM;
12336 
12337 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12338 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12339 	if (ret) {
12340 		dev_err(&hdev->pdev->dev,
12341 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12342 		kfree(desc);
12343 		return ret;
12344 	}
12345 
12346 	for (i = 0; i < cmd_num; i++) {
12347 		if (i == 0) {
12348 			desc_data = (__le64 *)(&desc[i].data[0]);
12349 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12350 		} else {
12351 			desc_data = (__le64 *)(&desc[i]);
12352 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12353 		}
12354 		for (k = 0; k < n; k++) {
12355 			*reg_val++ = le64_to_cpu(*desc_data++);
12356 
12357 			regs_num--;
12358 			if (!regs_num)
12359 				break;
12360 		}
12361 	}
12362 
12363 	kfree(desc);
12364 	return 0;
12365 }
12366 
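/* Register dump layout: values are emitted in lines of REG_NUM_PER_LINE
 * u32 words. Each block is padded with SEPARATOR_VALUE words so that it
 * ends on a line boundary (an already aligned block gets a full line of
 * separators); REG_SEPARATOR_LINE covers this padding when block sizes
 * are converted to lines in the length calculations below.
 */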
12367 #define MAX_SEPARATE_NUM	4
12368 #define SEPARATOR_VALUE		0xFDFCFBFA
12369 #define REG_NUM_PER_LINE	4
12370 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12371 #define REG_SEPARATOR_LINE	1
12372 #define REG_NUM_REMAIN_MASK	3
12373 
12374 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12375 {
12376 	int i;
12377 
	/* initialize all command BDs except the last one */
12379 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12380 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12381 					   true);
12382 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12383 	}
12384 
12385 	/* initialize the last command BD */
12386 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12387 
12388 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12389 }
12390 
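/* Query how many command BDs each DFX register type needs.
 * hclge_dfx_bd_offset_list maps a register type to the word offset of
 * its BD count within the descriptors returned by the query above.
 */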
12391 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12392 				    int *bd_num_list,
12393 				    u32 type_num)
12394 {
12395 	u32 entries_per_desc, desc_index, index, offset, i;
12396 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12397 	int ret;
12398 
12399 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12400 	if (ret) {
12401 		dev_err(&hdev->pdev->dev,
12402 			"Get dfx bd num fail, status is %d.\n", ret);
12403 		return ret;
12404 	}
12405 
12406 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12407 	for (i = 0; i < type_num; i++) {
12408 		offset = hclge_dfx_bd_offset_list[i];
12409 		index = offset % entries_per_desc;
12410 		desc_index = offset / entries_per_desc;
12411 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12412 	}
12413 
12414 	return ret;
12415 }
12416 
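/* Chain bd_num descriptors for a single DFX register query and send
 * them as one command; every descriptor except the last carries the
 * NEXT flag.
 */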
12417 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12418 				  struct hclge_desc *desc_src, int bd_num,
12419 				  enum hclge_opcode_type cmd)
12420 {
12421 	struct hclge_desc *desc = desc_src;
12422 	int i, ret;
12423 
12424 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12425 	for (i = 0; i < bd_num - 1; i++) {
12426 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12427 		desc++;
12428 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12429 	}
12430 
12431 	desc = desc_src;
12432 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12433 	if (ret)
12434 		dev_err(&hdev->pdev->dev,
12435 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12436 			cmd, ret);
12437 
12438 	return ret;
12439 }
12440 
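/* Copy register values from the descriptors into the dump buffer and
 * pad up to the line boundary with separator words. Returns the number
 * of u32 words written, including the padding.
 */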
12441 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12442 				    void *data)
12443 {
12444 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12445 	struct hclge_desc *desc = desc_src;
12446 	u32 *reg = data;
12447 
12448 	entries_per_desc = ARRAY_SIZE(desc->data);
12449 	reg_num = entries_per_desc * bd_num;
12450 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12451 	for (i = 0; i < reg_num; i++) {
12452 		index = i % entries_per_desc;
12453 		desc_index = i / entries_per_desc;
12454 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12455 	}
12456 	for (i = 0; i < separator_num; i++)
12457 		*reg++ = SEPARATOR_VALUE;
12458 
12459 	return reg_num + separator_num;
12460 }
12461 
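/* Compute the space (in bytes) the DFX registers occupy in the dump:
 * each type's data is rounded up to whole dump lines, matching what
 * hclge_dfx_reg_fetch_data() will actually write.
 */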
12462 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12463 {
12464 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12465 	int data_len_per_desc, bd_num, i;
12466 	int *bd_num_list;
12467 	u32 data_len;
12468 	int ret;
12469 
12470 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12471 	if (!bd_num_list)
12472 		return -ENOMEM;
12473 
12474 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12475 	if (ret) {
12476 		dev_err(&hdev->pdev->dev,
12477 			"Get dfx reg bd num fail, status is %d.\n", ret);
12478 		goto out;
12479 	}
12480 
12481 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12482 	*len = 0;
12483 	for (i = 0; i < dfx_reg_type_num; i++) {
12484 		bd_num = bd_num_list[i];
12485 		data_len = data_len_per_desc * bd_num;
12486 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12487 	}
12488 
12489 out:
12490 	kfree(bd_num_list);
12491 	return ret;
12492 }
12493 
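/* Fetch all DFX register types into the dump buffer. The descriptor
 * array is sized for the largest per-type BD count and reused for each
 * query.
 */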
12494 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12495 {
12496 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12497 	int bd_num, bd_num_max, buf_len, i;
12498 	struct hclge_desc *desc_src;
12499 	int *bd_num_list;
12500 	u32 *reg = data;
12501 	int ret;
12502 
12503 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12504 	if (!bd_num_list)
12505 		return -ENOMEM;
12506 
12507 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12508 	if (ret) {
12509 		dev_err(&hdev->pdev->dev,
12510 			"Get dfx reg bd num fail, status is %d.\n", ret);
12511 		goto out;
12512 	}
12513 
12514 	bd_num_max = bd_num_list[0];
12515 	for (i = 1; i < dfx_reg_type_num; i++)
12516 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12517 
12518 	buf_len = sizeof(*desc_src) * bd_num_max;
12519 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12520 	if (!desc_src) {
12521 		ret = -ENOMEM;
12522 		goto out;
12523 	}
12524 
12525 	for (i = 0; i < dfx_reg_type_num; i++) {
12526 		bd_num = bd_num_list[i];
12527 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12528 					     hclge_dfx_reg_opcode_list[i]);
12529 		if (ret) {
12530 			dev_err(&hdev->pdev->dev,
12531 				"Get dfx reg fail, status is %d.\n", ret);
12532 			break;
12533 		}
12534 
12535 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12536 	}
12537 
12538 	kfree(desc_src);
12539 out:
12540 	kfree(bd_num_list);
12541 	return ret;
12542 }
12543 
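/* Dump the registers read directly from the PF's PCIe register space:
 * cmdq, common, per-ring (one copy per TQP) and per-vector interrupt
 * registers. Returns the number of u32 words written, including the
 * separator padding.
 */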
12544 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12545 			      struct hnae3_knic_private_info *kinfo)
12546 {
12547 #define HCLGE_RING_REG_OFFSET		0x200
12548 #define HCLGE_RING_INT_REG_OFFSET	0x4
12549 
12550 	int i, j, reg_num, separator_num;
12551 	int data_num_sum;
12552 	u32 *reg = data;
12553 
	/* fetch the per-PF register values from the PF PCIe register space */
12555 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12556 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12557 	for (i = 0; i < reg_num; i++)
12558 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12559 	for (i = 0; i < separator_num; i++)
12560 		*reg++ = SEPARATOR_VALUE;
12561 	data_num_sum = reg_num + separator_num;
12562 
12563 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12564 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12565 	for (i = 0; i < reg_num; i++)
12566 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12567 	for (i = 0; i < separator_num; i++)
12568 		*reg++ = SEPARATOR_VALUE;
12569 	data_num_sum += reg_num + separator_num;
12570 
12571 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12572 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12573 	for (j = 0; j < kinfo->num_tqps; j++) {
12574 		for (i = 0; i < reg_num; i++)
12575 			*reg++ = hclge_read_dev(&hdev->hw,
12576 						ring_reg_addr_list[i] +
12577 						HCLGE_RING_REG_OFFSET * j);
12578 		for (i = 0; i < separator_num; i++)
12579 			*reg++ = SEPARATOR_VALUE;
12580 	}
12581 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12582 
12583 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12584 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12585 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12586 		for (i = 0; i < reg_num; i++)
12587 			*reg++ = hclge_read_dev(&hdev->hw,
12588 						tqp_intr_reg_addr_list[i] +
12589 						HCLGE_RING_INT_REG_OFFSET * j);
12590 		for (i = 0; i < separator_num; i++)
12591 			*reg++ = SEPARATOR_VALUE;
12592 	}
12593 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12594 
12595 	return data_num_sum;
12596 }
12597 
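/* ethtool get_regs_len(): total size in bytes of the register dump,
 * covering the direct PCIe blocks, the firmware-queried 32/64 bit
 * blocks and the DFX registers.
 */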
12598 static int hclge_get_regs_len(struct hnae3_handle *handle)
12599 {
12600 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12601 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12602 	struct hclge_vport *vport = hclge_get_vport(handle);
12603 	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int dfx_regs_len;
12605 	int regs_lines_32_bit, regs_lines_64_bit;
12606 	int ret;
12607 
12608 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12609 	if (ret) {
12610 		dev_err(&hdev->pdev->dev,
12611 			"Get register number failed, ret = %d.\n", ret);
12612 		return ret;
12613 	}
12614 
12615 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12616 	if (ret) {
12617 		dev_err(&hdev->pdev->dev,
12618 			"Get dfx reg len failed, ret = %d.\n", ret);
12619 		return ret;
12620 	}
12621 
12622 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12623 		REG_SEPARATOR_LINE;
12624 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12625 		REG_SEPARATOR_LINE;
12626 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12627 		REG_SEPARATOR_LINE;
12628 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12629 		REG_SEPARATOR_LINE;
12630 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12631 		REG_SEPARATOR_LINE;
12632 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12633 		REG_SEPARATOR_LINE;
12634 
12635 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12636 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12637 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12638 }
12639 
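/* ethtool get_regs(): fill the dump buffer in the same order that
 * hclge_get_regs_len() sized it: direct PCIe registers first, then the
 * 32 bit and 64 bit firmware blocks, then the DFX registers.
 */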
12640 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12641 			   void *data)
12642 {
12643 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12644 	struct hclge_vport *vport = hclge_get_vport(handle);
12645 	struct hclge_dev *hdev = vport->back;
12646 	u32 regs_num_32_bit, regs_num_64_bit;
12647 	int i, reg_num, separator_num, ret;
12648 	u32 *reg = data;
12649 
12650 	*version = hdev->fw_version;
12651 
12652 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12653 	if (ret) {
12654 		dev_err(&hdev->pdev->dev,
12655 			"Get register number failed, ret = %d.\n", ret);
12656 		return;
12657 	}
12658 
12659 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12660 
12661 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12662 	if (ret) {
12663 		dev_err(&hdev->pdev->dev,
12664 			"Get 32 bit register failed, ret = %d.\n", ret);
12665 		return;
12666 	}
12667 	reg_num = regs_num_32_bit;
12668 	reg += reg_num;
12669 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12670 	for (i = 0; i < separator_num; i++)
12671 		*reg++ = SEPARATOR_VALUE;
12672 
12673 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12674 	if (ret) {
12675 		dev_err(&hdev->pdev->dev,
12676 			"Get 64 bit register failed, ret = %d.\n", ret);
12677 		return;
12678 	}
12679 	reg_num = regs_num_64_bit * 2;
12680 	reg += reg_num;
12681 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12682 	for (i = 0; i < separator_num; i++)
12683 		*reg++ = SEPARATOR_VALUE;
12684 
12685 	ret = hclge_get_dfx_reg(hdev, reg);
12686 	if (ret)
12687 		dev_err(&hdev->pdev->dev,
12688 			"Get dfx register failed, ret = %d.\n", ret);
12689 }
12690 
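/* Program the locate LED state via the LED status config command. */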
12691 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12692 {
12693 	struct hclge_set_led_state_cmd *req;
12694 	struct hclge_desc desc;
12695 	int ret;
12696 
12697 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12698 
12699 	req = (struct hclge_set_led_state_cmd *)desc.data;
12700 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12701 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12702 
12703 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12704 	if (ret)
12705 		dev_err(&hdev->pdev->dev,
12706 			"Send set led state cmd error, ret =%d\n", ret);
12707 
12708 	return ret;
12709 }
12710 
12711 enum hclge_led_status {
12712 	HCLGE_LED_OFF,
12713 	HCLGE_LED_ON,
12714 	HCLGE_LED_NO_CHANGE = 0xFF,
12715 };
12716 
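/* ethtool set_phys_id() handler: ETHTOOL_ID_ACTIVE turns the locate
 * LED on and ETHTOOL_ID_INACTIVE turns it off; other blink states are
 * not handled by this driver.
 */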
12717 static int hclge_set_led_id(struct hnae3_handle *handle,
12718 			    enum ethtool_phys_id_state status)
12719 {
12720 	struct hclge_vport *vport = hclge_get_vport(handle);
12721 	struct hclge_dev *hdev = vport->back;
12722 
12723 	switch (status) {
12724 	case ETHTOOL_ID_ACTIVE:
12725 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12726 	case ETHTOOL_ID_INACTIVE:
12727 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12728 	default:
12729 		return -EINVAL;
12730 	}
12731 }
12732 
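/* Copy the MAC's supported and advertising link mode bitmaps into the
 * caller's ethtool link mode masks.
 */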
12733 static void hclge_get_link_mode(struct hnae3_handle *handle,
12734 				unsigned long *supported,
12735 				unsigned long *advertising)
12736 {
12737 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12738 	struct hclge_vport *vport = hclge_get_vport(handle);
12739 	struct hclge_dev *hdev = vport->back;
12740 	unsigned int idx = 0;
12741 
12742 	for (; idx < size; idx++) {
12743 		supported[idx] = hdev->hw.mac.supported[idx];
12744 		advertising[idx] = hdev->hw.mac.advertising[idx];
12745 	}
12746 }
12747 
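/* Enable or disable hardware GRO, rolling back the cached setting if
 * the firmware command fails.
 */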
12748 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12749 {
12750 	struct hclge_vport *vport = hclge_get_vport(handle);
12751 	struct hclge_dev *hdev = vport->back;
12752 	bool gro_en_old = hdev->gro_en;
12753 	int ret;
12754 
12755 	hdev->gro_en = enable;
12756 	ret = hclge_config_gro(hdev);
12757 	if (ret)
12758 		hdev->gro_en = gro_en_old;
12759 
12760 	return ret;
12761 }
12762 
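/* Re-apply promiscuous settings for the PF (vport 0) and for every VF
 * whose requested state changed. For untrusted VFs only the broadcast
 * request is honoured; unicast/multicast promiscuous mode is reserved
 * for trusted VFs.
 */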
12763 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12764 {
12765 	struct hclge_vport *vport = &hdev->vport[0];
12766 	struct hnae3_handle *handle = &vport->nic;
12767 	u8 tmp_flags;
12768 	int ret;
12769 	u16 i;
12770 
12771 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12772 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12773 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12774 	}
12775 
12776 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12777 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12778 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12779 					     tmp_flags & HNAE3_MPE);
12780 		if (!ret) {
12781 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12782 				  &vport->state);
12783 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12784 				&vport->state);
12785 		}
12786 	}
12787 
12788 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12789 		bool uc_en = false;
12790 		bool mc_en = false;
12791 		bool bc_en;
12792 
12793 		vport = &hdev->vport[i];
12794 
12795 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12796 					&vport->state))
12797 			continue;
12798 
12799 		if (vport->vf_info.trusted) {
12800 			uc_en = vport->vf_info.request_uc_en > 0;
12801 			mc_en = vport->vf_info.request_mc_en > 0;
12802 		}
12803 		bc_en = vport->vf_info.request_bc_en > 0;
12804 
12805 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12806 						 mc_en, bc_en);
12807 		if (ret) {
12808 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12809 				&vport->state);
12810 			return;
12811 		}
12812 		hclge_set_vport_vlan_fltr_change(vport);
12813 	}
12814 }
12815 
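/* Ask the firmware whether an SFP module is currently present. */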
12816 static bool hclge_module_existed(struct hclge_dev *hdev)
12817 {
12818 	struct hclge_desc desc;
12819 	u32 existed;
12820 	int ret;
12821 
12822 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12823 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12824 	if (ret) {
12825 		dev_err(&hdev->pdev->dev,
12826 			"failed to get SFP exist state, ret = %d\n", ret);
12827 		return false;
12828 	}
12829 
12830 	existed = le32_to_cpu(desc.data[0]);
12831 
12832 	return existed != 0;
12833 }
12834 
/* One read needs 6 BDs (140 bytes in total).
 * Return the number of bytes actually read; 0 means the read failed.
 */
12838 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12839 				     u32 len, u8 *data)
12840 {
12841 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12842 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12843 	u16 read_len;
12844 	u16 copy_len;
12845 	int ret;
12846 	int i;
12847 
12848 	/* setup all 6 bds to read module eeprom info. */
12849 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12850 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12851 					   true);
12852 
12853 		/* bd0~bd4 need next flag */
12854 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12855 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12856 	}
12857 
	/* set up BD0, which carries the offset and read length. */
12859 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12860 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12861 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12862 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12863 
12864 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12865 	if (ret) {
12866 		dev_err(&hdev->pdev->dev,
12867 			"failed to get SFP eeprom info, ret = %d\n", ret);
12868 		return 0;
12869 	}
12870 
12871 	/* copy sfp info from bd0 to out buffer. */
12872 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12873 	memcpy(data, sfp_info_bd0->data, copy_len);
12874 	read_len = copy_len;
12875 
12876 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12877 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12878 		if (read_len >= len)
12879 			return read_len;
12880 
12881 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12882 		memcpy(data + read_len, desc[i].data, copy_len);
12883 		read_len += copy_len;
12884 	}
12885 
12886 	return read_len;
12887 }
12888 
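/* ethtool get_module_eeprom(): read the module EEPROM in chunks, each
 * chunk limited by what one firmware query can return (see
 * hclge_get_sfp_eeprom_info()), until len bytes have been copied.
 */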
12889 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12890 				   u32 len, u8 *data)
12891 {
12892 	struct hclge_vport *vport = hclge_get_vport(handle);
12893 	struct hclge_dev *hdev = vport->back;
12894 	u32 read_len = 0;
12895 	u16 data_len;
12896 
12897 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12898 		return -EOPNOTSUPP;
12899 
12900 	if (!hclge_module_existed(hdev))
12901 		return -ENXIO;
12902 
12903 	while (read_len < len) {
12904 		data_len = hclge_get_sfp_eeprom_info(hdev,
12905 						     offset + read_len,
12906 						     len - read_len,
12907 						     data + read_len);
12908 		if (!data_len)
12909 			return -EIO;
12910 
12911 		read_len += data_len;
12912 	}
12913 
12914 	return 0;
12915 }
12916 
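/* Query the link fault diagnosis status code from firmware; only
 * supported on devices newer than HNAE3_DEVICE_VERSION_V2.
 */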
12917 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12918 					 u32 *status_code)
12919 {
12920 	struct hclge_vport *vport = hclge_get_vport(handle);
12921 	struct hclge_dev *hdev = vport->back;
12922 	struct hclge_desc desc;
12923 	int ret;
12924 
12925 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12926 		return -EOPNOTSUPP;
12927 
12928 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12929 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12930 	if (ret) {
12931 		dev_err(&hdev->pdev->dev,
12932 			"failed to query link diagnosis info, ret = %d\n", ret);
12933 		return ret;
12934 	}
12935 
12936 	*status_code = le32_to_cpu(desc.data[0]);
12937 	return 0;
12938 }
12939 
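/* Operations exported to the hnae3 framework through ae_algo below. */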
12940 static const struct hnae3_ae_ops hclge_ops = {
12941 	.init_ae_dev = hclge_init_ae_dev,
12942 	.uninit_ae_dev = hclge_uninit_ae_dev,
12943 	.reset_prepare = hclge_reset_prepare_general,
12944 	.reset_done = hclge_reset_done,
12945 	.init_client_instance = hclge_init_client_instance,
12946 	.uninit_client_instance = hclge_uninit_client_instance,
12947 	.map_ring_to_vector = hclge_map_ring_to_vector,
12948 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12949 	.get_vector = hclge_get_vector,
12950 	.put_vector = hclge_put_vector,
12951 	.set_promisc_mode = hclge_set_promisc_mode,
12952 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12953 	.set_loopback = hclge_set_loopback,
12954 	.start = hclge_ae_start,
12955 	.stop = hclge_ae_stop,
12956 	.client_start = hclge_client_start,
12957 	.client_stop = hclge_client_stop,
12958 	.get_status = hclge_get_status,
12959 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12960 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12961 	.get_media_type = hclge_get_media_type,
12962 	.check_port_speed = hclge_check_port_speed,
12963 	.get_fec = hclge_get_fec,
12964 	.set_fec = hclge_set_fec,
12965 	.get_rss_key_size = hclge_get_rss_key_size,
12966 	.get_rss = hclge_get_rss,
12967 	.set_rss = hclge_set_rss,
12968 	.set_rss_tuple = hclge_set_rss_tuple,
12969 	.get_rss_tuple = hclge_get_rss_tuple,
12970 	.get_tc_size = hclge_get_tc_size,
12971 	.get_mac_addr = hclge_get_mac_addr,
12972 	.set_mac_addr = hclge_set_mac_addr,
12973 	.do_ioctl = hclge_do_ioctl,
12974 	.add_uc_addr = hclge_add_uc_addr,
12975 	.rm_uc_addr = hclge_rm_uc_addr,
12976 	.add_mc_addr = hclge_add_mc_addr,
12977 	.rm_mc_addr = hclge_rm_mc_addr,
12978 	.set_autoneg = hclge_set_autoneg,
12979 	.get_autoneg = hclge_get_autoneg,
12980 	.restart_autoneg = hclge_restart_autoneg,
12981 	.halt_autoneg = hclge_halt_autoneg,
12982 	.get_pauseparam = hclge_get_pauseparam,
12983 	.set_pauseparam = hclge_set_pauseparam,
12984 	.set_mtu = hclge_set_mtu,
12985 	.reset_queue = hclge_reset_tqp,
12986 	.get_stats = hclge_get_stats,
12987 	.get_mac_stats = hclge_get_mac_stat,
12988 	.update_stats = hclge_update_stats,
12989 	.get_strings = hclge_get_strings,
12990 	.get_sset_count = hclge_get_sset_count,
12991 	.get_fw_version = hclge_get_fw_version,
12992 	.get_mdix_mode = hclge_get_mdix_mode,
12993 	.enable_vlan_filter = hclge_enable_vlan_filter,
12994 	.set_vlan_filter = hclge_set_vlan_filter,
12995 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12996 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12997 	.reset_event = hclge_reset_event,
12998 	.get_reset_level = hclge_get_reset_level,
12999 	.set_default_reset_request = hclge_set_def_reset_request,
13000 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
13001 	.set_channels = hclge_set_channels,
13002 	.get_channels = hclge_get_channels,
13003 	.get_regs_len = hclge_get_regs_len,
13004 	.get_regs = hclge_get_regs,
13005 	.set_led_id = hclge_set_led_id,
13006 	.get_link_mode = hclge_get_link_mode,
13007 	.add_fd_entry = hclge_add_fd_entry,
13008 	.del_fd_entry = hclge_del_fd_entry,
13009 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
13010 	.get_fd_rule_info = hclge_get_fd_rule_info,
13011 	.get_fd_all_rules = hclge_get_all_rules,
13012 	.enable_fd = hclge_enable_fd,
13013 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
13014 	.dbg_read_cmd = hclge_dbg_read_cmd,
13015 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
13016 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
13017 	.ae_dev_resetting = hclge_ae_dev_resetting,
13018 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
13019 	.set_gro_en = hclge_gro_en,
13020 	.get_global_queue_id = hclge_covert_handle_qid_global,
13021 	.set_timer_task = hclge_set_timer_task,
13022 	.mac_connect_phy = hclge_mac_connect_phy,
13023 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
13024 	.get_vf_config = hclge_get_vf_config,
13025 	.set_vf_link_state = hclge_set_vf_link_state,
13026 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
13027 	.set_vf_trust = hclge_set_vf_trust,
13028 	.set_vf_rate = hclge_set_vf_rate,
13029 	.set_vf_mac = hclge_set_vf_mac,
13030 	.get_module_eeprom = hclge_get_module_eeprom,
13031 	.get_cmdq_stat = hclge_get_cmdq_stat,
13032 	.add_cls_flower = hclge_add_cls_flower,
13033 	.del_cls_flower = hclge_del_cls_flower,
13034 	.cls_flower_active = hclge_is_cls_flower_active,
13035 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13036 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13037 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
13038 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
13039 	.get_ts_info = hclge_ptp_get_ts_info,
13040 	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13041 };
13042 
13043 static struct hnae3_ae_algo ae_algo = {
13044 	.ops = &hclge_ops,
13045 	.pdev_id_table = ae_algo_pci_tbl,
13046 };
13047 
13048 static int hclge_init(void)
13049 {
13050 	pr_info("%s is initializing\n", HCLGE_NAME);
13051 
13052 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13053 	if (!hclge_wq) {
13054 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13055 		return -ENOMEM;
13056 	}
13057 
13058 	hnae3_register_ae_algo(&ae_algo);
13059 
13060 	return 0;
13061 }
13062 
13063 static void hclge_exit(void)
13064 {
13065 	hnae3_unregister_ae_algo(&ae_algo);
13066 	destroy_workqueue(hclge_wq);
13067 }
13068 module_init(hclge_init);
13069 module_exit(hclge_exit);
13070 
13071 MODULE_LICENSE("GPL");
13072 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13073 MODULE_DESCRIPTION("HCLGE Driver");
13074 MODULE_VERSION(HCLGE_MOD_VERSION);
13075