1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27 
28 #define HCLGE_NAME			"hclge"
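/* Helpers to read a u64 counter at a byte offset and to get a field's
 * byte offset within struct hclge_mac_stats.
 */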
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
31 
32 #define HCLGE_BUF_SIZE_UNIT	256U
33 #define HCLGE_BUF_MUL_BY	2
34 #define HCLGE_BUF_DIV_BY	2
35 #define NEED_RESERVE_TC_NUM	2
36 #define BUF_MAX_PERCENT		100
37 #define BUF_RESERVE_PERCENT	90
38 
39 #define HCLGE_RESET_MAX_FAIL_CNT	5
40 #define HCLGE_RESET_SYNC_TIME		100
41 #define HCLGE_PF_RESET_SYNC_TIME	20
42 #define HCLGE_PF_RESET_SYNC_CNT		1500
43 
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET        1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
48 #define HCLGE_DFX_IGU_BD_OFFSET         4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
51 #define HCLGE_DFX_NCSI_BD_OFFSET        7
52 #define HCLGE_DFX_RTC_BD_OFFSET         8
53 #define HCLGE_DFX_PPP_BD_OFFSET         9
54 #define HCLGE_DFX_RCB_BD_OFFSET         10
55 #define HCLGE_DFX_TQP_BD_OFFSET         11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
57 
58 #define HCLGE_LINK_STATUS_MS	10
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75 
76 static struct hnae3_ae_algo ae_algo;
77 
78 static struct workqueue_struct *hclge_wq;
79 
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89 	/* required last entry */
90 	{0, }
91 };
92 
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94 
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96 					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
97 					 HCLGE_NIC_CSQ_DEPTH_REG,
98 					 HCLGE_NIC_CSQ_TAIL_REG,
99 					 HCLGE_NIC_CSQ_HEAD_REG,
100 					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
101 					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
102 					 HCLGE_NIC_CRQ_DEPTH_REG,
103 					 HCLGE_NIC_CRQ_TAIL_REG,
104 					 HCLGE_NIC_CRQ_HEAD_REG,
105 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
106 					 HCLGE_CMDQ_INTR_STS_REG,
107 					 HCLGE_CMDQ_INTR_EN_REG,
108 					 HCLGE_CMDQ_INTR_GEN_REG};
109 
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111 					   HCLGE_PF_OTHER_INT_REG,
112 					   HCLGE_MISC_RESET_STS_REG,
113 					   HCLGE_MISC_VECTOR_INT_STS,
114 					   HCLGE_GLOBAL_RESET_REG,
115 					   HCLGE_FUN_RST_ING,
116 					   HCLGE_GRO_EN_REG};
117 
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119 					 HCLGE_RING_RX_ADDR_H_REG,
120 					 HCLGE_RING_RX_BD_NUM_REG,
121 					 HCLGE_RING_RX_BD_LENGTH_REG,
122 					 HCLGE_RING_RX_MERGE_EN_REG,
123 					 HCLGE_RING_RX_TAIL_REG,
124 					 HCLGE_RING_RX_HEAD_REG,
125 					 HCLGE_RING_RX_FBD_NUM_REG,
126 					 HCLGE_RING_RX_OFFSET_REG,
127 					 HCLGE_RING_RX_FBD_OFFSET_REG,
128 					 HCLGE_RING_RX_STASH_REG,
129 					 HCLGE_RING_RX_BD_ERR_REG,
130 					 HCLGE_RING_TX_ADDR_L_REG,
131 					 HCLGE_RING_TX_ADDR_H_REG,
132 					 HCLGE_RING_TX_BD_NUM_REG,
133 					 HCLGE_RING_TX_PRIORITY_REG,
134 					 HCLGE_RING_TX_TC_REG,
135 					 HCLGE_RING_TX_MERGE_EN_REG,
136 					 HCLGE_RING_TX_TAIL_REG,
137 					 HCLGE_RING_TX_HEAD_REG,
138 					 HCLGE_RING_TX_FBD_NUM_REG,
139 					 HCLGE_RING_TX_OFFSET_REG,
140 					 HCLGE_RING_TX_EBD_NUM_REG,
141 					 HCLGE_RING_TX_EBD_OFFSET_REG,
142 					 HCLGE_RING_TX_BD_ERR_REG,
143 					 HCLGE_RING_EN_REG};
144 
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146 					     HCLGE_TQP_INTR_GL0_REG,
147 					     HCLGE_TQP_INTR_GL1_REG,
148 					     HCLGE_TQP_INTR_GL2_REG,
149 					     HCLGE_TQP_INTR_RL_REG};
150 
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152 	"App    Loopback test",
153 	"Serdes serial Loopback test",
154 	"Serdes parallel Loopback test",
155 	"Phy    Loopback test"
156 };
157 
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159 	{"mac_tx_mac_pause_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161 	{"mac_rx_mac_pause_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163 	{"mac_tx_control_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
165 	{"mac_rx_control_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
167 	{"mac_tx_pfc_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
169 	{"mac_tx_pfc_pri0_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
171 	{"mac_tx_pfc_pri1_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
173 	{"mac_tx_pfc_pri2_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
175 	{"mac_tx_pfc_pri3_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
177 	{"mac_tx_pfc_pri4_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
179 	{"mac_tx_pfc_pri5_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
181 	{"mac_tx_pfc_pri6_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
183 	{"mac_tx_pfc_pri7_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
185 	{"mac_rx_pfc_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
187 	{"mac_rx_pfc_pri0_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
189 	{"mac_rx_pfc_pri1_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
191 	{"mac_rx_pfc_pri2_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
193 	{"mac_rx_pfc_pri3_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
195 	{"mac_rx_pfc_pri4_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
197 	{"mac_rx_pfc_pri5_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
199 	{"mac_rx_pfc_pri6_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
201 	{"mac_rx_pfc_pri7_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
203 	{"mac_tx_total_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
205 	{"mac_tx_total_oct_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
207 	{"mac_tx_good_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
209 	{"mac_tx_bad_pkt_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
211 	{"mac_tx_good_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
213 	{"mac_tx_bad_oct_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
215 	{"mac_tx_uni_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
217 	{"mac_tx_multi_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
219 	{"mac_tx_broad_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
221 	{"mac_tx_undersize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
223 	{"mac_tx_oversize_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
225 	{"mac_tx_64_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
227 	{"mac_tx_65_127_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
229 	{"mac_tx_128_255_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
231 	{"mac_tx_256_511_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
233 	{"mac_tx_512_1023_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
235 	{"mac_tx_1024_1518_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
237 	{"mac_tx_1519_2047_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
239 	{"mac_tx_2048_4095_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
241 	{"mac_tx_4096_8191_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
243 	{"mac_tx_8192_9216_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
245 	{"mac_tx_9217_12287_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
247 	{"mac_tx_12288_16383_oct_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
249 	{"mac_tx_1519_max_good_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
251 	{"mac_tx_1519_max_bad_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
253 	{"mac_rx_total_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
255 	{"mac_rx_total_oct_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
257 	{"mac_rx_good_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
259 	{"mac_rx_bad_pkt_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
261 	{"mac_rx_good_oct_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
263 	{"mac_rx_bad_oct_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
265 	{"mac_rx_uni_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
267 	{"mac_rx_multi_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
269 	{"mac_rx_broad_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
271 	{"mac_rx_undersize_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
273 	{"mac_rx_oversize_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
275 	{"mac_rx_64_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
277 	{"mac_rx_65_127_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
279 	{"mac_rx_128_255_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
281 	{"mac_rx_256_511_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
283 	{"mac_rx_512_1023_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
285 	{"mac_rx_1024_1518_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
287 	{"mac_rx_1519_2047_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
289 	{"mac_rx_2048_4095_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
291 	{"mac_rx_4096_8191_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
293 	{"mac_rx_8192_9216_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
295 	{"mac_rx_9217_12287_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
297 	{"mac_rx_12288_16383_oct_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
299 	{"mac_rx_1519_max_good_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
301 	{"mac_rx_1519_max_bad_pkt_num",
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303 
304 	{"mac_tx_fragment_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
306 	{"mac_tx_undermin_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
308 	{"mac_tx_jabber_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
310 	{"mac_tx_err_all_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
312 	{"mac_tx_from_app_good_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
314 	{"mac_tx_from_app_bad_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
316 	{"mac_rx_fragment_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
318 	{"mac_rx_undermin_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
320 	{"mac_rx_jabber_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
322 	{"mac_rx_fcs_err_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
324 	{"mac_rx_send_app_good_pkt_num",
325 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
326 	{"mac_rx_send_app_bad_pkt_num",
327 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
328 };
329 
330 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331 	{
332 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
333 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
334 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
335 		.i_port_bitmap = 0x1,
336 	},
337 };
338 
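/* Default RSS hash key (40 bytes); this appears to be the commonly used
 * default Toeplitz key.
 */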
339 static const u8 hclge_hash_key[] = {
340 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
341 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
342 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
343 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
344 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
345 };
346 
347 static const u32 hclge_dfx_bd_offset_list[] = {
348 	HCLGE_DFX_BIOS_BD_OFFSET,
349 	HCLGE_DFX_SSU_0_BD_OFFSET,
350 	HCLGE_DFX_SSU_1_BD_OFFSET,
351 	HCLGE_DFX_IGU_BD_OFFSET,
352 	HCLGE_DFX_RPU_0_BD_OFFSET,
353 	HCLGE_DFX_RPU_1_BD_OFFSET,
354 	HCLGE_DFX_NCSI_BD_OFFSET,
355 	HCLGE_DFX_RTC_BD_OFFSET,
356 	HCLGE_DFX_PPP_BD_OFFSET,
357 	HCLGE_DFX_RCB_BD_OFFSET,
358 	HCLGE_DFX_TQP_BD_OFFSET,
359 	HCLGE_DFX_SSU_2_BD_OFFSET
360 };
361 
362 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
363 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
364 	HCLGE_OPC_DFX_SSU_REG_0,
365 	HCLGE_OPC_DFX_SSU_REG_1,
366 	HCLGE_OPC_DFX_IGU_EGU_REG,
367 	HCLGE_OPC_DFX_RPU_REG_0,
368 	HCLGE_OPC_DFX_RPU_REG_1,
369 	HCLGE_OPC_DFX_NCSI_REG,
370 	HCLGE_OPC_DFX_RTC_REG,
371 	HCLGE_OPC_DFX_PPP_REG,
372 	HCLGE_OPC_DFX_RCB_REG,
373 	HCLGE_OPC_DFX_TQP_REG,
374 	HCLGE_OPC_DFX_SSU_REG_2
375 };
376 
377 static const struct key_info meta_data_key_info[] = {
378 	{ PACKET_TYPE_ID, 6 },
379 	{ IP_FRAGEMENT, 1 },
380 	{ ROCE_TYPE, 1 },
381 	{ NEXT_KEY, 5 },
382 	{ VLAN_NUMBER, 2 },
383 	{ SRC_VPORT, 12 },
384 	{ DST_VPORT, 12 },
385 	{ TUNNEL_PACKET, 1 },
386 };
387 
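/* For each tuple: key width in bits, key option, and the offsets of the
 * value and mask fields in struct hclge_fd_rule (-1 means the tuple has
 * no corresponding rule field).
 */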
388 static const struct key_info tuple_key_info[] = {
389 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
391 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
395 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
397 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
399 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
402 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
403 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
404 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
405 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
406 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
407 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
408 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
409 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
410 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
411 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
412 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
413 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
414 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
415 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
416 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
417 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
418 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
419 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
420 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
421 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
422 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
423 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
424 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
425 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
426 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
427 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
428 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
429 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
430 	{ INNER_DST_IP, 32, KEY_OPT_IP,
431 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
432 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
433 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
434 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
435 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
436 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
437 	  offsetof(struct hclge_fd_rule, tuples.src_port),
438 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
439 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
440 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
441 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
442 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
443 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
444 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
445 };
446 
447 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 {
449 #define HCLGE_MAC_CMD_NUM 21
450 
451 	u64 *data = (u64 *)(&hdev->mac_stats);
452 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
453 	__le64 *desc_data;
454 	int i, k, n;
455 	int ret;
456 
457 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
458 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459 	if (ret) {
460 		dev_err(&hdev->pdev->dev,
461 			"Get MAC pkt stats fail, status = %d.\n", ret);
462 
463 		return ret;
464 	}
465 
466 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
467 		/* for special opcode 0032, only the first desc has the head */
468 		if (unlikely(i == 0)) {
469 			desc_data = (__le64 *)(&desc[i].data[0]);
470 			n = HCLGE_RD_FIRST_STATS_NUM;
471 		} else {
472 			desc_data = (__le64 *)(&desc[i]);
473 			n = HCLGE_RD_OTHER_STATS_NUM;
474 		}
475 
476 		for (k = 0; k < n; k++) {
477 			*data += le64_to_cpu(*desc_data);
478 			data++;
479 			desc_data++;
480 		}
481 	}
482 
483 	return 0;
484 }
485 
486 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 {
488 	u64 *data = (u64 *)(&hdev->mac_stats);
489 	struct hclge_desc *desc;
490 	__le64 *desc_data;
491 	u16 i, k, n;
492 	int ret;
493 
	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
497 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
498 	if (!desc)
499 		return -ENOMEM;
500 
501 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
502 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
503 	if (ret) {
504 		kfree(desc);
505 		return ret;
506 	}
507 
508 	for (i = 0; i < desc_num; i++) {
509 		/* for special opcode 0034, only the first desc has the head */
510 		if (i == 0) {
511 			desc_data = (__le64 *)(&desc[i].data[0]);
512 			n = HCLGE_RD_FIRST_STATS_NUM;
513 		} else {
514 			desc_data = (__le64 *)(&desc[i]);
515 			n = HCLGE_RD_OTHER_STATS_NUM;
516 		}
517 
518 		for (k = 0; k < n; k++) {
519 			*data += le64_to_cpu(*desc_data);
520 			data++;
521 			desc_data++;
522 		}
523 	}
524 
525 	kfree(desc);
526 
527 	return 0;
528 }
529 
530 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 {
532 	struct hclge_desc desc;
533 	__le32 *desc_data;
534 	u32 reg_num;
535 	int ret;
536 
537 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539 	if (ret)
540 		return ret;
541 
542 	desc_data = (__le32 *)(&desc.data[0]);
543 	reg_num = le32_to_cpu(*desc_data);
544 
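	/* The first descriptor carries 3 u64 registers in its data area;
	 * each following descriptor carries up to 4, so round the
	 * remaining registers up to whole descriptors.
	 */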
545 	*desc_num = 1 + ((reg_num - 3) >> 2) +
546 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
547 
548 	return 0;
549 }
550 
551 static int hclge_mac_update_stats(struct hclge_dev *hdev)
552 {
553 	u32 desc_num;
554 	int ret;
555 
556 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
557 	/* The firmware supports the new statistics acquisition method */
558 	if (!ret)
559 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
560 	else if (ret == -EOPNOTSUPP)
561 		ret = hclge_mac_update_stats_defective(hdev);
562 	else
563 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
564 
565 	return ret;
566 }
567 
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 {
570 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571 	struct hclge_vport *vport = hclge_get_vport(handle);
572 	struct hclge_dev *hdev = vport->back;
573 	struct hnae3_queue *queue;
574 	struct hclge_desc desc[1];
575 	struct hclge_tqp *tqp;
576 	int ret, i;
577 
578 	for (i = 0; i < kinfo->num_tqps; i++) {
579 		queue = handle->kinfo.tqp[i];
580 		tqp = container_of(queue, struct hclge_tqp, q);
581 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
582 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
583 					   true);
584 
585 		desc[0].data[0] = cpu_to_le32(tqp->index);
586 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
587 		if (ret) {
588 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
590 				ret, i);
591 			return ret;
592 		}
593 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594 			le32_to_cpu(desc[0].data[1]);
595 	}
596 
597 	for (i = 0; i < kinfo->num_tqps; i++) {
598 		queue = handle->kinfo.tqp[i];
599 		tqp = container_of(queue, struct hclge_tqp, q);
600 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
601 		hclge_cmd_setup_basic_desc(&desc[0],
602 					   HCLGE_OPC_QUERY_TX_STATS,
603 					   true);
604 
605 		desc[0].data[0] = cpu_to_le32(tqp->index);
606 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
607 		if (ret) {
608 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
610 				ret, i);
611 			return ret;
612 		}
613 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614 			le32_to_cpu(desc[0].data[1]);
615 	}
616 
617 	return 0;
618 }
619 
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 {
622 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 	struct hclge_tqp *tqp;
624 	u64 *buff = data;
625 	int i;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
630 	}
631 
632 	for (i = 0; i < kinfo->num_tqps; i++) {
633 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
635 	}
636 
637 	return buff;
638 }
639 
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 {
642 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643 
	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
646 }
647 
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 {
650 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
651 	u8 *buff = data;
652 	int i;
653 
654 	for (i = 0; i < kinfo->num_tqps; i++) {
655 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656 			struct hclge_tqp, q);
657 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658 			 tqp->index);
659 		buff = buff + ETH_GSTRING_LEN;
660 	}
661 
662 	for (i = 0; i < kinfo->num_tqps; i++) {
663 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664 			struct hclge_tqp, q);
665 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666 			 tqp->index);
667 		buff = buff + ETH_GSTRING_LEN;
668 	}
669 
670 	return buff;
671 }
672 
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674 				 const struct hclge_comm_stats_str strs[],
675 				 int size, u64 *data)
676 {
677 	u64 *buf = data;
678 	u32 i;
679 
680 	for (i = 0; i < size; i++)
681 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
682 
683 	return buf + size;
684 }
685 
686 static u8 *hclge_comm_get_strings(u32 stringset,
687 				  const struct hclge_comm_stats_str strs[],
688 				  int size, u8 *data)
689 {
690 	char *buff = (char *)data;
691 	u32 i;
692 
693 	if (stringset != ETH_SS_STATS)
694 		return buff;
695 
696 	for (i = 0; i < size; i++) {
697 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698 		buff = buff + ETH_GSTRING_LEN;
699 	}
700 
701 	return (u8 *)buff;
702 }
703 
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 {
706 	struct hnae3_handle *handle;
707 	int status;
708 
709 	handle = &hdev->vport[0].nic;
710 	if (handle->client) {
711 		status = hclge_tqps_update_stats(handle);
712 		if (status) {
713 			dev_err(&hdev->pdev->dev,
714 				"Update TQPS stats fail, status = %d.\n",
715 				status);
716 		}
717 	}
718 
719 	status = hclge_mac_update_stats(hdev);
720 	if (status)
721 		dev_err(&hdev->pdev->dev,
722 			"Update MAC stats fail, status = %d.\n", status);
723 }
724 
725 static void hclge_update_stats(struct hnae3_handle *handle,
726 			       struct net_device_stats *net_stats)
727 {
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int status;
731 
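	/* serialize statistics updates; skip if another update is already
	 * in progress
	 */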
732 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
733 		return;
734 
735 	status = hclge_mac_update_stats(hdev);
736 	if (status)
737 		dev_err(&hdev->pdev->dev,
738 			"Update MAC stats fail, status = %d.\n",
739 			status);
740 
741 	status = hclge_tqps_update_stats(handle);
742 	if (status)
743 		dev_err(&hdev->pdev->dev,
744 			"Update TQPS stats fail, status = %d.\n",
745 			status);
746 
747 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
748 }
749 
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 {
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
753 		HNAE3_SUPPORT_PHY_LOOPBACK | \
754 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
755 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756 
757 	struct hclge_vport *vport = hclge_get_vport(handle);
758 	struct hclge_dev *hdev = vport->back;
759 	int count = 0;
760 
	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
766 	if (stringset == ETH_SS_TEST) {
767 		/* clear loopback bit flags at first */
768 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
769 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773 			count += 1;
774 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
775 		}
776 
777 		count += 2;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780 
781 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782 		     hdev->hw.mac.phydev->drv->set_loopback) ||
783 		    hnae3_dev_phy_imp_supported(hdev)) {
784 			count += 1;
785 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786 		}
787 	} else if (stringset == ETH_SS_STATS) {
788 		count = ARRAY_SIZE(g_mac_stats_string) +
789 			hclge_tqps_get_sset_count(handle, stringset);
790 	}
791 
792 	return count;
793 }
794 
795 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
796 			      u8 *data)
797 {
798 	u8 *p = (char *)data;
799 	int size;
800 
801 	if (stringset == ETH_SS_STATS) {
802 		size = ARRAY_SIZE(g_mac_stats_string);
803 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804 					   size, p);
805 		p = hclge_tqps_get_strings(handle, p);
806 	} else if (stringset == ETH_SS_TEST) {
807 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
808 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809 			       ETH_GSTRING_LEN);
810 			p += ETH_GSTRING_LEN;
811 		}
812 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
813 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814 			       ETH_GSTRING_LEN);
815 			p += ETH_GSTRING_LEN;
816 		}
817 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818 			memcpy(p,
819 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820 			       ETH_GSTRING_LEN);
821 			p += ETH_GSTRING_LEN;
822 		}
823 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
824 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825 			       ETH_GSTRING_LEN);
826 			p += ETH_GSTRING_LEN;
827 		}
828 	}
829 }
830 
831 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 {
833 	struct hclge_vport *vport = hclge_get_vport(handle);
834 	struct hclge_dev *hdev = vport->back;
835 	u64 *p;
836 
837 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
838 				 ARRAY_SIZE(g_mac_stats_string), data);
839 	p = hclge_tqps_get_stats(handle, p);
840 }
841 
842 static void hclge_get_mac_stat(struct hnae3_handle *handle,
843 			       struct hns3_mac_stats *mac_stats)
844 {
845 	struct hclge_vport *vport = hclge_get_vport(handle);
846 	struct hclge_dev *hdev = vport->back;
847 
848 	hclge_update_stats(handle, NULL);
849 
850 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
851 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
852 }
853 
854 static int hclge_parse_func_status(struct hclge_dev *hdev,
855 				   struct hclge_func_status_cmd *status)
856 {
857 #define HCLGE_MAC_ID_MASK	0xF
858 
859 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
860 		return -EINVAL;
861 
862 	/* Set the pf to main pf */
863 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
864 		hdev->flag |= HCLGE_FLAG_MAIN;
865 	else
866 		hdev->flag &= ~HCLGE_FLAG_MAIN;
867 
868 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
869 	return 0;
870 }
871 
872 static int hclge_query_function_status(struct hclge_dev *hdev)
873 {
874 #define HCLGE_QUERY_MAX_CNT	5
875 
876 	struct hclge_func_status_cmd *req;
877 	struct hclge_desc desc;
878 	int timeout = 0;
879 	int ret;
880 
881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
882 	req = (struct hclge_func_status_cmd *)desc.data;
883 
884 	do {
885 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886 		if (ret) {
887 			dev_err(&hdev->pdev->dev,
888 				"query function status failed %d.\n", ret);
889 			return ret;
890 		}
891 
892 		/* Check pf reset is done */
893 		if (req->pf_state)
894 			break;
895 		usleep_range(1000, 2000);
896 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
897 
898 	return hclge_parse_func_status(hdev, req);
899 }
900 
901 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 {
903 	struct hclge_pf_res_cmd *req;
904 	struct hclge_desc desc;
905 	int ret;
906 
907 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
908 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909 	if (ret) {
910 		dev_err(&hdev->pdev->dev,
911 			"query pf resource failed %d.\n", ret);
912 		return ret;
913 	}
914 
915 	req = (struct hclge_pf_res_cmd *)desc.data;
916 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
917 			 le16_to_cpu(req->ext_tqp_num);
918 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919 
920 	if (req->tx_buf_size)
921 		hdev->tx_buf_size =
922 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923 	else
924 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925 
926 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927 
928 	if (req->dv_buf_size)
929 		hdev->dv_buf_size =
930 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931 	else
932 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933 
934 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935 
936 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
937 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
938 		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf (min:2).\n",
940 			hdev->num_nic_msi);
941 		return -EINVAL;
942 	}
943 
944 	if (hnae3_dev_roce_supported(hdev)) {
945 		hdev->num_roce_msi =
946 			le16_to_cpu(req->pf_intr_vector_number_roce);
947 
		/* PF should have both NIC vectors and RoCE vectors;
		 * NIC vectors are queued before RoCE vectors.
		 */
951 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952 	} else {
953 		hdev->num_msi = hdev->num_nic_msi;
954 	}
955 
956 	return 0;
957 }
958 
959 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
960 {
961 	switch (speed_cmd) {
962 	case HCLGE_FW_MAC_SPEED_10M:
963 		*speed = HCLGE_MAC_SPEED_10M;
964 		break;
965 	case HCLGE_FW_MAC_SPEED_100M:
966 		*speed = HCLGE_MAC_SPEED_100M;
967 		break;
968 	case HCLGE_FW_MAC_SPEED_1G:
969 		*speed = HCLGE_MAC_SPEED_1G;
970 		break;
971 	case HCLGE_FW_MAC_SPEED_10G:
972 		*speed = HCLGE_MAC_SPEED_10G;
973 		break;
974 	case HCLGE_FW_MAC_SPEED_25G:
975 		*speed = HCLGE_MAC_SPEED_25G;
976 		break;
977 	case HCLGE_FW_MAC_SPEED_40G:
978 		*speed = HCLGE_MAC_SPEED_40G;
979 		break;
980 	case HCLGE_FW_MAC_SPEED_50G:
981 		*speed = HCLGE_MAC_SPEED_50G;
982 		break;
983 	case HCLGE_FW_MAC_SPEED_100G:
984 		*speed = HCLGE_MAC_SPEED_100G;
985 		break;
986 	case HCLGE_FW_MAC_SPEED_200G:
987 		*speed = HCLGE_MAC_SPEED_200G;
988 		break;
989 	default:
990 		return -EINVAL;
991 	}
992 
993 	return 0;
994 }
995 
996 static const struct hclge_speed_bit_map speed_bit_map[] = {
997 	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
998 	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
999 	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1000 	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1001 	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1002 	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1003 	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1004 	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1005 	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1006 };
1007 
1008 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1009 {
1010 	u16 i;
1011 
1012 	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1013 		if (speed == speed_bit_map[i].speed) {
1014 			*speed_bit = speed_bit_map[i].speed_bit;
1015 			return 0;
1016 		}
1017 	}
1018 
1019 	return -EINVAL;
1020 }
1021 
1022 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1023 {
1024 	struct hclge_vport *vport = hclge_get_vport(handle);
1025 	struct hclge_dev *hdev = vport->back;
1026 	u32 speed_ability = hdev->hw.mac.speed_ability;
1027 	u32 speed_bit = 0;
1028 	int ret;
1029 
1030 	ret = hclge_get_speed_bit(speed, &speed_bit);
1031 	if (ret)
1032 		return ret;
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT		4096
1283 #define SPEED_ABILITY_EXT_SHIFT			8
1284 
1285 	struct hclge_cfg_param_cmd *req;
1286 	u64 mac_addr_tmp_high;
1287 	u16 speed_ability_ext;
1288 	u64 mac_addr_tmp;
1289 	unsigned int i;
1290 
1291 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292 
1293 	/* get the configuration */
1294 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297 					    HCLGE_CFG_TQP_DESC_N_M,
1298 					    HCLGE_CFG_TQP_DESC_N_S);
1299 
1300 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 					HCLGE_CFG_PHY_ADDR_M,
1302 					HCLGE_CFG_PHY_ADDR_S);
1303 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 					  HCLGE_CFG_MEDIA_TP_M,
1305 					  HCLGE_CFG_MEDIA_TP_S);
1306 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 					  HCLGE_CFG_RX_BUF_LEN_M,
1308 					  HCLGE_CFG_RX_BUF_LEN_S);
1309 	/* get mac_address */
1310 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312 					    HCLGE_CFG_MAC_ADDR_H_M,
1313 					    HCLGE_CFG_MAC_ADDR_H_S);
1314 
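	/* param[3] holds the upper part of the MAC address; the split
	 * shift is equivalent to shifting it left by 32 bits.
	 */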
1315 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1316 
1317 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318 					     HCLGE_CFG_DEFAULT_SPEED_M,
1319 					     HCLGE_CFG_DEFAULT_SPEED_S);
1320 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321 					       HCLGE_CFG_RSS_SIZE_M,
1322 					       HCLGE_CFG_RSS_SIZE_S);
1323 
1324 	for (i = 0; i < ETH_ALEN; i++)
1325 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326 
1327 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329 
1330 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331 					     HCLGE_CFG_SPEED_ABILITY_M,
1332 					     HCLGE_CFG_SPEED_ABILITY_S);
1333 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337 
1338 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1340 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1341 
1342 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 	if (!cfg->umv_space)
1346 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1347 
1348 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349 					       HCLGE_CFG_PF_RSS_SIZE_M,
1350 					       HCLGE_CFG_PF_RSS_SIZE_S);
1351 
	/* HCLGE_CFG_PF_RSS_SIZE_M stores the exponent of the PF max rss
	 * size (which is a power of 2) instead of the value itself. This
	 * is more flexible for future changes and expansions.
	 * A PF field of 0 does not make sense, so in that case the PF
	 * uses the same max rss size field as the VF: HCLGE_CFG_RSS_SIZE_S.
	 */
1359 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360 			       1U << cfg->pf_rss_size_max :
1361 			       cfg->vf_rss_size_max;
1362 
	/* The tx spare buffer size queried from the configuration file is
	 * in units of HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a
	 * conversion is needed here.
	 */
1367 	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370 	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1371 }
1372 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1378 {
1379 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380 	struct hclge_cfg_param_cmd *req;
1381 	unsigned int i;
1382 	int ret;
1383 
1384 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1385 		u32 offset = 0;
1386 
1387 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1389 					   true);
1390 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len is in units of 4 bytes when sent to hardware */
1393 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395 		req->offset = cpu_to_le32(offset);
1396 	}
1397 
1398 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1399 	if (ret) {
1400 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1401 		return ret;
1402 	}
1403 
1404 	hclge_parse_cfg(hcfg, desc);
1405 
1406 	return 0;
1407 }
1408 
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1410 {
1411 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1412 
1413 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414 
1415 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1422 }
1423 
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425 				  struct hclge_desc *desc)
1426 {
1427 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428 	struct hclge_dev_specs_0_cmd *req0;
1429 	struct hclge_dev_specs_1_cmd *req1;
1430 
1431 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1433 
1434 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435 	ae_dev->dev_specs.rss_ind_tbl_size =
1436 		le16_to_cpu(req0->rss_ind_tbl_size);
1437 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1443 }
1444 
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1446 {
1447 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1448 
1449 	if (!dev_specs->max_non_tso_bd_num)
1450 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451 	if (!dev_specs->rss_ind_tbl_size)
1452 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453 	if (!dev_specs->rss_key_size)
1454 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455 	if (!dev_specs->max_tm_rate)
1456 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457 	if (!dev_specs->max_qset_num)
1458 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459 	if (!dev_specs->max_int_gl)
1460 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461 	if (!dev_specs->max_frm_size)
1462 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1463 }
1464 
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1466 {
1467 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1468 	int ret;
1469 	int i;
1470 
1471 	/* set default specifications as devices lower than version V3 do not
1472 	 * support querying specifications from firmware.
1473 	 */
1474 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475 		hclge_set_default_dev_specs(hdev);
1476 		return 0;
1477 	}
1478 
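	/* chain all descriptors except the last one using the NEXT flag */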
1479 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1481 					   true);
1482 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1483 	}
1484 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1485 
1486 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1487 	if (ret)
1488 		return ret;
1489 
1490 	hclge_parse_dev_specs(hdev, desc);
1491 	hclge_check_dev_specs(hdev);
1492 
1493 	return 0;
1494 }
1495 
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1497 {
1498 	int ret;
1499 
1500 	ret = hclge_query_function_status(hdev);
1501 	if (ret) {
1502 		dev_err(&hdev->pdev->dev,
1503 			"query function status error %d.\n", ret);
1504 		return ret;
1505 	}
1506 
1507 	/* get pf resource */
1508 	return hclge_query_pf_resource(hdev);
1509 }
1510 
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1512 {
1513 #define HCLGE_MIN_TX_DESC	64
1514 #define HCLGE_MIN_RX_DESC	64
1515 
1516 	if (!is_kdump_kernel())
1517 		return;
1518 
1519 	dev_info(&hdev->pdev->dev,
1520 		 "Running kdump kernel. Using minimal resources\n");
1521 
	/* the minimum number of queue pairs equals the number of vports */
1523 	hdev->num_tqps = hdev->num_req_vfs + 1;
1524 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1526 }
1527 
1528 static int hclge_configure(struct hclge_dev *hdev)
1529 {
1530 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531 	const struct cpumask *cpumask = cpu_online_mask;
1532 	struct hclge_cfg cfg;
1533 	unsigned int i;
1534 	int node, ret;
1535 
1536 	ret = hclge_get_cfg(hdev, &cfg);
1537 	if (ret)
1538 		return ret;
1539 
1540 	hdev->base_tqp_pid = 0;
1541 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1542 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1543 	hdev->rx_buf_len = cfg.rx_buf_len;
1544 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1545 	hdev->hw.mac.media_type = cfg.media_type;
1546 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1547 	hdev->num_tx_desc = cfg.tqp_desc_num;
1548 	hdev->num_rx_desc = cfg.tqp_desc_num;
1549 	hdev->tm_info.num_pg = 1;
1550 	hdev->tc_max = cfg.tc_num;
1551 	hdev->tm_info.hw_pfc_map = 0;
1552 	hdev->wanted_umv_size = cfg.umv_space;
1553 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1554 	hdev->gro_en = true;
1555 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1556 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1557 
1558 	if (hnae3_dev_fd_supported(hdev)) {
1559 		hdev->fd_en = true;
1560 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1561 	}
1562 
1563 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1564 	if (ret) {
1565 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1566 			cfg.default_speed, ret);
1567 		return ret;
1568 	}
1569 
1570 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1571 
1572 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1573 
1574 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1575 	    (hdev->tc_max < 1)) {
1576 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1577 			 hdev->tc_max);
1578 		hdev->tc_max = 1;
1579 	}
1580 
1581 	/* Dev does not support DCB */
1582 	if (!hnae3_dev_dcb_supported(hdev)) {
1583 		hdev->tc_max = 1;
1584 		hdev->pfc_max = 0;
1585 	} else {
1586 		hdev->pfc_max = hdev->tc_max;
1587 	}
1588 
1589 	hdev->tm_info.num_tc = 1;
1590 
	/* Non-contiguous TCs are currently not supported */
1592 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1593 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1594 
1595 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1596 
1597 	hclge_init_kdump_kernel_config(hdev);
1598 
1599 	/* Set the affinity based on numa node */
1600 	node = dev_to_node(&hdev->pdev->dev);
1601 	if (node != NUMA_NO_NODE)
1602 		cpumask = cpumask_of_node(node);
1603 
1604 	cpumask_copy(&hdev->affinity_mask, cpumask);
1605 
1606 	return ret;
1607 }
1608 
1609 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1610 			    u16 tso_mss_max)
1611 {
1612 	struct hclge_cfg_tso_status_cmd *req;
1613 	struct hclge_desc desc;
1614 
1615 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1616 
1617 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1618 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1619 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1620 
1621 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1622 }
1623 
1624 static int hclge_config_gro(struct hclge_dev *hdev)
1625 {
1626 	struct hclge_cfg_gro_status_cmd *req;
1627 	struct hclge_desc desc;
1628 	int ret;
1629 
1630 	if (!hnae3_dev_gro_supported(hdev))
1631 		return 0;
1632 
1633 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1634 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1635 
1636 	req->gro_en = hdev->gro_en ? 1 : 0;
1637 
1638 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1639 	if (ret)
1640 		dev_err(&hdev->pdev->dev,
1641 			"GRO hardware config cmd failed, ret = %d\n", ret);
1642 
1643 	return ret;
1644 }
1645 
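/* Allocate and initialize the software state for all hardware TQPs owned
 * by this PF; queues at index HCLGE_TQP_MAX_SIZE_DEV_V2 and above live in
 * an extended register region, hence the extra offset below.
 */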
1646 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1647 {
1648 	struct hclge_tqp *tqp;
1649 	int i;
1650 
1651 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1652 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1653 	if (!hdev->htqp)
1654 		return -ENOMEM;
1655 
1656 	tqp = hdev->htqp;
1657 
1658 	for (i = 0; i < hdev->num_tqps; i++) {
1659 		tqp->dev = &hdev->pdev->dev;
1660 		tqp->index = i;
1661 
1662 		tqp->q.ae_algo = &ae_algo;
1663 		tqp->q.buf_size = hdev->rx_buf_len;
1664 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1665 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1666 
1667 		/* need an extended offset to configure queues >=
1668 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1669 		 */
1670 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1671 			tqp->q.io_base = hdev->hw.io_base +
1672 					 HCLGE_TQP_REG_OFFSET +
1673 					 i * HCLGE_TQP_REG_SIZE;
1674 		else
1675 			tqp->q.io_base = hdev->hw.io_base +
1676 					 HCLGE_TQP_REG_OFFSET +
1677 					 HCLGE_TQP_EXT_REG_OFFSET +
1678 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1679 					 HCLGE_TQP_REG_SIZE;
1680 
1681 		tqp++;
1682 	}
1683 
1684 	return 0;
1685 }
1686 
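/* Bind the physical queue @tqp_pid to function @func_id as virtual queue
 * @tqp_vid via the SET_TQP_MAP command; @is_pf selects whether the mapping
 * is for the PF or for a VF.
 */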
1687 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1688 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1689 {
1690 	struct hclge_tqp_map_cmd *req;
1691 	struct hclge_desc desc;
1692 	int ret;
1693 
1694 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1695 
1696 	req = (struct hclge_tqp_map_cmd *)desc.data;
1697 	req->tqp_id = cpu_to_le16(tqp_pid);
1698 	req->tqp_vf = func_id;
1699 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1700 	if (!is_pf)
1701 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1702 	req->tqp_vid = cpu_to_le16(tqp_vid);
1703 
1704 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1705 	if (ret)
1706 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1707 
1708 	return ret;
1709 }
1710 
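/* Hand out up to @num_tqps unused hardware TQPs to @vport, then derive the
 * vport's RSS size from the per-TC queue count, capped by the PF RSS limit
 * and by the available NIC MSI vectors.
 */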
1711 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1712 {
1713 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1714 	struct hclge_dev *hdev = vport->back;
1715 	int i, alloced;
1716 
1717 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1718 	     alloced < num_tqps; i++) {
1719 		if (!hdev->htqp[i].alloced) {
1720 			hdev->htqp[i].q.handle = &vport->nic;
1721 			hdev->htqp[i].q.tqp_index = alloced;
1722 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1723 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1724 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1725 			hdev->htqp[i].alloced = true;
1726 			alloced++;
1727 		}
1728 	}
1729 	vport->alloc_tqps = alloced;
1730 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1731 				vport->alloc_tqps / hdev->tm_info.num_tc);
1732 
	/* ensure a one-to-one mapping between irq and queue by default */
1734 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1735 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1736 
1737 	return 0;
1738 }
1739 
1740 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1741 			    u16 num_tx_desc, u16 num_rx_desc)
1742 
1743 {
1744 	struct hnae3_handle *nic = &vport->nic;
1745 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1746 	struct hclge_dev *hdev = vport->back;
1747 	int ret;
1748 
1749 	kinfo->num_tx_desc = num_tx_desc;
1750 	kinfo->num_rx_desc = num_rx_desc;
1751 
1752 	kinfo->rx_buf_len = hdev->rx_buf_len;
1753 	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1754 
1755 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1756 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1757 	if (!kinfo->tqp)
1758 		return -ENOMEM;
1759 
1760 	ret = hclge_assign_tqp(vport, num_tqps);
1761 	if (ret)
1762 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1763 
1764 	return ret;
1765 }
1766 
1767 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1768 				  struct hclge_vport *vport)
1769 {
1770 	struct hnae3_handle *nic = &vport->nic;
1771 	struct hnae3_knic_private_info *kinfo;
1772 	u16 i;
1773 
1774 	kinfo = &nic->kinfo;
1775 	for (i = 0; i < vport->alloc_tqps; i++) {
1776 		struct hclge_tqp *q =
1777 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1778 		bool is_pf;
1779 		int ret;
1780 
1781 		is_pf = !(vport->vport_id);
1782 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1783 					     i, is_pf);
1784 		if (ret)
1785 			return ret;
1786 	}
1787 
1788 	return 0;
1789 }
1790 
1791 static int hclge_map_tqp(struct hclge_dev *hdev)
1792 {
1793 	struct hclge_vport *vport = hdev->vport;
1794 	u16 i, num_vport;
1795 
1796 	num_vport = hdev->num_req_vfs + 1;
1797 	for (i = 0; i < num_vport; i++)	{
1798 		int ret;
1799 
1800 		ret = hclge_map_tqp_to_vport(hdev, vport);
1801 		if (ret)
1802 			return ret;
1803 
1804 		vport++;
1805 	}
1806 
1807 	return 0;
1808 }
1809 
1810 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1811 {
1812 	struct hnae3_handle *nic = &vport->nic;
1813 	struct hclge_dev *hdev = vport->back;
1814 	int ret;
1815 
1816 	nic->pdev = hdev->pdev;
1817 	nic->ae_algo = &ae_algo;
1818 	nic->numa_node_mask = hdev->numa_node_mask;
1819 	nic->kinfo.io_base = hdev->hw.io_base;
1820 
1821 	ret = hclge_knic_setup(vport, num_tqps,
1822 			       hdev->num_tx_desc, hdev->num_rx_desc);
1823 	if (ret)
1824 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1825 
1826 	return ret;
1827 }
1828 
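/* Create one vport for the PF itself plus one per requested VF and split
 * the TQPs evenly between them; the PF vport also takes the remainder.
 */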
1829 static int hclge_alloc_vport(struct hclge_dev *hdev)
1830 {
1831 	struct pci_dev *pdev = hdev->pdev;
1832 	struct hclge_vport *vport;
1833 	u32 tqp_main_vport;
1834 	u32 tqp_per_vport;
1835 	int num_vport, i;
1836 	int ret;
1837 
	/* We need to alloc a vport for the main NIC of the PF */
1839 	num_vport = hdev->num_req_vfs + 1;
1840 
1841 	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
			hdev->num_tqps, num_vport);
1844 		return -EINVAL;
1845 	}
1846 
1847 	/* Alloc the same number of TQPs for every vport */
1848 	tqp_per_vport = hdev->num_tqps / num_vport;
1849 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1850 
1851 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1852 			     GFP_KERNEL);
1853 	if (!vport)
1854 		return -ENOMEM;
1855 
1856 	hdev->vport = vport;
1857 	hdev->num_alloc_vport = num_vport;
1858 
1859 	if (IS_ENABLED(CONFIG_PCI_IOV))
1860 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1861 
1862 	for (i = 0; i < num_vport; i++) {
1863 		vport->back = hdev;
1864 		vport->vport_id = i;
1865 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1866 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1867 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1868 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1869 		vport->req_vlan_fltr_en = true;
1870 		INIT_LIST_HEAD(&vport->vlan_list);
1871 		INIT_LIST_HEAD(&vport->uc_mac_list);
1872 		INIT_LIST_HEAD(&vport->mc_mac_list);
1873 		spin_lock_init(&vport->mac_list_lock);
1874 
1875 		if (i == 0)
1876 			ret = hclge_vport_setup(vport, tqp_main_vport);
1877 		else
1878 			ret = hclge_vport_setup(vport, tqp_per_vport);
1879 		if (ret) {
1880 			dev_err(&pdev->dev,
1881 				"vport setup failed for vport %d, %d\n",
1882 				i, ret);
1883 			return ret;
1884 		}
1885 
1886 		vport++;
1887 	}
1888 
1889 	return 0;
1890 }
1891 
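/* Program the per-TC TX packet buffer sizes into hardware; each size is
 * sent in 128-byte units with the update-enable bit set.
 */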
1892 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1893 				    struct hclge_pkt_buf_alloc *buf_alloc)
1894 {
/* TX buffer size is configured in units of 128 bytes */
1896 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1897 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1898 	struct hclge_tx_buff_alloc_cmd *req;
1899 	struct hclge_desc desc;
1900 	int ret;
1901 	u8 i;
1902 
1903 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1904 
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
1906 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1907 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1908 
1909 		req->tx_pkt_buff[i] =
1910 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1911 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1912 	}
1913 
1914 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1915 	if (ret)
1916 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1917 			ret);
1918 
1919 	return ret;
1920 }
1921 
1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1923 				 struct hclge_pkt_buf_alloc *buf_alloc)
1924 {
1925 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1926 
1927 	if (ret)
1928 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1929 
1930 	return ret;
1931 }
1932 
1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1934 {
1935 	unsigned int i;
1936 	u32 cnt = 0;
1937 
1938 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1939 		if (hdev->hw_tc_map & BIT(i))
1940 			cnt++;
1941 	return cnt;
1942 }
1943 
/* Get the number of PFC-enabled TCs that have a private buffer */
1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1946 				  struct hclge_pkt_buf_alloc *buf_alloc)
1947 {
1948 	struct hclge_priv_buf *priv;
1949 	unsigned int i;
1950 	int cnt = 0;
1951 
1952 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1953 		priv = &buf_alloc->priv_buf[i];
1954 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1955 		    priv->enable)
1956 			cnt++;
1957 	}
1958 
1959 	return cnt;
1960 }
1961 
/* Get the number of PFC-disabled TCs that have a private buffer */
1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1964 				     struct hclge_pkt_buf_alloc *buf_alloc)
1965 {
1966 	struct hclge_priv_buf *priv;
1967 	unsigned int i;
1968 	int cnt = 0;
1969 
1970 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1971 		priv = &buf_alloc->priv_buf[i];
1972 		if (hdev->hw_tc_map & BIT(i) &&
1973 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1974 		    priv->enable)
1975 			cnt++;
1976 	}
1977 
1978 	return cnt;
1979 }
1980 
1981 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1982 {
1983 	struct hclge_priv_buf *priv;
1984 	u32 rx_priv = 0;
1985 	int i;
1986 
1987 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1988 		priv = &buf_alloc->priv_buf[i];
1989 		if (priv->enable)
1990 			rx_priv += priv->buf_size;
1991 	}
1992 	return rx_priv;
1993 }
1994 
1995 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1996 {
1997 	u32 i, total_tx_size = 0;
1998 
1999 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2000 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2001 
2002 	return total_tx_size;
2003 }
2004 
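/* Check whether @rx_all can hold all enabled private RX buffers plus a
 * shared buffer. If it can, fill in the shared buffer size, its high/low
 * waterlines and the per-TC thresholds, and return true.
 */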
2005 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2006 				struct hclge_pkt_buf_alloc *buf_alloc,
2007 				u32 rx_all)
2008 {
2009 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2010 	u32 tc_num = hclge_get_tc_num(hdev);
2011 	u32 shared_buf, aligned_mps;
2012 	u32 rx_priv;
2013 	int i;
2014 
2015 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2016 
2017 	if (hnae3_dev_dcb_supported(hdev))
2018 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2019 					hdev->dv_buf_size;
2020 	else
2021 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2022 					+ hdev->dv_buf_size;
2023 
2024 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2025 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2026 			     HCLGE_BUF_SIZE_UNIT);
2027 
2028 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2029 	if (rx_all < rx_priv + shared_std)
2030 		return false;
2031 
2032 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2033 	buf_alloc->s_buf.buf_size = shared_buf;
2034 	if (hnae3_dev_dcb_supported(hdev)) {
2035 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2036 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2037 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2038 				  HCLGE_BUF_SIZE_UNIT);
2039 	} else {
2040 		buf_alloc->s_buf.self.high = aligned_mps +
2041 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2042 		buf_alloc->s_buf.self.low = aligned_mps;
2043 	}
2044 
2045 	if (hnae3_dev_dcb_supported(hdev)) {
2046 		hi_thrd = shared_buf - hdev->dv_buf_size;
2047 
2048 		if (tc_num <= NEED_RESERVE_TC_NUM)
2049 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2050 					/ BUF_MAX_PERCENT;
2051 
2052 		if (tc_num)
2053 			hi_thrd = hi_thrd / tc_num;
2054 
2055 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2056 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2057 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2058 	} else {
2059 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2060 		lo_thrd = aligned_mps;
2061 	}
2062 
2063 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2065 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2066 	}
2067 
2068 	return true;
2069 }
2070 
2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2072 				struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074 	u32 i, total_size;
2075 
2076 	total_size = hdev->pkt_buf_size;
2077 
2078 	/* alloc tx buffer for all enabled tc */
2079 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2080 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2081 
2082 		if (hdev->hw_tc_map & BIT(i)) {
2083 			if (total_size < hdev->tx_buf_size)
2084 				return -ENOMEM;
2085 
2086 			priv->tx_buf_size = hdev->tx_buf_size;
2087 		} else {
2088 			priv->tx_buf_size = 0;
2089 		}
2090 
2091 		total_size -= priv->tx_buf_size;
2092 	}
2093 
2094 	return 0;
2095 }
2096 
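/* Compute a private RX buffer and waterlines for every enabled TC, using
 * either the larger (@max set) or the smaller waterline profile, and check
 * whether the result still fits into the remaining packet buffer.
 */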
2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2098 				  struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2101 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2102 	unsigned int i;
2103 
2104 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2105 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2106 
2107 		priv->enable = 0;
2108 		priv->wl.low = 0;
2109 		priv->wl.high = 0;
2110 		priv->buf_size = 0;
2111 
2112 		if (!(hdev->hw_tc_map & BIT(i)))
2113 			continue;
2114 
2115 		priv->enable = 1;
2116 
2117 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2118 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2119 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2120 						HCLGE_BUF_SIZE_UNIT);
2121 		} else {
2122 			priv->wl.low = 0;
2123 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2124 					aligned_mps;
2125 		}
2126 
2127 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2128 	}
2129 
2130 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2131 }
2132 
2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2134 					  struct hclge_pkt_buf_alloc *buf_alloc)
2135 {
2136 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2138 	int i;
2139 
	/* clear TCs starting from the last one */
2141 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2142 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2143 		unsigned int mask = BIT((unsigned int)i);
2144 
2145 		if (hdev->hw_tc_map & mask &&
2146 		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of the TC without PFC */
2148 			priv->wl.low = 0;
2149 			priv->wl.high = 0;
2150 			priv->buf_size = 0;
2151 			priv->enable = 0;
2152 			no_pfc_priv_num--;
2153 		}
2154 
2155 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2156 		    no_pfc_priv_num == 0)
2157 			break;
2158 	}
2159 
2160 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2161 }
2162 
2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2164 					struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2167 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2168 	int i;
2169 
	/* clear TCs starting from the last one */
2171 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2172 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2173 		unsigned int mask = BIT((unsigned int)i);
2174 
2175 		if (hdev->hw_tc_map & mask &&
2176 		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with a private buffer */
2178 			priv->wl.low = 0;
2179 			priv->enable = 0;
2180 			priv->wl.high = 0;
2181 			priv->buf_size = 0;
2182 			pfc_priv_num--;
2183 		}
2184 
2185 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2186 		    pfc_priv_num == 0)
2187 			break;
2188 	}
2189 
2190 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2191 }
2192 
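/* Try to split the remaining RX buffer evenly among the enabled TCs as
 * private buffers, leaving no shared buffer. Returns false when the
 * per-TC share would drop below the required minimum.
 */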
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196 #define COMPENSATE_BUFFER	0x3C00
2197 #define COMPENSATE_HALF_MPS_NUM	5
2198 #define PRIV_WL_GAP		0x1800
2199 
2200 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2201 	u32 tc_num = hclge_get_tc_num(hdev);
2202 	u32 half_mps = hdev->mps >> 1;
2203 	u32 min_rx_priv;
2204 	unsigned int i;
2205 
2206 	if (tc_num)
2207 		rx_priv = rx_priv / tc_num;
2208 
2209 	if (tc_num <= NEED_RESERVE_TC_NUM)
2210 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2211 
2212 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2213 			COMPENSATE_HALF_MPS_NUM * half_mps;
2214 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2215 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2216 	if (rx_priv < min_rx_priv)
2217 		return false;
2218 
2219 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2220 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2221 
2222 		priv->enable = 0;
2223 		priv->wl.low = 0;
2224 		priv->wl.high = 0;
2225 		priv->buf_size = 0;
2226 
2227 		if (!(hdev->hw_tc_map & BIT(i)))
2228 			continue;
2229 
2230 		priv->enable = 1;
2231 		priv->buf_size = rx_priv;
2232 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2233 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2234 	}
2235 
2236 	buf_alloc->s_buf.buf_size = 0;
2237 
2238 	return true;
2239 }
2240 
2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2242  * @hdev: pointer to struct hclge_dev
2243  * @buf_alloc: pointer to buffer calculation data
 * @return: 0 on success, negative errno on failure
2245  */
2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2247 				struct hclge_pkt_buf_alloc *buf_alloc)
2248 {
2249 	/* When DCB is not supported, rx private buffer is not allocated. */
2250 	if (!hnae3_dev_dcb_supported(hdev)) {
2251 		u32 rx_all = hdev->pkt_buf_size;
2252 
2253 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2254 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2255 			return -ENOMEM;
2256 
2257 		return 0;
2258 	}
2259 
2260 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2261 		return 0;
2262 
2263 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2264 		return 0;
2265 
2266 	/* try to decrease the buffer size */
2267 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2268 		return 0;
2269 
2270 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2271 		return 0;
2272 
2273 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2274 		return 0;
2275 
2276 	return -ENOMEM;
2277 }
2278 
2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2280 				   struct hclge_pkt_buf_alloc *buf_alloc)
2281 {
2282 	struct hclge_rx_priv_buff_cmd *req;
2283 	struct hclge_desc desc;
2284 	int ret;
2285 	int i;
2286 
2287 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2288 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2289 
	/* Alloc private buffer for each TC */
2291 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2292 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2293 
2294 		req->buf_num[i] =
2295 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2296 		req->buf_num[i] |=
2297 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2298 	}
2299 
2300 	req->shared_buf =
2301 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2302 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2303 
2304 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2305 	if (ret)
2306 		dev_err(&hdev->pdev->dev,
2307 			"rx private buffer alloc cmd failed %d\n", ret);
2308 
2309 	return ret;
2310 }
2311 
2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2313 				   struct hclge_pkt_buf_alloc *buf_alloc)
2314 {
2315 	struct hclge_rx_priv_wl_buf *req;
2316 	struct hclge_priv_buf *priv;
2317 	struct hclge_desc desc[2];
2318 	int i, j;
2319 	int ret;
2320 
2321 	for (i = 0; i < 2; i++) {
2322 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2323 					   false);
2324 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2325 
		/* The first descriptor sets the NEXT bit to 1 */
2327 		if (i == 0)
2328 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329 		else
2330 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2331 
2332 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2334 
2335 			priv = &buf_alloc->priv_buf[idx];
2336 			req->tc_wl[j].high =
2337 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2338 			req->tc_wl[j].high |=
2339 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340 			req->tc_wl[j].low =
2341 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2342 			req->tc_wl[j].low |=
2343 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344 		}
2345 	}
2346 
	/* Send 2 descriptors at one time */
2348 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349 	if (ret)
2350 		dev_err(&hdev->pdev->dev,
2351 			"rx private waterline config cmd failed %d\n",
2352 			ret);
2353 	return ret;
2354 }
2355 
2356 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2357 				    struct hclge_pkt_buf_alloc *buf_alloc)
2358 {
2359 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2360 	struct hclge_rx_com_thrd *req;
2361 	struct hclge_desc desc[2];
2362 	struct hclge_tc_thrd *tc;
2363 	int i, j;
2364 	int ret;
2365 
2366 	for (i = 0; i < 2; i++) {
2367 		hclge_cmd_setup_basic_desc(&desc[i],
2368 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2369 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2370 
		/* The first descriptor sets the NEXT bit to 1 */
2372 		if (i == 0)
2373 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2374 		else
2375 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2376 
2377 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2378 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2379 
2380 			req->com_thrd[j].high =
2381 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2382 			req->com_thrd[j].high |=
2383 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2384 			req->com_thrd[j].low =
2385 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2386 			req->com_thrd[j].low |=
2387 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388 		}
2389 	}
2390 
2391 	/* Send 2 descriptors at one time */
2392 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2393 	if (ret)
2394 		dev_err(&hdev->pdev->dev,
2395 			"common threshold config cmd failed %d\n", ret);
2396 	return ret;
2397 }
2398 
2399 static int hclge_common_wl_config(struct hclge_dev *hdev,
2400 				  struct hclge_pkt_buf_alloc *buf_alloc)
2401 {
2402 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2403 	struct hclge_rx_com_wl *req;
2404 	struct hclge_desc desc;
2405 	int ret;
2406 
2407 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2408 
2409 	req = (struct hclge_rx_com_wl *)desc.data;
2410 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2411 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2412 
2413 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2414 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2415 
2416 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2417 	if (ret)
2418 		dev_err(&hdev->pdev->dev,
2419 			"common waterline config cmd failed %d\n", ret);
2420 
2421 	return ret;
2422 }
2423 
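/* Calculate and program the whole packet buffer layout: TX buffers first,
 * then the RX private buffers, and finally the waterlines and thresholds
 * (the private waterline and threshold commands are DCB-only).
 */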
2424 int hclge_buffer_alloc(struct hclge_dev *hdev)
2425 {
2426 	struct hclge_pkt_buf_alloc *pkt_buf;
2427 	int ret;
2428 
2429 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2430 	if (!pkt_buf)
2431 		return -ENOMEM;
2432 
2433 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2434 	if (ret) {
2435 		dev_err(&hdev->pdev->dev,
2436 			"could not calc tx buffer size for all TCs %d\n", ret);
2437 		goto out;
2438 	}
2439 
2440 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2441 	if (ret) {
2442 		dev_err(&hdev->pdev->dev,
2443 			"could not alloc tx buffers %d\n", ret);
2444 		goto out;
2445 	}
2446 
2447 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2448 	if (ret) {
2449 		dev_err(&hdev->pdev->dev,
2450 			"could not calc rx priv buffer size for all TCs %d\n",
2451 			ret);
2452 		goto out;
2453 	}
2454 
2455 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2456 	if (ret) {
2457 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2458 			ret);
2459 		goto out;
2460 	}
2461 
2462 	if (hnae3_dev_dcb_supported(hdev)) {
2463 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2464 		if (ret) {
2465 			dev_err(&hdev->pdev->dev,
2466 				"could not configure rx private waterline %d\n",
2467 				ret);
2468 			goto out;
2469 		}
2470 
2471 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2472 		if (ret) {
2473 			dev_err(&hdev->pdev->dev,
2474 				"could not configure common threshold %d\n",
2475 				ret);
2476 			goto out;
2477 		}
2478 	}
2479 
2480 	ret = hclge_common_wl_config(hdev, pkt_buf);
2481 	if (ret)
2482 		dev_err(&hdev->pdev->dev,
2483 			"could not configure common waterline %d\n", ret);
2484 
2485 out:
2486 	kfree(pkt_buf);
2487 	return ret;
2488 }
2489 
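/* Populate the RoCE handle with its vector count, base vector, register
 * bases and netdev/pdev information taken from the PF and the NIC handle.
 */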
2490 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2491 {
2492 	struct hnae3_handle *roce = &vport->roce;
2493 	struct hnae3_handle *nic = &vport->nic;
2494 	struct hclge_dev *hdev = vport->back;
2495 
2496 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2497 
2498 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2499 		return -EINVAL;
2500 
2501 	roce->rinfo.base_vector = hdev->roce_base_vector;
2502 
2503 	roce->rinfo.netdev = nic->kinfo.netdev;
2504 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2505 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2506 
2507 	roce->pdev = nic->pdev;
2508 	roce->ae_algo = nic->ae_algo;
2509 	roce->numa_node_mask = nic->numa_node_mask;
2510 
2511 	return 0;
2512 }
2513 
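/* Allocate the PF's MSI/MSI-X vectors and the per-vector bookkeeping
 * arrays; the RoCE base vector is placed right after the NIC vectors.
 */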
2514 static int hclge_init_msi(struct hclge_dev *hdev)
2515 {
2516 	struct pci_dev *pdev = hdev->pdev;
2517 	int vectors;
2518 	int i;
2519 
2520 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2521 					hdev->num_msi,
2522 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2523 	if (vectors < 0) {
2524 		dev_err(&pdev->dev,
2525 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2526 			vectors);
2527 		return vectors;
2528 	}
2529 	if (vectors < hdev->num_msi)
2530 		dev_warn(&hdev->pdev->dev,
2531 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2532 			 hdev->num_msi, vectors);
2533 
2534 	hdev->num_msi = vectors;
2535 	hdev->num_msi_left = vectors;
2536 
2537 	hdev->base_msi_vector = pdev->irq;
2538 	hdev->roce_base_vector = hdev->base_msi_vector +
2539 				hdev->num_nic_msi;
2540 
2541 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2542 					   sizeof(u16), GFP_KERNEL);
2543 	if (!hdev->vector_status) {
2544 		pci_free_irq_vectors(pdev);
2545 		return -ENOMEM;
2546 	}
2547 
2548 	for (i = 0; i < hdev->num_msi; i++)
2549 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2550 
2551 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2552 					sizeof(int), GFP_KERNEL);
2553 	if (!hdev->vector_irq) {
2554 		pci_free_irq_vectors(pdev);
2555 		return -ENOMEM;
2556 	}
2557 
2558 	return 0;
2559 }
2560 
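/* Half duplex is only meaningful at 10M/100M, so force full duplex for any
 * other speed.
 */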
2561 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2562 {
2563 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2564 		duplex = HCLGE_MAC_FULL;
2565 
2566 	return duplex;
2567 }
2568 
2569 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2570 				      u8 duplex)
2571 {
2572 	struct hclge_config_mac_speed_dup_cmd *req;
2573 	struct hclge_desc desc;
2574 	int ret;
2575 
2576 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2577 
2578 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2579 
2580 	if (duplex)
2581 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2582 
2583 	switch (speed) {
2584 	case HCLGE_MAC_SPEED_10M:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2587 		break;
2588 	case HCLGE_MAC_SPEED_100M:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2591 		break;
2592 	case HCLGE_MAC_SPEED_1G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2595 		break;
2596 	case HCLGE_MAC_SPEED_10G:
2597 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2599 		break;
2600 	case HCLGE_MAC_SPEED_25G:
2601 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2603 		break;
2604 	case HCLGE_MAC_SPEED_40G:
2605 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2606 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2607 		break;
2608 	case HCLGE_MAC_SPEED_50G:
2609 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2610 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2611 		break;
2612 	case HCLGE_MAC_SPEED_100G:
2613 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2614 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2615 		break;
2616 	case HCLGE_MAC_SPEED_200G:
2617 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2618 				HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2619 		break;
2620 	default:
2621 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2622 		return -EINVAL;
2623 	}
2624 
2625 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2626 		      1);
2627 
2628 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2629 	if (ret) {
2630 		dev_err(&hdev->pdev->dev,
2631 			"mac speed/duplex config cmd failed %d.\n", ret);
2632 		return ret;
2633 	}
2634 
2635 	return 0;
2636 }
2637 
2638 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2639 {
2640 	struct hclge_mac *mac = &hdev->hw.mac;
2641 	int ret;
2642 
2643 	duplex = hclge_check_speed_dup(duplex, speed);
2644 	if (!mac->support_autoneg && mac->speed == speed &&
2645 	    mac->duplex == duplex)
2646 		return 0;
2647 
2648 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2649 	if (ret)
2650 		return ret;
2651 
2652 	hdev->hw.mac.speed = speed;
2653 	hdev->hw.mac.duplex = duplex;
2654 
2655 	return 0;
2656 }
2657 
2658 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2659 				     u8 duplex)
2660 {
2661 	struct hclge_vport *vport = hclge_get_vport(handle);
2662 	struct hclge_dev *hdev = vport->back;
2663 
2664 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2665 }
2666 
2667 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2668 {
2669 	struct hclge_config_auto_neg_cmd *req;
2670 	struct hclge_desc desc;
2671 	u32 flag = 0;
2672 	int ret;
2673 
2674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2675 
2676 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2677 	if (enable)
2678 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2679 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2680 
2681 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2682 	if (ret)
2683 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2684 			ret);
2685 
2686 	return ret;
2687 }
2688 
2689 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2690 {
2691 	struct hclge_vport *vport = hclge_get_vport(handle);
2692 	struct hclge_dev *hdev = vport->back;
2693 
2694 	if (!hdev->hw.mac.support_autoneg) {
2695 		if (enable) {
2696 			dev_err(&hdev->pdev->dev,
2697 				"autoneg is not supported by current port\n");
2698 			return -EOPNOTSUPP;
2699 		} else {
2700 			return 0;
2701 		}
2702 	}
2703 
2704 	return hclge_set_autoneg_en(hdev, enable);
2705 }
2706 
2707 static int hclge_get_autoneg(struct hnae3_handle *handle)
2708 {
2709 	struct hclge_vport *vport = hclge_get_vport(handle);
2710 	struct hclge_dev *hdev = vport->back;
2711 	struct phy_device *phydev = hdev->hw.mac.phydev;
2712 
2713 	if (phydev)
2714 		return phydev->autoneg;
2715 
2716 	return hdev->hw.mac.autoneg;
2717 }
2718 
2719 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2720 {
2721 	struct hclge_vport *vport = hclge_get_vport(handle);
2722 	struct hclge_dev *hdev = vport->back;
2723 	int ret;
2724 
2725 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2726 
2727 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2728 	if (ret)
2729 		return ret;
2730 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2731 }
2732 
2733 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2734 {
2735 	struct hclge_vport *vport = hclge_get_vport(handle);
2736 	struct hclge_dev *hdev = vport->back;
2737 
2738 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2739 		return hclge_set_autoneg_en(hdev, !halt);
2740 
2741 	return 0;
2742 }
2743 
2744 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2745 {
2746 	struct hclge_config_fec_cmd *req;
2747 	struct hclge_desc desc;
2748 	int ret;
2749 
2750 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2751 
2752 	req = (struct hclge_config_fec_cmd *)desc.data;
2753 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2754 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2755 	if (fec_mode & BIT(HNAE3_FEC_RS))
2756 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2757 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2758 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2759 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2760 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2761 
2762 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2763 	if (ret)
2764 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2765 
2766 	return ret;
2767 }
2768 
2769 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2770 {
2771 	struct hclge_vport *vport = hclge_get_vport(handle);
2772 	struct hclge_dev *hdev = vport->back;
2773 	struct hclge_mac *mac = &hdev->hw.mac;
2774 	int ret;
2775 
2776 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2777 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2778 		return -EINVAL;
2779 	}
2780 
2781 	ret = hclge_set_fec_hw(hdev, fec_mode);
2782 	if (ret)
2783 		return ret;
2784 
2785 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2786 	return 0;
2787 }
2788 
2789 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2790 			  u8 *fec_mode)
2791 {
2792 	struct hclge_vport *vport = hclge_get_vport(handle);
2793 	struct hclge_dev *hdev = vport->back;
2794 	struct hclge_mac *mac = &hdev->hw.mac;
2795 
2796 	if (fec_ability)
2797 		*fec_ability = mac->fec_ability;
2798 	if (fec_mode)
2799 		*fec_mode = mac->fec_mode;
2800 }
2801 
2802 static int hclge_mac_init(struct hclge_dev *hdev)
2803 {
2804 	struct hclge_mac *mac = &hdev->hw.mac;
2805 	int ret;
2806 
2807 	hdev->support_sfp_query = true;
2808 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2809 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2810 					 hdev->hw.mac.duplex);
2811 	if (ret)
2812 		return ret;
2813 
2814 	if (hdev->hw.mac.support_autoneg) {
2815 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2816 		if (ret)
2817 			return ret;
2818 	}
2819 
2820 	mac->link = 0;
2821 
2822 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2823 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2824 		if (ret)
2825 			return ret;
2826 	}
2827 
2828 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2829 	if (ret) {
2830 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2831 		return ret;
2832 	}
2833 
2834 	ret = hclge_set_default_loopback(hdev);
2835 	if (ret)
2836 		return ret;
2837 
2838 	ret = hclge_buffer_alloc(hdev);
2839 	if (ret)
2840 		dev_err(&hdev->pdev->dev,
2841 			"allocate buffer fail, ret=%d\n", ret);
2842 
2843 	return ret;
2844 }
2845 
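/* The mailbox, reset and error-handling schedulers below all queue the
 * same delayed service task on the first CPU of the device's affinity
 * mask; each one marks its own pending-work state bit before doing so.
 */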
2846 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2847 {
2848 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2850 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851 				    hclge_wq, &hdev->service_task, 0);
2852 }
2853 
2854 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2855 {
2856 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2857 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2858 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2859 				    hclge_wq, &hdev->service_task, 0);
2860 }
2861 
2862 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2863 {
2864 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2865 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2866 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2867 				    hclge_wq, &hdev->service_task, 0);
2868 }
2869 
2870 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2871 {
2872 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2873 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2874 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2875 				    hclge_wq, &hdev->service_task,
2876 				    delay_time);
2877 }
2878 
2879 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2880 {
2881 	struct hclge_link_status_cmd *req;
2882 	struct hclge_desc desc;
2883 	int ret;
2884 
2885 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2886 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2887 	if (ret) {
2888 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2889 			ret);
2890 		return ret;
2891 	}
2892 
2893 	req = (struct hclge_link_status_cmd *)desc.data;
2894 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2895 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2896 
2897 	return 0;
2898 }
2899 
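/* Report the link as down while the device is stopped or while an
 * attached PHY is not up and running; otherwise query the MAC link
 * status through the QUERY_LINK_STATUS command.
 */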
2900 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2901 {
2902 	struct phy_device *phydev = hdev->hw.mac.phydev;
2903 
2904 	*link_status = HCLGE_LINK_STATUS_DOWN;
2905 
2906 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2907 		return 0;
2908 
2909 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2910 		return 0;
2911 
2912 	return hclge_get_mac_link_status(hdev, link_status);
2913 }
2914 
2915 static void hclge_push_link_status(struct hclge_dev *hdev)
2916 {
2917 	struct hclge_vport *vport;
2918 	int ret;
2919 	u16 i;
2920 
2921 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2922 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2923 
2924 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2925 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2926 			continue;
2927 
2928 		ret = hclge_push_vf_link_status(vport);
2929 		if (ret) {
2930 			dev_err(&hdev->pdev->dev,
2931 				"failed to push link status to vf%u, ret = %d\n",
2932 				i, ret);
2933 		}
2934 	}
2935 }
2936 
2937 static void hclge_update_link_status(struct hclge_dev *hdev)
2938 {
2939 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2940 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2941 	struct hnae3_client *rclient = hdev->roce_client;
2942 	struct hnae3_client *client = hdev->nic_client;
2943 	int state;
2944 	int ret;
2945 
2946 	if (!client)
2947 		return;
2948 
2949 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2950 		return;
2951 
2952 	ret = hclge_get_mac_phy_link(hdev, &state);
2953 	if (ret) {
2954 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2955 		return;
2956 	}
2957 
2958 	if (state != hdev->hw.mac.link) {
2959 		hdev->hw.mac.link = state;
2960 		client->ops->link_status_change(handle, state);
2961 		hclge_config_mac_tnl_int(hdev, state);
2962 		if (rclient && rclient->ops->link_status_change)
2963 			rclient->ops->link_status_change(rhandle, state);
2964 
2965 		hclge_push_link_status(hdev);
2966 	}
2967 
2968 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2969 }
2970 
2971 static void hclge_update_port_capability(struct hclge_dev *hdev,
2972 					 struct hclge_mac *mac)
2973 {
2974 	if (hnae3_dev_fec_supported(hdev))
2975 		/* update fec ability by speed */
2976 		hclge_convert_setting_fec(mac);
2977 
	/* The firmware cannot identify the backplane type; the media type
	 * read from the configuration helps to handle it.
	 */
2981 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2982 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2983 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2984 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2985 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2986 
2987 	if (mac->support_autoneg) {
2988 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2989 		linkmode_copy(mac->advertising, mac->supported);
2990 	} else {
2991 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2992 				   mac->supported);
2993 		linkmode_zero(mac->advertising);
2994 	}
2995 }
2996 
2997 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2998 {
2999 	struct hclge_sfp_info_cmd *resp;
3000 	struct hclge_desc desc;
3001 	int ret;
3002 
3003 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3004 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3005 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3006 	if (ret == -EOPNOTSUPP) {
3007 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed, ret = %d\n", ret);
3009 		return ret;
3010 	} else if (ret) {
3011 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3012 		return ret;
3013 	}
3014 
3015 	*speed = le32_to_cpu(resp->speed);
3016 
3017 	return 0;
3018 }
3019 
3020 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3021 {
3022 	struct hclge_sfp_info_cmd *resp;
3023 	struct hclge_desc desc;
3024 	int ret;
3025 
3026 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3027 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3028 
3029 	resp->query_type = QUERY_ACTIVE_SPEED;
3030 
3031 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3032 	if (ret == -EOPNOTSUPP) {
3033 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info, ret = %d\n", ret);
3035 		return ret;
3036 	} else if (ret) {
3037 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3038 		return ret;
3039 	}
3040 
	/* In some cases, the MAC speed obtained from the IMP may be 0;
	 * it should not be written to mac->speed.
	 */
3044 	if (!le32_to_cpu(resp->speed))
3045 		return 0;
3046 
3047 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means the firmware is an old
	 * version, so do not update these params
	 */
3051 	if (resp->speed_ability) {
3052 		mac->module_type = le32_to_cpu(resp->module_type);
3053 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3054 		mac->autoneg = resp->autoneg;
3055 		mac->support_autoneg = resp->autoneg_ability;
3056 		mac->speed_type = QUERY_ACTIVE_SPEED;
3057 		if (!resp->active_fec)
3058 			mac->fec_mode = 0;
3059 		else
3060 			mac->fec_mode = BIT(resp->active_fec);
3061 	} else {
3062 		mac->speed_type = QUERY_SFP_SPEED;
3063 	}
3064 
3065 	return 0;
3066 }
3067 
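/* Read the PHY link ksettings from the firmware using chained
 * PHY_LINK_KSETTING descriptors and translate them into the ethtool
 * representation.
 */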
3068 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3069 					struct ethtool_link_ksettings *cmd)
3070 {
3071 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3072 	struct hclge_vport *vport = hclge_get_vport(handle);
3073 	struct hclge_phy_link_ksetting_0_cmd *req0;
3074 	struct hclge_phy_link_ksetting_1_cmd *req1;
3075 	u32 supported, advertising, lp_advertising;
3076 	struct hclge_dev *hdev = vport->back;
3077 	int ret;
3078 
3079 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3080 				   true);
3081 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3082 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3083 				   true);
3084 
3085 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3086 	if (ret) {
3087 		dev_err(&hdev->pdev->dev,
3088 			"failed to get phy link ksetting, ret = %d.\n", ret);
3089 		return ret;
3090 	}
3091 
3092 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3093 	cmd->base.autoneg = req0->autoneg;
3094 	cmd->base.speed = le32_to_cpu(req0->speed);
3095 	cmd->base.duplex = req0->duplex;
3096 	cmd->base.port = req0->port;
3097 	cmd->base.transceiver = req0->transceiver;
3098 	cmd->base.phy_address = req0->phy_address;
3099 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3100 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3101 	supported = le32_to_cpu(req0->supported);
3102 	advertising = le32_to_cpu(req0->advertising);
3103 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3104 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3105 						supported);
3106 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3107 						advertising);
3108 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3109 						lp_advertising);
3110 
3111 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3112 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3113 	cmd->base.master_slave_state = req1->master_slave_state;
3114 
3115 	return 0;
3116 }
3117 
3118 static int
3119 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3120 			     const struct ethtool_link_ksettings *cmd)
3121 {
3122 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3123 	struct hclge_vport *vport = hclge_get_vport(handle);
3124 	struct hclge_phy_link_ksetting_0_cmd *req0;
3125 	struct hclge_phy_link_ksetting_1_cmd *req1;
3126 	struct hclge_dev *hdev = vport->back;
3127 	u32 advertising;
3128 	int ret;
3129 
3130 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3131 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3132 	     (cmd->base.duplex != DUPLEX_HALF &&
3133 	      cmd->base.duplex != DUPLEX_FULL)))
3134 		return -EINVAL;
3135 
3136 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3137 				   false);
3138 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3139 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3140 				   false);
3141 
3142 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3143 	req0->autoneg = cmd->base.autoneg;
3144 	req0->speed = cpu_to_le32(cmd->base.speed);
3145 	req0->duplex = cmd->base.duplex;
3146 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3147 						cmd->link_modes.advertising);
3148 	req0->advertising = cpu_to_le32(advertising);
3149 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3150 
3151 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3152 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3153 
3154 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3155 	if (ret) {
3156 		dev_err(&hdev->pdev->dev,
3157 			"failed to set phy link ksettings, ret = %d.\n", ret);
3158 		return ret;
3159 	}
3160 
3161 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3162 	hdev->hw.mac.speed = cmd->base.speed;
3163 	hdev->hw.mac.duplex = cmd->base.duplex;
3164 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3165 
3166 	return 0;
3167 }
3168 
3169 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3170 {
3171 	struct ethtool_link_ksettings cmd;
3172 	int ret;
3173 
3174 	if (!hnae3_dev_phy_imp_supported(hdev))
3175 		return 0;
3176 
3177 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3178 	if (ret)
3179 		return ret;
3180 
3181 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3182 	hdev->hw.mac.speed = cmd.base.speed;
3183 	hdev->hw.mac.duplex = cmd.base.duplex;
3184 
3185 	return 0;
3186 }
3187 
3188 static int hclge_tp_port_init(struct hclge_dev *hdev)
3189 {
3190 	struct ethtool_link_ksettings cmd;
3191 
3192 	if (!hnae3_dev_phy_imp_supported(hdev))
3193 		return 0;
3194 
3195 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3196 	cmd.base.speed = hdev->hw.mac.speed;
3197 	cmd.base.duplex = hdev->hw.mac.duplex;
3198 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3199 
3200 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3201 }
3202 
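/* Refresh the port information: copper ports are handled through the PHY
 * link ksettings path, other ports through the SFP speed/info commands;
 * when only the SFP speed is known, the MAC is forced to full duplex at
 * that speed.
 */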
3203 static int hclge_update_port_info(struct hclge_dev *hdev)
3204 {
3205 	struct hclge_mac *mac = &hdev->hw.mac;
3206 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3207 	int ret;
3208 
3209 	/* get the port info from SFP cmd if not copper port */
3210 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3211 		return hclge_update_tp_port_info(hdev);
3212 
	/* if the IMP does not support getting SFP/qSFP info, return directly */
3214 	if (!hdev->support_sfp_query)
3215 		return 0;
3216 
3217 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3218 		ret = hclge_get_sfp_info(hdev, mac);
3219 	else
3220 		ret = hclge_get_sfp_speed(hdev, &speed);
3221 
3222 	if (ret == -EOPNOTSUPP) {
3223 		hdev->support_sfp_query = false;
3224 		return ret;
3225 	} else if (ret) {
3226 		return ret;
3227 	}
3228 
3229 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3230 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3231 			hclge_update_port_capability(hdev, mac);
3232 			return 0;
3233 		}
3234 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3235 					       HCLGE_MAC_FULL);
3236 	} else {
3237 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3238 			return 0; /* do nothing if no SFP */
3239 
3240 		/* must config full duplex for SFP */
3241 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3242 	}
3243 }
3244 
3245 static int hclge_get_status(struct hnae3_handle *handle)
3246 {
3247 	struct hclge_vport *vport = hclge_get_vport(handle);
3248 	struct hclge_dev *hdev = vport->back;
3249 
3250 	hclge_update_link_status(hdev);
3251 
3252 	return hdev->hw.mac.link;
3253 }
3254 
3255 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3256 {
3257 	if (!pci_num_vf(hdev->pdev)) {
3258 		dev_err(&hdev->pdev->dev,
3259 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3260 		return NULL;
3261 	}
3262 
3263 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3264 		dev_err(&hdev->pdev->dev,
3265 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3266 			vf, pci_num_vf(hdev->pdev));
3267 		return NULL;
3268 	}
3269 
	/* VFs start from index 1 in the vport array */
3271 	vf += HCLGE_VF_VPORT_START_NUM;
3272 	return &hdev->vport[vf];
3273 }
3274 
3275 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3276 			       struct ifla_vf_info *ivf)
3277 {
3278 	struct hclge_vport *vport = hclge_get_vport(handle);
3279 	struct hclge_dev *hdev = vport->back;
3280 
3281 	vport = hclge_get_vf_vport(hdev, vf);
3282 	if (!vport)
3283 		return -EINVAL;
3284 
3285 	ivf->vf = vf;
3286 	ivf->linkstate = vport->vf_info.link_state;
3287 	ivf->spoofchk = vport->vf_info.spoofchk;
3288 	ivf->trusted = vport->vf_info.trusted;
3289 	ivf->min_tx_rate = 0;
3290 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3291 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3292 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3293 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3294 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3295 
3296 	return 0;
3297 }
3298 
3299 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3300 				   int link_state)
3301 {
3302 	struct hclge_vport *vport = hclge_get_vport(handle);
3303 	struct hclge_dev *hdev = vport->back;
3304 	int link_state_old;
3305 	int ret;
3306 
3307 	vport = hclge_get_vf_vport(hdev, vf);
3308 	if (!vport)
3309 		return -EINVAL;
3310 
3311 	link_state_old = vport->vf_info.link_state;
3312 	vport->vf_info.link_state = link_state;
3313 
3314 	ret = hclge_push_vf_link_status(vport);
3315 	if (ret) {
3316 		vport->vf_info.link_state = link_state_old;
3317 		dev_err(&hdev->pdev->dev,
3318 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3319 	}
3320 
3321 	return ret;
3322 }
3323 
3324 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3325 {
3326 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3327 
3328 	/* fetch the events from their corresponding regs */
3329 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3330 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3331 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3332 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3333 
	/* Assumption: if reset and mailbox events happen to be reported
	 * together, we only process the reset event in this pass and defer
	 * the mailbox events. Since we have not cleared the RX CMDQ event
	 * this time, we will receive another interrupt from the H/W just
	 * for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
3342 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3343 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3344 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3345 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3346 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3347 		hdev->rst_stats.imp_rst_cnt++;
3348 		return HCLGE_VECTOR0_EVENT_RST;
3349 	}
3350 
3351 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3352 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3353 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3354 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3355 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3356 		hdev->rst_stats.global_rst_cnt++;
3357 		return HCLGE_VECTOR0_EVENT_RST;
3358 	}
3359 
3360 	/* check for vector0 msix event and hardware error event source */
3361 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3362 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3363 		return HCLGE_VECTOR0_EVENT_ERR;
3364 
3365 	/* check for vector0 ptp event source */
3366 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3367 		*clearval = msix_src_reg;
3368 		return HCLGE_VECTOR0_EVENT_PTP;
3369 	}
3370 
3371 	/* check for vector0 mailbox(=CMDQ RX) event source */
3372 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3373 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3374 		*clearval = cmdq_src_reg;
3375 		return HCLGE_VECTOR0_EVENT_MBX;
3376 	}
3377 
3378 	/* print other vector0 event source */
3379 	dev_info(&hdev->pdev->dev,
3380 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3381 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3382 
3383 	return HCLGE_VECTOR0_EVENT_OTHER;
3384 }
3385 
3386 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3387 				    u32 regclr)
3388 {
3389 	switch (event_type) {
3390 	case HCLGE_VECTOR0_EVENT_PTP:
3391 	case HCLGE_VECTOR0_EVENT_RST:
3392 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3393 		break;
3394 	case HCLGE_VECTOR0_EVENT_MBX:
3395 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3396 		break;
3397 	default:
3398 		break;
3399 	}
3400 }
3401 
3402 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3403 {
3404 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3405 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3406 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3407 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3408 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3409 }
3410 
3411 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3412 {
3413 	writel(enable ? 1 : 0, vector->addr);
3414 }
3415 
3416 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3417 {
3418 	struct hclge_dev *hdev = data;
3419 	unsigned long flags;
3420 	u32 clearval = 0;
3421 	u32 event_cause;
3422 
3423 	hclge_enable_vector(&hdev->misc_vector, false);
3424 	event_cause = hclge_check_event_cause(hdev, &clearval);
3425 
3426 	/* vector 0 interrupt is shared with reset and mailbox source events. */
3427 	switch (event_cause) {
3428 	case HCLGE_VECTOR0_EVENT_ERR:
3429 		hclge_errhand_task_schedule(hdev);
3430 		break;
3431 	case HCLGE_VECTOR0_EVENT_RST:
3432 		hclge_reset_task_schedule(hdev);
3433 		break;
3434 	case HCLGE_VECTOR0_EVENT_PTP:
3435 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3436 		hclge_ptp_clean_tx_hwts(hdev);
3437 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3438 		break;
3439 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. we are not handling any mbx task and none is scheduled,
		 *                        OR
		 * 2. we are handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task, as this
		 * interrupt reports more mbx messages.
		 */
3449 		hclge_mbx_task_schedule(hdev);
3450 		break;
3451 	default:
3452 		dev_warn(&hdev->pdev->dev,
3453 			 "received unknown or unhandled event of vector0\n");
3454 		break;
3455 	}
3456 
3457 	hclge_clear_event_cause(hdev, event_cause, clearval);
3458 
3459 	/* Enable interrupt if it is not caused by reset event or error event */
3460 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3461 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3462 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3463 		hclge_enable_vector(&hdev->misc_vector, true);
3464 
3465 	return IRQ_HANDLED;
3466 }
3467 
3468 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3469 {
3470 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3471 		dev_warn(&hdev->pdev->dev,
3472 			 "vector(vector_id %d) has been freed.\n", vector_id);
3473 		return;
3474 	}
3475 
3476 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3477 	hdev->num_msi_left += 1;
3478 	hdev->num_msi_used -= 1;
3479 }
3480 
3481 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3482 {
3483 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3484 
3485 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3486 
3487 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3488 	hdev->vector_status[0] = 0;
3489 
3490 	hdev->num_msi_left -= 1;
3491 	hdev->num_msi_used += 1;
3492 }
3493 
3494 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3495 				      const cpumask_t *mask)
3496 {
3497 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3498 					      affinity_notify);
3499 
3500 	cpumask_copy(&hdev->affinity_mask, mask);
3501 }
3502 
3503 static void hclge_irq_affinity_release(struct kref *ref)
3504 {
3505 }
3506 
3507 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3508 {
3509 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3510 			      &hdev->affinity_mask);
3511 
3512 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3513 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3514 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3515 				  &hdev->affinity_notify);
3516 }
3517 
3518 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3519 {
3520 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3521 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3522 }
3523 
3524 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3525 {
3526 	int ret;
3527 
3528 	hclge_get_misc_vector(hdev);
3529 
3530 	/* this would be explicitly freed in the end */
3531 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3532 		 HCLGE_NAME, pci_name(hdev->pdev));
3533 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3534 			  0, hdev->misc_vector.name, hdev);
3535 	if (ret) {
3536 		hclge_free_vector(hdev, 0);
3537 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3538 			hdev->misc_vector.vector_irq);
3539 	}
3540 
3541 	return ret;
3542 }
3543 
3544 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3545 {
3546 	free_irq(hdev->misc_vector.vector_irq, hdev);
3547 	hclge_free_vector(hdev, 0);
3548 }
3549 
3550 int hclge_notify_client(struct hclge_dev *hdev,
3551 			enum hnae3_reset_notify_type type)
3552 {
3553 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3554 	struct hnae3_client *client = hdev->nic_client;
3555 	int ret;
3556 
3557 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3558 		return 0;
3559 
3560 	if (!client->ops->reset_notify)
3561 		return -EOPNOTSUPP;
3562 
3563 	ret = client->ops->reset_notify(handle, type);
3564 	if (ret)
3565 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3566 			type, ret);
3567 
3568 	return ret;
3569 }
3570 
3571 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3572 				    enum hnae3_reset_notify_type type)
3573 {
3574 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3575 	struct hnae3_client *client = hdev->roce_client;
3576 	int ret;
3577 
3578 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3579 		return 0;
3580 
3581 	if (!client->ops->reset_notify)
3582 		return -EOPNOTSUPP;
3583 
3584 	ret = client->ops->reset_notify(handle, type);
3585 	if (ret)
3586 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3587 			type, ret);
3588 
3589 	return ret;
3590 }
3591 
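/* Poll the reset status register that matches the current reset type until
 * the hardware clears the reset bit, giving up after HCLGE_RESET_WAIT_CNT
 * polls.
 */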
3592 static int hclge_reset_wait(struct hclge_dev *hdev)
3593 {
#define HCLGE_RESET_WAIT_MS	100
3595 #define HCLGE_RESET_WAIT_CNT	350
3596 
3597 	u32 val, reg, reg_bit;
3598 	u32 cnt = 0;
3599 
3600 	switch (hdev->reset_type) {
3601 	case HNAE3_IMP_RESET:
3602 		reg = HCLGE_GLOBAL_RESET_REG;
3603 		reg_bit = HCLGE_IMP_RESET_BIT;
3604 		break;
3605 	case HNAE3_GLOBAL_RESET:
3606 		reg = HCLGE_GLOBAL_RESET_REG;
3607 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3608 		break;
3609 	case HNAE3_FUNC_RESET:
3610 		reg = HCLGE_FUN_RST_ING;
3611 		reg_bit = HCLGE_FUN_RST_ING_B;
3612 		break;
3613 	default:
3614 		dev_err(&hdev->pdev->dev,
3615 			"Wait for unsupported reset type: %d\n",
3616 			hdev->reset_type);
3617 		return -EINVAL;
3618 	}
3619 
3620 	val = hclge_read_dev(&hdev->hw, reg);
3621 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3623 		val = hclge_read_dev(&hdev->hw, reg);
3624 		cnt++;
3625 	}
3626 
3627 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3628 		dev_warn(&hdev->pdev->dev,
3629 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3630 		return -EBUSY;
3631 	}
3632 
3633 	return 0;
3634 }
3635 
3636 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3637 {
3638 	struct hclge_vf_rst_cmd *req;
3639 	struct hclge_desc desc;
3640 
3641 	req = (struct hclge_vf_rst_cmd *)desc.data;
3642 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3643 	req->dest_vfid = func_id;
3644 
3645 	if (reset)
3646 		req->vf_rst = 0x1;
3647 
3648 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3649 }
3650 
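/* Set or clear FUNC_RST_ING for every VF vport. When asserting reset, also
 * inform each alive VF so that it can stop IO before the PF reset goes on.
 */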
3651 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3652 {
3653 	int i;
3654 
3655 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3656 		struct hclge_vport *vport = &hdev->vport[i];
3657 		int ret;
3658 
3659 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3660 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3661 		if (ret) {
3662 			dev_err(&hdev->pdev->dev,
3663 				"set vf(%u) rst failed %d!\n",
3664 				vport->vport_id, ret);
3665 			return ret;
3666 		}
3667 
3668 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3669 			continue;
3670 
3671 		/* Inform VF to process the reset.
3672 		 * hclge_inform_reset_assert_to_vf may fail if VF
3673 		 * driver is not loaded.
3674 		 */
3675 		ret = hclge_inform_reset_assert_to_vf(vport);
3676 		if (ret)
3677 			dev_warn(&hdev->pdev->dev,
3678 				 "inform reset to vf(%u) failed %d!\n",
3679 				 vport->vport_id, ret);
3680 	}
3681 
3682 	return 0;
3683 }
3684 
3685 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3686 {
3687 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3688 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3689 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3690 		return;
3691 
3692 	hclge_mbx_handler(hdev);
3693 
3694 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3695 }
3696 
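/* Wait until all VFs report that they are ready for the PF reset. The
 * mailbox task is serviced inside the loop because VFs bring their netdev
 * down via mailbox. Old firmware that does not support the query command
 * gets a fixed HCLGE_RESET_SYNC_TIME grace period instead.
 */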
3697 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3698 {
3699 	struct hclge_pf_rst_sync_cmd *req;
3700 	struct hclge_desc desc;
3701 	int cnt = 0;
3702 	int ret;
3703 
3704 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3705 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3706 
3707 	do {
		/* the VF needs to bring its netdev down via mbx during PF or FLR reset */
3709 		hclge_mailbox_service_task(hdev);
3710 
3711 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
3715 		if (ret == -EOPNOTSUPP) {
3716 			msleep(HCLGE_RESET_SYNC_TIME);
3717 			return;
3718 		} else if (ret) {
3719 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3720 				 ret);
3721 			return;
3722 		} else if (req->all_vf_ready) {
3723 			return;
3724 		}
3725 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3726 		hclge_cmd_reuse_desc(&desc, true);
3727 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3728 
3729 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3730 }
3731 
3732 void hclge_report_hw_error(struct hclge_dev *hdev,
3733 			   enum hnae3_hw_error_type type)
3734 {
3735 	struct hnae3_client *client = hdev->nic_client;
3736 
3737 	if (!client || !client->ops->process_hw_error ||
3738 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3739 		return;
3740 
3741 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3742 }
3743 
3744 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3745 {
3746 	u32 reg_val;
3747 
3748 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3749 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3750 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3751 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3752 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3753 	}
3754 
3755 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3756 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3757 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3758 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3759 	}
3760 }
3761 
3762 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3763 {
3764 	struct hclge_desc desc;
3765 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3766 	int ret;
3767 
3768 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3769 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3770 	req->fun_reset_vfid = func_id;
3771 
3772 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3773 	if (ret)
3774 		dev_err(&hdev->pdev->dev,
3775 			"send function reset cmd fail, status =%d\n", ret);
3776 
3777 	return ret;
3778 }
3779 
3780 static void hclge_do_reset(struct hclge_dev *hdev)
3781 {
3782 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3783 	struct pci_dev *pdev = hdev->pdev;
3784 	u32 val;
3785 
3786 	if (hclge_get_hw_reset_stat(handle)) {
3787 		dev_info(&pdev->dev, "hardware reset not finish\n");
3788 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3789 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3790 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3791 		return;
3792 	}
3793 
3794 	switch (hdev->reset_type) {
3795 	case HNAE3_IMP_RESET:
3796 		dev_info(&pdev->dev, "IMP reset requested\n");
3797 		val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3798 		hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3799 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3800 		break;
3801 	case HNAE3_GLOBAL_RESET:
3802 		dev_info(&pdev->dev, "global reset requested\n");
3803 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3804 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3805 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3806 		break;
3807 	case HNAE3_FUNC_RESET:
3808 		dev_info(&pdev->dev, "PF reset requested\n");
3809 		/* schedule again to check later */
3810 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3811 		hclge_reset_task_schedule(hdev);
3812 		break;
3813 	default:
3814 		dev_warn(&pdev->dev,
3815 			 "unsupported reset type: %d\n", hdev->reset_type);
3816 		break;
3817 	}
3818 }
3819 
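/* Pick the highest priority pending reset level (IMP > global > func > FLR)
 * from @addr, clearing the selected bit and any global/func bits a higher
 * level makes redundant. Returns HNAE3_NONE_RESET if a higher level reset
 * is already in progress.
 */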
3820 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3821 						   unsigned long *addr)
3822 {
3823 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3824 	struct hclge_dev *hdev = ae_dev->priv;
3825 
3826 	/* return the highest priority reset level amongst all */
3827 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3828 		rst_level = HNAE3_IMP_RESET;
3829 		clear_bit(HNAE3_IMP_RESET, addr);
3830 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3831 		clear_bit(HNAE3_FUNC_RESET, addr);
3832 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3833 		rst_level = HNAE3_GLOBAL_RESET;
3834 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3835 		clear_bit(HNAE3_FUNC_RESET, addr);
3836 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3837 		rst_level = HNAE3_FUNC_RESET;
3838 		clear_bit(HNAE3_FUNC_RESET, addr);
3839 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3840 		rst_level = HNAE3_FLR_RESET;
3841 		clear_bit(HNAE3_FLR_RESET, addr);
3842 	}
3843 
3844 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3845 	    rst_level < hdev->reset_type)
3846 		return HNAE3_NONE_RESET;
3847 
3848 	return rst_level;
3849 }
3850 
3851 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3852 {
3853 	u32 clearval = 0;
3854 
3855 	switch (hdev->reset_type) {
3856 	case HNAE3_IMP_RESET:
3857 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3858 		break;
3859 	case HNAE3_GLOBAL_RESET:
3860 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3861 		break;
3862 	default:
3863 		break;
3864 	}
3865 
3866 	if (!clearval)
3867 		return;
3868 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3872 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3873 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3874 				clearval);
3875 
3876 	hclge_enable_vector(&hdev->misc_vector, true);
3877 }
3878 
3879 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3880 {
3881 	u32 reg_val;
3882 
3883 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3884 	if (enable)
3885 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3886 	else
3887 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3888 
3889 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3890 }
3891 
3892 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3893 {
3894 	int ret;
3895 
3896 	ret = hclge_set_all_vf_rst(hdev, true);
3897 	if (ret)
3898 		return ret;
3899 
3900 	hclge_func_reset_sync_vf(hdev);
3901 
3902 	return 0;
3903 }
3904 
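/* Do the reset-type specific preparation: notify and sync with the VFs for
 * PF/FLR reset (and assert the function reset for PF reset), or trigger the
 * IMP reset interrupt; finally signal readiness to the hardware via the
 * reset handshake.
 */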
3905 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3906 {
3907 	u32 reg_val;
3908 	int ret = 0;
3909 
3910 	switch (hdev->reset_type) {
3911 	case HNAE3_FUNC_RESET:
3912 		ret = hclge_func_reset_notify_vf(hdev);
3913 		if (ret)
3914 			return ret;
3915 
3916 		ret = hclge_func_reset_cmd(hdev, 0);
3917 		if (ret) {
3918 			dev_err(&hdev->pdev->dev,
3919 				"asserting function reset fail %d!\n", ret);
3920 			return ret;
3921 		}
3922 
		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
3928 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3929 		hdev->rst_stats.pf_rst_cnt++;
3930 		break;
3931 	case HNAE3_FLR_RESET:
3932 		ret = hclge_func_reset_notify_vf(hdev);
3933 		if (ret)
3934 			return ret;
3935 		break;
3936 	case HNAE3_IMP_RESET:
3937 		hclge_handle_imp_error(hdev);
3938 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3939 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3940 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3941 		break;
3942 	default:
3943 		break;
3944 	}
3945 
3946 	/* inform hardware that preparatory work is done */
3947 	msleep(HCLGE_RESET_SYNC_TIME);
3948 	hclge_reset_handshake(hdev, true);
3949 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3950 
3951 	return ret;
3952 }
3953 
3954 static void hclge_show_rst_info(struct hclge_dev *hdev)
3955 {
3956 	char *buf;
3957 
3958 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3959 	if (!buf)
3960 		return;
3961 
3962 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3963 
3964 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3965 
3966 	kfree(buf);
3967 }
3968 
3969 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3970 {
3971 #define MAX_RESET_FAIL_CNT 5
3972 
3973 	if (hdev->reset_pending) {
3974 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3975 			 hdev->reset_pending);
3976 		return true;
3977 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3978 		   HCLGE_RESET_INT_M) {
3979 		dev_info(&hdev->pdev->dev,
3980 			 "reset failed because new reset interrupt\n");
3981 		hclge_clear_reset_cause(hdev);
3982 		return false;
3983 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3984 		hdev->rst_stats.reset_fail_cnt++;
3985 		set_bit(hdev->reset_type, &hdev->reset_pending);
3986 		dev_info(&hdev->pdev->dev,
3987 			 "re-schedule reset task(%u)\n",
3988 			 hdev->rst_stats.reset_fail_cnt);
3989 		return true;
3990 	}
3991 
3992 	hclge_clear_reset_cause(hdev);
3993 
	/* recover the handshake status when the reset fails */
3995 	hclge_reset_handshake(hdev, true);
3996 
3997 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3998 
3999 	hclge_show_rst_info(hdev);
4000 
4001 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4002 
4003 	return false;
4004 }
4005 
4006 static void hclge_update_reset_level(struct hclge_dev *hdev)
4007 {
4008 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4009 	enum hnae3_reset_type reset_level;
4010 
4011 	/* reset request will not be set during reset, so clear
4012 	 * pending reset request to avoid unnecessary reset
4013 	 * caused by the same reason.
4014 	 */
4015 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4016 
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
4021 	reset_level = hclge_get_reset_level(ae_dev,
4022 					    &hdev->default_reset_request);
4023 	if (reset_level != HNAE3_NONE_RESET)
4024 		set_bit(reset_level, &hdev->reset_request);
4025 }
4026 
4027 static int hclge_set_rst_done(struct hclge_dev *hdev)
4028 {
4029 	struct hclge_pf_rst_done_cmd *req;
4030 	struct hclge_desc desc;
4031 	int ret;
4032 
4033 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4034 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4035 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4036 
4037 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4038 	/* To be compatible with the old firmware, which does not support
4039 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4040 	 * return success
4041 	 */
4042 	if (ret == -EOPNOTSUPP) {
4043 		dev_warn(&hdev->pdev->dev,
4044 			 "current firmware does not support command(0x%x)!\n",
4045 			 HCLGE_OPC_PF_RST_DONE);
4046 		return 0;
4047 	} else if (ret) {
4048 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4049 			ret);
4050 	}
4051 
4052 	return ret;
4053 }
4054 
4055 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4056 {
4057 	int ret = 0;
4058 
4059 	switch (hdev->reset_type) {
4060 	case HNAE3_FUNC_RESET:
4061 	case HNAE3_FLR_RESET:
4062 		ret = hclge_set_all_vf_rst(hdev, false);
4063 		break;
4064 	case HNAE3_GLOBAL_RESET:
4065 	case HNAE3_IMP_RESET:
4066 		ret = hclge_set_rst_done(hdev);
4067 		break;
4068 	default:
4069 		break;
4070 	}
4071 
	/* clear the handshake status after re-initialization is done */
4073 	hclge_reset_handshake(hdev, false);
4074 
4075 	return ret;
4076 }
4077 
4078 static int hclge_reset_stack(struct hclge_dev *hdev)
4079 {
4080 	int ret;
4081 
4082 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4083 	if (ret)
4084 		return ret;
4085 
4086 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4087 	if (ret)
4088 		return ret;
4089 
4090 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4091 }
4092 
4093 static int hclge_reset_prepare(struct hclge_dev *hdev)
4094 {
4095 	int ret;
4096 
4097 	hdev->rst_stats.reset_cnt++;
4098 	/* perform reset of the stack & ae device for a client */
4099 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4100 	if (ret)
4101 		return ret;
4102 
4103 	rtnl_lock();
4104 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4105 	rtnl_unlock();
4106 	if (ret)
4107 		return ret;
4108 
4109 	return hclge_reset_prepare_wait(hdev);
4110 }
4111 
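/* Rebuild the device after the hardware reset has completed: re-initialize
 * the ae device and the NIC/RoCE clients, clear the reset cause and bring
 * the clients back up, then update the reset statistics and level.
 */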
4112 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4113 {
4114 	int ret;
4115 
4116 	hdev->rst_stats.hw_reset_done_cnt++;
4117 
4118 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4119 	if (ret)
4120 		return ret;
4121 
4122 	rtnl_lock();
4123 	ret = hclge_reset_stack(hdev);
4124 	rtnl_unlock();
4125 	if (ret)
4126 		return ret;
4127 
4128 	hclge_clear_reset_cause(hdev);
4129 
4130 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE notify error if the reset has already failed
	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
	 */
4134 	if (ret &&
4135 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4136 		return ret;
4137 
4138 	ret = hclge_reset_prepare_up(hdev);
4139 	if (ret)
4140 		return ret;
4141 
4142 	rtnl_lock();
4143 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4144 	rtnl_unlock();
4145 	if (ret)
4146 		return ret;
4147 
4148 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4149 	if (ret)
4150 		return ret;
4151 
4152 	hdev->last_reset_time = jiffies;
4153 	hdev->rst_stats.reset_fail_cnt = 0;
4154 	hdev->rst_stats.reset_done_cnt++;
4155 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4156 
4157 	hclge_update_reset_level(hdev);
4158 
4159 	return 0;
4160 }
4161 
4162 static void hclge_reset(struct hclge_dev *hdev)
4163 {
4164 	if (hclge_reset_prepare(hdev))
4165 		goto err_reset;
4166 
4167 	if (hclge_reset_wait(hdev))
4168 		goto err_reset;
4169 
4170 	if (hclge_reset_rebuild(hdev))
4171 		goto err_reset;
4172 
4173 	return;
4174 
4175 err_reset:
4176 	if (hclge_reset_err_handle(hdev))
4177 		hclge_reset_task_schedule(hdev);
4178 }
4179 
4180 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4181 {
4182 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4183 	struct hclge_dev *hdev = ae_dev->priv;
4184 
	/* We might end up getting called broadly because of 2 cases below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * Check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog
	 * hit us again. We will know this if the last reset request did not
	 * occur very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat of the
	 * most recent request, we want to throttle it, so we will not allow
	 * it again before HCLGE_RESET_INTERVAL has elapsed.
	 */
4199 
4200 	if (time_before(jiffies, (hdev->last_reset_time +
4201 				  HCLGE_RESET_INTERVAL))) {
4202 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4203 		return;
4204 	}
4205 
4206 	if (hdev->default_reset_request) {
4207 		hdev->reset_level =
4208 			hclge_get_reset_level(ae_dev,
4209 					      &hdev->default_reset_request);
4210 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4211 		hdev->reset_level = HNAE3_FUNC_RESET;
4212 	}
4213 
4214 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4215 		 hdev->reset_level);
4216 
4217 	/* request reset & schedule reset task */
4218 	set_bit(hdev->reset_level, &hdev->reset_request);
4219 	hclge_reset_task_schedule(hdev);
4220 
4221 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4222 		hdev->reset_level++;
4223 }
4224 
4225 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4226 					enum hnae3_reset_type rst_type)
4227 {
4228 	struct hclge_dev *hdev = ae_dev->priv;
4229 
4230 	set_bit(rst_type, &hdev->default_reset_request);
4231 }
4232 
4233 static void hclge_reset_timer(struct timer_list *t)
4234 {
4235 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4236 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4240 	if (!hdev->default_reset_request)
4241 		return;
4242 
4243 	dev_info(&hdev->pdev->dev,
4244 		 "triggering reset in reset timer\n");
4245 	hclge_reset_event(hdev->pdev, NULL);
4246 }
4247 
4248 static void hclge_reset_subtask(struct hclge_dev *hdev)
4249 {
4250 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4251 
	/* check if there is any ongoing reset in the hardware. This status
	 * can be checked from reset_pending. If there is, we need to wait
	 * for hardware to complete the reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. else, we can come back later to check this status, so
	 *       re-schedule now.
	 */
4261 	hdev->last_reset_time = jiffies;
4262 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4263 	if (hdev->reset_type != HNAE3_NONE_RESET)
4264 		hclge_reset(hdev);
4265 
4266 	/* check if we got any *new* reset requests to be honored */
4267 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4268 	if (hdev->reset_type != HNAE3_NONE_RESET)
4269 		hclge_do_reset(hdev);
4270 
4271 	hdev->reset_type = HNAE3_NONE_RESET;
4272 }
4273 
4274 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4275 {
4276 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4277 	enum hnae3_reset_type reset_type;
4278 
4279 	if (ae_dev->hw_err_reset_req) {
4280 		reset_type = hclge_get_reset_level(ae_dev,
4281 						   &ae_dev->hw_err_reset_req);
4282 		hclge_set_def_reset_request(ae_dev, reset_type);
4283 	}
4284 
4285 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4286 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4287 
	/* enable interrupt after error handling is complete */
4289 	hclge_enable_vector(&hdev->misc_vector, true);
4290 }
4291 
4292 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4293 {
4294 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4295 
4296 	ae_dev->hw_err_reset_req = 0;
4297 
4298 	if (hclge_find_error_source(hdev)) {
4299 		hclge_handle_error_info_log(ae_dev);
4300 		hclge_handle_mac_tnl(hdev);
4301 	}
4302 
4303 	hclge_handle_err_reset_request(hdev);
4304 }
4305 
4306 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4307 {
4308 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4309 	struct device *dev = &hdev->pdev->dev;
4310 	u32 msix_sts_reg;
4311 
4312 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4313 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4314 		if (hclge_handle_hw_msix_error
4315 				(hdev, &hdev->default_reset_request))
4316 			dev_info(dev, "received msix interrupt 0x%x\n",
4317 				 msix_sts_reg);
4318 	}
4319 
4320 	hclge_handle_hw_ras_error(ae_dev);
4321 
4322 	hclge_handle_err_reset_request(hdev);
4323 }
4324 
4325 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4326 {
4327 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4328 		return;
4329 
4330 	if (hnae3_dev_ras_imp_supported(hdev))
4331 		hclge_handle_err_recovery(hdev);
4332 	else
4333 		hclge_misc_err_recovery(hdev);
4334 }
4335 
4336 static void hclge_reset_service_task(struct hclge_dev *hdev)
4337 {
4338 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4339 		return;
4340 
4341 	down(&hdev->reset_sem);
4342 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4343 
4344 	hclge_reset_subtask(hdev);
4345 
4346 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4347 	up(&hdev->reset_sem);
4348 }
4349 
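/* Mark a VF vport as not alive when it has shown no activity for more than
 * 8 seconds; vports that are not alive fall back to the default frame size.
 */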
4350 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4351 {
4352 	int i;
4353 
	/* start from vport 1, since the PF (vport 0) is always alive */
4355 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4356 		struct hclge_vport *vport = &hdev->vport[i];
4357 
4358 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4359 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4360 
4361 		/* If vf is not alive, set to default value */
4362 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4363 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4364 	}
4365 }
4366 
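/* Periodic housekeeping: link status, MAC table, promisc mode and FD table
 * are synced on every pass, while the heavier work (vport alive, stats,
 * port info, VLAN filter and ARFS expiry) is throttled to at most once per
 * second via last_serv_processed.
 */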
4367 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4368 {
4369 	unsigned long delta = round_jiffies_relative(HZ);
4370 
4371 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4372 		return;
4373 
4374 	/* Always handle the link updating to make sure link state is
4375 	 * updated when it is triggered by mbx.
4376 	 */
4377 	hclge_update_link_status(hdev);
4378 	hclge_sync_mac_table(hdev);
4379 	hclge_sync_promisc_mode(hdev);
4380 	hclge_sync_fd_table(hdev);
4381 
4382 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4383 		delta = jiffies - hdev->last_serv_processed;
4384 
4385 		if (delta < round_jiffies_relative(HZ)) {
4386 			delta = round_jiffies_relative(HZ) - delta;
4387 			goto out;
4388 		}
4389 	}
4390 
4391 	hdev->serv_processed_cnt++;
4392 	hclge_update_vport_alive(hdev);
4393 
4394 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4395 		hdev->last_serv_processed = jiffies;
4396 		goto out;
4397 	}
4398 
4399 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4400 		hclge_update_stats_for_all(hdev);
4401 
4402 	hclge_update_port_info(hdev);
4403 	hclge_sync_vlan_filter(hdev);
4404 
4405 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4406 		hclge_rfs_filter_expire(hdev);
4407 
4408 	hdev->last_serv_processed = jiffies;
4409 
4410 out:
4411 	hclge_task_schedule(hdev, delta);
4412 }
4413 
4414 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4415 {
4416 	unsigned long flags;
4417 
4418 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4419 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4420 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4421 		return;
4422 
	/* to prevent racing with the irq handler */
4424 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4425 
4426 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4427 	 * handler may handle it just before spin_lock_irqsave().
4428 	 */
4429 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4430 		hclge_ptp_clean_tx_hwts(hdev);
4431 
4432 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4433 }
4434 
4435 static void hclge_service_task(struct work_struct *work)
4436 {
4437 	struct hclge_dev *hdev =
4438 		container_of(work, struct hclge_dev, service_task.work);
4439 
4440 	hclge_errhand_service_task(hdev);
4441 	hclge_reset_service_task(hdev);
4442 	hclge_ptp_service_task(hdev);
4443 	hclge_mailbox_service_task(hdev);
4444 	hclge_periodic_service_task(hdev);
4445 
4446 	/* Handle error recovery, reset and mbx again in case periodical task
4447 	 * delays the handling by calling hclge_task_schedule() in
4448 	 * hclge_periodic_service_task().
4449 	 */
4450 	hclge_errhand_service_task(hdev);
4451 	hclge_reset_service_task(hdev);
4452 	hclge_mailbox_service_task(hdev);
4453 }
4454 
4455 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4456 {
4457 	/* VF handle has no client */
4458 	if (!handle->client)
4459 		return container_of(handle, struct hclge_vport, nic);
4460 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4461 		return container_of(handle, struct hclge_vport, roce);
4462 	else
4463 		return container_of(handle, struct hclge_vport, nic);
4464 }
4465 
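/* Fill in the IRQ number and register I/O address for vector @idx; vectors
 * beyond HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 live in the extended register
 * region.
 */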
4466 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4467 				  struct hnae3_vector_info *vector_info)
4468 {
4469 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4470 
4471 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4472 
	/* an extended offset is needed to configure vectors >= 64 */
4474 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4475 		vector_info->io_addr = hdev->hw.io_base +
4476 				HCLGE_VECTOR_REG_BASE +
4477 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4478 	else
4479 		vector_info->io_addr = hdev->hw.io_base +
4480 				HCLGE_VECTOR_EXT_REG_BASE +
4481 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4482 				HCLGE_VECTOR_REG_OFFSET_H +
4483 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4484 				HCLGE_VECTOR_REG_OFFSET;
4485 
4486 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4487 	hdev->vector_irq[idx] = vector_info->vector;
4488 }
4489 
4490 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4491 			    struct hnae3_vector_info *vector_info)
4492 {
4493 	struct hclge_vport *vport = hclge_get_vport(handle);
4494 	struct hnae3_vector_info *vector = vector_info;
4495 	struct hclge_dev *hdev = vport->back;
4496 	int alloc = 0;
4497 	u16 i = 0;
4498 	u16 j;
4499 
4500 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4501 	vector_num = min(hdev->num_msi_left, vector_num);
4502 
4503 	for (j = 0; j < vector_num; j++) {
4504 		while (++i < hdev->num_nic_msi) {
4505 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4506 				hclge_get_vector_info(hdev, i, vector);
4507 				vector++;
4508 				alloc++;
4509 
4510 				break;
4511 			}
4512 		}
4513 	}
4514 	hdev->num_msi_left -= alloc;
4515 	hdev->num_msi_used += alloc;
4516 
4517 	return alloc;
4518 }
4519 
4520 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4521 {
4522 	int i;
4523 
4524 	for (i = 0; i < hdev->num_msi; i++)
4525 		if (vector == hdev->vector_irq[i])
4526 			return i;
4527 
4528 	return -EINVAL;
4529 }
4530 
4531 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4532 {
4533 	struct hclge_vport *vport = hclge_get_vport(handle);
4534 	struct hclge_dev *hdev = vport->back;
4535 	int vector_id;
4536 
4537 	vector_id = hclge_get_vector_index(hdev, vector);
4538 	if (vector_id < 0) {
4539 		dev_err(&hdev->pdev->dev,
4540 			"Get vector index fail. vector = %d\n", vector);
4541 		return vector_id;
4542 	}
4543 
4544 	hclge_free_vector(hdev, vector_id);
4545 
4546 	return 0;
4547 }
4548 
4549 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4550 {
4551 	return HCLGE_RSS_KEY_SIZE;
4552 }
4553 
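/* Program the RSS hash algorithm and hash key. The key is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per chunk,
 * with the chunk offset carried in hash_config.
 */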
4554 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4555 				  const u8 hfunc, const u8 *key)
4556 {
4557 	struct hclge_rss_config_cmd *req;
4558 	unsigned int key_offset = 0;
4559 	struct hclge_desc desc;
4560 	int key_counts;
4561 	int key_size;
4562 	int ret;
4563 
4564 	key_counts = HCLGE_RSS_KEY_SIZE;
4565 	req = (struct hclge_rss_config_cmd *)desc.data;
4566 
4567 	while (key_counts) {
4568 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4569 					   false);
4570 
4571 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4572 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4573 
4574 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4575 		memcpy(req->hash_key,
4576 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4577 
4578 		key_counts -= key_size;
4579 		key_offset++;
4580 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4581 		if (ret) {
4582 			dev_err(&hdev->pdev->dev,
4583 				"Configure RSS config fail, status = %d\n",
4584 				ret);
4585 			return ret;
4586 		}
4587 	}
4588 	return 0;
4589 }
4590 
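/* Program the RSS indirection table, HCLGE_RSS_CFG_TBL_SIZE entries per
 * command descriptor. The low 8 bits of each queue id go into rss_qid_l,
 * and the extra MSB is packed bit by bit into rss_qid_h.
 */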
4591 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4592 {
4593 	struct hclge_rss_indirection_table_cmd *req;
4594 	struct hclge_desc desc;
4595 	int rss_cfg_tbl_num;
4596 	u8 rss_msb_oft;
4597 	u8 rss_msb_val;
4598 	int ret;
4599 	u16 qid;
4600 	int i;
4601 	u32 j;
4602 
4603 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4604 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4605 			  HCLGE_RSS_CFG_TBL_SIZE;
4606 
4607 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4608 		hclge_cmd_setup_basic_desc
4609 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4610 
4611 		req->start_table_index =
4612 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4613 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4614 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4615 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4616 			req->rss_qid_l[j] = qid & 0xff;
4617 			rss_msb_oft =
4618 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4619 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4620 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4621 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4622 		}
4623 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4624 		if (ret) {
4625 			dev_err(&hdev->pdev->dev,
4626 				"Configure rss indir table fail,status = %d\n",
4627 				ret);
4628 			return ret;
4629 		}
4630 	}
4631 	return 0;
4632 }
4633 
4634 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4635 				 u16 *tc_size, u16 *tc_offset)
4636 {
4637 	struct hclge_rss_tc_mode_cmd *req;
4638 	struct hclge_desc desc;
4639 	int ret;
4640 	int i;
4641 
4642 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4643 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4644 
4645 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4646 		u16 mode = 0;
4647 
4648 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4649 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4650 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4651 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4652 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4653 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4654 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4655 
4656 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4657 	}
4658 
4659 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4660 	if (ret)
4661 		dev_err(&hdev->pdev->dev,
4662 			"Configure rss tc mode fail, status = %d\n", ret);
4663 
4664 	return ret;
4665 }
4666 
4667 static void hclge_get_rss_type(struct hclge_vport *vport)
4668 {
4669 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4670 	    vport->rss_tuple_sets.ipv4_udp_en ||
4671 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4672 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4673 	    vport->rss_tuple_sets.ipv6_udp_en ||
4674 	    vport->rss_tuple_sets.ipv6_sctp_en)
4675 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4676 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4677 		 vport->rss_tuple_sets.ipv6_fragment_en)
4678 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4679 	else
4680 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4681 }
4682 
4683 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4684 {
4685 	struct hclge_rss_input_tuple_cmd *req;
4686 	struct hclge_desc desc;
4687 	int ret;
4688 
4689 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4690 
4691 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4692 
4693 	/* Get the tuple cfg from pf */
4694 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4695 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4696 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4697 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4698 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4699 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4700 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4701 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4702 	hclge_get_rss_type(&hdev->vport[0]);
4703 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4704 	if (ret)
4705 		dev_err(&hdev->pdev->dev,
4706 			"Configure rss input fail, status = %d\n", ret);
4707 	return ret;
4708 }
4709 
4710 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4711 			 u8 *key, u8 *hfunc)
4712 {
4713 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4714 	struct hclge_vport *vport = hclge_get_vport(handle);
4715 	int i;
4716 
4717 	/* Get hash algorithm */
4718 	if (hfunc) {
4719 		switch (vport->rss_algo) {
4720 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4721 			*hfunc = ETH_RSS_HASH_TOP;
4722 			break;
4723 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4724 			*hfunc = ETH_RSS_HASH_XOR;
4725 			break;
4726 		default:
4727 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4728 			break;
4729 		}
4730 	}
4731 
4732 	/* Get the RSS Key required by the user */
4733 	if (key)
4734 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4735 
4736 	/* Get indirect table */
4737 	if (indir)
4738 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4739 			indir[i] =  vport->rss_indirection_tbl[i];
4740 
4741 	return 0;
4742 }
4743 
4744 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4745 			 const  u8 *key, const  u8 hfunc)
4746 {
4747 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4748 	struct hclge_vport *vport = hclge_get_vport(handle);
4749 	struct hclge_dev *hdev = vport->back;
4750 	u8 hash_algo;
4751 	int ret, i;
4752 
	/* Set the RSS Hash Key if specified by the user */
4754 	if (key) {
4755 		switch (hfunc) {
4756 		case ETH_RSS_HASH_TOP:
4757 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4758 			break;
4759 		case ETH_RSS_HASH_XOR:
4760 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4761 			break;
4762 		case ETH_RSS_HASH_NO_CHANGE:
4763 			hash_algo = vport->rss_algo;
4764 			break;
4765 		default:
4766 			return -EINVAL;
4767 		}
4768 
4769 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4770 		if (ret)
4771 			return ret;
4772 
		/* Update the shadow RSS key with the user specified key */
4774 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4775 		vport->rss_algo = hash_algo;
4776 	}
4777 
4778 	/* Update the shadow RSS table with user specified qids */
4779 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4780 		vport->rss_indirection_tbl[i] = indir[i];
4781 
4782 	/* Update the hardware */
4783 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4784 }
4785 
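/* Translate the ethtool RXH_* flags in @nfc into the hardware RSS tuple
 * bits; SCTP flows additionally set HCLGE_V_TAG_BIT.
 */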
4786 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4787 {
4788 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4789 
4790 	if (nfc->data & RXH_L4_B_2_3)
4791 		hash_sets |= HCLGE_D_PORT_BIT;
4792 	else
4793 		hash_sets &= ~HCLGE_D_PORT_BIT;
4794 
4795 	if (nfc->data & RXH_IP_SRC)
4796 		hash_sets |= HCLGE_S_IP_BIT;
4797 	else
4798 		hash_sets &= ~HCLGE_S_IP_BIT;
4799 
4800 	if (nfc->data & RXH_IP_DST)
4801 		hash_sets |= HCLGE_D_IP_BIT;
4802 	else
4803 		hash_sets &= ~HCLGE_D_IP_BIT;
4804 
4805 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4806 		hash_sets |= HCLGE_V_TAG_BIT;
4807 
4808 	return hash_sets;
4809 }
4810 
4811 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4812 				    struct ethtool_rxnfc *nfc,
4813 				    struct hclge_rss_input_tuple_cmd *req)
4814 {
4815 	struct hclge_dev *hdev = vport->back;
4816 	u8 tuple_sets;
4817 
4818 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4819 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4820 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4821 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4822 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4823 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4824 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4825 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4826 
4827 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4828 	switch (nfc->flow_type) {
4829 	case TCP_V4_FLOW:
4830 		req->ipv4_tcp_en = tuple_sets;
4831 		break;
4832 	case TCP_V6_FLOW:
4833 		req->ipv6_tcp_en = tuple_sets;
4834 		break;
4835 	case UDP_V4_FLOW:
4836 		req->ipv4_udp_en = tuple_sets;
4837 		break;
4838 	case UDP_V6_FLOW:
4839 		req->ipv6_udp_en = tuple_sets;
4840 		break;
4841 	case SCTP_V4_FLOW:
4842 		req->ipv4_sctp_en = tuple_sets;
4843 		break;
4844 	case SCTP_V6_FLOW:
4845 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4846 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4847 			return -EINVAL;
4848 
4849 		req->ipv6_sctp_en = tuple_sets;
4850 		break;
4851 	case IPV4_FLOW:
4852 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4853 		break;
4854 	case IPV6_FLOW:
4855 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4856 		break;
4857 	default:
4858 		return -EINVAL;
4859 	}
4860 
4861 	return 0;
4862 }
4863 
4864 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4865 			       struct ethtool_rxnfc *nfc)
4866 {
4867 	struct hclge_vport *vport = hclge_get_vport(handle);
4868 	struct hclge_dev *hdev = vport->back;
4869 	struct hclge_rss_input_tuple_cmd *req;
4870 	struct hclge_desc desc;
4871 	int ret;
4872 
4873 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4874 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4875 		return -EINVAL;
4876 
4877 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4878 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4879 
4880 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4881 	if (ret) {
4882 		dev_err(&hdev->pdev->dev,
4883 			"failed to init rss tuple cmd, ret = %d\n", ret);
4884 		return ret;
4885 	}
4886 
4887 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4888 	if (ret) {
4889 		dev_err(&hdev->pdev->dev,
4890 			"Set rss tuple fail, status = %d\n", ret);
4891 		return ret;
4892 	}
4893 
4894 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4895 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4896 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4897 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4898 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4899 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4900 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4901 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4902 	hclge_get_rss_type(vport);
4903 	return 0;
4904 }
4905 
4906 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4907 				     u8 *tuple_sets)
4908 {
4909 	switch (flow_type) {
4910 	case TCP_V4_FLOW:
4911 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4912 		break;
4913 	case UDP_V4_FLOW:
4914 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4915 		break;
4916 	case TCP_V6_FLOW:
4917 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4918 		break;
4919 	case UDP_V6_FLOW:
4920 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4921 		break;
4922 	case SCTP_V4_FLOW:
4923 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4924 		break;
4925 	case SCTP_V6_FLOW:
4926 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4927 		break;
4928 	case IPV4_FLOW:
4929 	case IPV6_FLOW:
4930 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4931 		break;
4932 	default:
4933 		return -EINVAL;
4934 	}
4935 
4936 	return 0;
4937 }
4938 
4939 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4940 {
4941 	u64 tuple_data = 0;
4942 
4943 	if (tuple_sets & HCLGE_D_PORT_BIT)
4944 		tuple_data |= RXH_L4_B_2_3;
4945 	if (tuple_sets & HCLGE_S_PORT_BIT)
4946 		tuple_data |= RXH_L4_B_0_1;
4947 	if (tuple_sets & HCLGE_D_IP_BIT)
4948 		tuple_data |= RXH_IP_DST;
4949 	if (tuple_sets & HCLGE_S_IP_BIT)
4950 		tuple_data |= RXH_IP_SRC;
4951 
4952 	return tuple_data;
4953 }
4954 
4955 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4956 			       struct ethtool_rxnfc *nfc)
4957 {
4958 	struct hclge_vport *vport = hclge_get_vport(handle);
4959 	u8 tuple_sets;
4960 	int ret;
4961 
4962 	nfc->data = 0;
4963 
4964 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4965 	if (ret || !tuple_sets)
4966 		return ret;
4967 
4968 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4969 
4970 	return 0;
4971 }
4972 
4973 static int hclge_get_tc_size(struct hnae3_handle *handle)
4974 {
4975 	struct hclge_vport *vport = hclge_get_vport(handle);
4976 	struct hclge_dev *hdev = vport->back;
4977 
4978 	return hdev->pf_rss_size_max;
4979 }
4980 
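/* Configure the per-TC RSS mode: for each TC enabled in hw_tc_map, tc_size
 * is the log2 of its rss_size rounded up to a power of two and tc_offset is
 * the TC's first TQP.
 */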
4981 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4982 {
4983 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4984 	struct hclge_vport *vport = hdev->vport;
4985 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4986 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4987 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4988 	struct hnae3_tc_info *tc_info;
4989 	u16 roundup_size;
4990 	u16 rss_size;
4991 	int i;
4992 
4993 	tc_info = &vport->nic.kinfo.tc_info;
4994 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4995 		rss_size = tc_info->tqp_count[i];
4996 		tc_valid[i] = 0;
4997 
4998 		if (!(hdev->hw_tc_map & BIT(i)))
4999 			continue;
5000 
		/* tc_size set to hardware is the log2 of roundup power of two
		 * of rss_size, the actual queue size is limited by the
		 * indirection table.
		 */
5005 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5006 		    rss_size == 0) {
5007 			dev_err(&hdev->pdev->dev,
5008 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
5009 				rss_size);
5010 			return -EINVAL;
5011 		}
5012 
5013 		roundup_size = roundup_pow_of_two(rss_size);
5014 		roundup_size = ilog2(roundup_size);
5015 
5016 		tc_valid[i] = 1;
5017 		tc_size[i] = roundup_size;
5018 		tc_offset[i] = tc_info->tqp_offset[i];
5019 	}
5020 
5021 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5022 }
5023 
5024 int hclge_rss_init_hw(struct hclge_dev *hdev)
5025 {
5026 	struct hclge_vport *vport = hdev->vport;
5027 	u16 *rss_indir = vport[0].rss_indirection_tbl;
5028 	u8 *key = vport[0].rss_hash_key;
5029 	u8 hfunc = vport[0].rss_algo;
5030 	int ret;
5031 
5032 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
5033 	if (ret)
5034 		return ret;
5035 
5036 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5037 	if (ret)
5038 		return ret;
5039 
5040 	ret = hclge_set_rss_input_tuple(hdev);
5041 	if (ret)
5042 		return ret;
5043 
5044 	return hclge_init_rss_tc_mode(hdev);
5045 }
5046 
5047 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5048 {
5049 	struct hclge_vport *vport = &hdev->vport[0];
5050 	int i;
5051 
5052 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5053 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5054 }
5055 
5056 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5057 {
5058 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5059 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5060 	struct hclge_vport *vport = &hdev->vport[0];
5061 	u16 *rss_ind_tbl;
5062 
5063 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5064 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5065 
5066 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5067 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5068 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5069 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5070 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5071 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5072 	vport->rss_tuple_sets.ipv6_sctp_en =
5073 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5074 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5075 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5076 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5077 
5078 	vport->rss_algo = rss_algo;
5079 
5080 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5081 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5082 	if (!rss_ind_tbl)
5083 		return -ENOMEM;
5084 
5085 	vport->rss_indirection_tbl = rss_ind_tbl;
5086 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5087 
5088 	hclge_rss_indir_init_cfg(hdev);
5089 
5090 	return 0;
5091 }
5092 
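/* Map (en == true) or unmap the rings in @ring_chain to/from @vector_id.
 * Ring entries are batched into command descriptors holding up to
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries each.
 */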
5093 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5094 				int vector_id, bool en,
5095 				struct hnae3_ring_chain_node *ring_chain)
5096 {
5097 	struct hclge_dev *hdev = vport->back;
5098 	struct hnae3_ring_chain_node *node;
5099 	struct hclge_desc desc;
5100 	struct hclge_ctrl_vector_chain_cmd *req =
5101 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5102 	enum hclge_cmd_status status;
5103 	enum hclge_opcode_type op;
5104 	u16 tqp_type_and_id;
5105 	int i;
5106 
5107 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5108 	hclge_cmd_setup_basic_desc(&desc, op, false);
5109 	req->int_vector_id_l = hnae3_get_field(vector_id,
5110 					       HCLGE_VECTOR_ID_L_M,
5111 					       HCLGE_VECTOR_ID_L_S);
5112 	req->int_vector_id_h = hnae3_get_field(vector_id,
5113 					       HCLGE_VECTOR_ID_H_M,
5114 					       HCLGE_VECTOR_ID_H_S);
5115 
5116 	i = 0;
5117 	for (node = ring_chain; node; node = node->next) {
5118 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5119 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5120 				HCLGE_INT_TYPE_S,
5121 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5122 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5123 				HCLGE_TQP_ID_S, node->tqp_index);
5124 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5125 				HCLGE_INT_GL_IDX_S,
5126 				hnae3_get_field(node->int_gl_idx,
5127 						HNAE3_RING_GL_IDX_M,
5128 						HNAE3_RING_GL_IDX_S));
5129 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5130 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5131 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5132 			req->vfid = vport->vport_id;
5133 
5134 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5135 			if (status) {
5136 				dev_err(&hdev->pdev->dev,
5137 					"Map TQP fail, status is %d.\n",
5138 					status);
5139 				return -EIO;
5140 			}
5141 			i = 0;
5142 
5143 			hclge_cmd_setup_basic_desc(&desc,
5144 						   op,
5145 						   false);
5146 			req->int_vector_id_l =
5147 				hnae3_get_field(vector_id,
5148 						HCLGE_VECTOR_ID_L_M,
5149 						HCLGE_VECTOR_ID_L_S);
5150 			req->int_vector_id_h =
5151 				hnae3_get_field(vector_id,
5152 						HCLGE_VECTOR_ID_H_M,
5153 						HCLGE_VECTOR_ID_H_S);
5154 		}
5155 	}
5156 
5157 	if (i > 0) {
5158 		req->int_cause_num = i;
5159 		req->vfid = vport->vport_id;
5160 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5161 		if (status) {
5162 			dev_err(&hdev->pdev->dev,
5163 				"Map TQP fail, status is %d.\n", status);
5164 			return -EIO;
5165 		}
5166 	}
5167 
5168 	return 0;
5169 }
5170 
5171 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5172 				    struct hnae3_ring_chain_node *ring_chain)
5173 {
5174 	struct hclge_vport *vport = hclge_get_vport(handle);
5175 	struct hclge_dev *hdev = vport->back;
5176 	int vector_id;
5177 
5178 	vector_id = hclge_get_vector_index(hdev, vector);
5179 	if (vector_id < 0) {
5180 		dev_err(&hdev->pdev->dev,
5181 			"failed to get vector index. vector=%d\n", vector);
5182 		return vector_id;
5183 	}
5184 
5185 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5186 }
5187 
5188 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5189 				       struct hnae3_ring_chain_node *ring_chain)
5190 {
5191 	struct hclge_vport *vport = hclge_get_vport(handle);
5192 	struct hclge_dev *hdev = vport->back;
5193 	int vector_id, ret;
5194 
5195 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5196 		return 0;
5197 
5198 	vector_id = hclge_get_vector_index(hdev, vector);
5199 	if (vector_id < 0) {
5200 		dev_err(&handle->pdev->dev,
5201 			"Get vector index fail. ret =%d\n", vector_id);
5202 		return vector_id;
5203 	}
5204 
5205 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5206 	if (ret)
5207 		dev_err(&handle->pdev->dev,
5208 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5209 			vector_id, ret);
5210 
5211 	return ret;
5212 }
5213 
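/* Configure unicast/multicast/broadcast promiscuous mode for function
 * @vf_id. extend_promisc carries the per-direction enable bits, while
 * promisc keeps the layout expected by DEVICE_VERSION_V1/2. Unicast TX
 * promisc is suppressed when the limit-promisc private flag is set.
 */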
5214 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5215 				      bool en_uc, bool en_mc, bool en_bc)
5216 {
5217 	struct hclge_vport *vport = &hdev->vport[vf_id];
5218 	struct hnae3_handle *handle = &vport->nic;
5219 	struct hclge_promisc_cfg_cmd *req;
5220 	struct hclge_desc desc;
5221 	bool uc_tx_en = en_uc;
5222 	u8 promisc_cfg = 0;
5223 	int ret;
5224 
5225 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5226 
5227 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5228 	req->vf_id = vf_id;
5229 
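	/* when the limit-promisc private flag is set, unicast promisc is
	 * restricted to the RX direction
	 */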
5230 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5231 		uc_tx_en = false;
5232 
5233 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5234 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5235 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5236 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5237 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5238 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5239 	req->extend_promisc = promisc_cfg;
5240 
5241 	/* to be compatible with DEVICE_VERSION_V1/2 */
5242 	promisc_cfg = 0;
5243 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5244 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5245 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5246 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5247 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5248 	req->promisc = promisc_cfg;
5249 
5250 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5251 	if (ret)
5252 		dev_err(&hdev->pdev->dev,
5253 			"failed to set vport %u promisc mode, ret = %d.\n",
5254 			vf_id, ret);
5255 
5256 	return ret;
5257 }
5258 
5259 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5260 				 bool en_mc_pmc, bool en_bc_pmc)
5261 {
5262 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5263 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5264 }
5265 
5266 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5267 				  bool en_mc_pmc)
5268 {
5269 	struct hclge_vport *vport = hclge_get_vport(handle);
5270 	struct hclge_dev *hdev = vport->back;
5271 	bool en_bc_pmc = true;
5272 
5273 	/* For devices whose version is below V2, if broadcast promisc is
5274 	 * enabled, the vlan filter is always bypassed. So broadcast promisc
5275 	 * should be disabled until the user enables promisc mode.
5276 	 */
5277 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5278 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5279 
5280 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5281 					    en_bc_pmc);
5282 }
5283 
5284 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5285 {
5286 	struct hclge_vport *vport = hclge_get_vport(handle);
5287 
5288 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5289 }
5290 
5291 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5292 {
5293 	if (hlist_empty(&hdev->fd_rule_list))
5294 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5295 }
5296 
5297 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5298 {
5299 	if (!test_bit(location, hdev->fd_bmap)) {
5300 		set_bit(location, hdev->fd_bmap);
5301 		hdev->hclge_fd_rule_num++;
5302 	}
5303 }
5304 
5305 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5306 {
5307 	if (test_bit(location, hdev->fd_bmap)) {
5308 		clear_bit(location, hdev->fd_bmap);
5309 		hdev->hclge_fd_rule_num--;
5310 	}
5311 }
5312 
5313 static void hclge_fd_free_node(struct hclge_dev *hdev,
5314 			       struct hclge_fd_rule *rule)
5315 {
5316 	hlist_del(&rule->rule_node);
5317 	kfree(rule);
5318 	hclge_sync_fd_state(hdev);
5319 }
5320 
5321 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5322 				      struct hclge_fd_rule *old_rule,
5323 				      struct hclge_fd_rule *new_rule,
5324 				      enum HCLGE_FD_NODE_STATE state)
5325 {
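	/* merge the new rule/state into the existing node at the same
	 * location, keeping the node's position in the sorted rule list
	 */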
5326 	switch (state) {
5327 	case HCLGE_FD_TO_ADD:
5328 	case HCLGE_FD_ACTIVE:
5329 		/* 1) if the new state is TO_ADD, just replace the old rule
5330 		 * at the same location, no matter what its state is, because
5331 		 * the new rule will be configured to the hardware.
5332 		 * 2) if the new state is ACTIVE, the new rule has already
5333 		 * been configured to the hardware, so just replace the old
5334 		 * rule node at the same location.
5335 		 * 3) in both cases no new node is added to the list, so the
5336 		 * rule number and fd_bmap don't need to be updated.
5337 		 */
5338 		new_rule->rule_node.next = old_rule->rule_node.next;
5339 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5340 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5341 		kfree(new_rule);
5342 		break;
5343 	case HCLGE_FD_DELETED:
5344 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5345 		hclge_fd_free_node(hdev, old_rule);
5346 		break;
5347 	case HCLGE_FD_TO_DEL:
5348 		/* if the new request is TO_DEL and the old rule exists:
5349 		 * 1) if the old rule is already TO_DEL, do nothing, because
5350 		 * rules are deleted by location and the other rule content
5351 		 * doesn't matter.
5352 		 * 2) if the old rule is ACTIVE, change its state to TO_DEL,
5353 		 * so the rule will be deleted when the periodic task is
5354 		 * scheduled.
5355 		 * 3) if the old rule is TO_ADD, the rule hasn't been added to
5356 		 * the hardware yet, so just delete the rule node from
5357 		 * fd_rule_list directly.
5358 		 */
5359 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5360 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5361 			hclge_fd_free_node(hdev, old_rule);
5362 			return;
5363 		}
5364 		old_rule->state = HCLGE_FD_TO_DEL;
5365 		break;
5366 	}
5367 }
5368 
5369 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5370 						u16 location,
5371 						struct hclge_fd_rule **parent)
5372 {
5373 	struct hclge_fd_rule *rule;
5374 	struct hlist_node *node;
5375 
5376 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5377 		if (rule->location == location)
5378 			return rule;
5379 		else if (rule->location > location)
5380 			return NULL;
5381 		/* record the parent node, used to keep the nodes in
5382 		 * fd_rule_list in ascending order.
5383 		 */
5384 		*parent = rule;
5385 	}
5386 
5387 	return NULL;
5388 }
5389 
5390 /* insert fd rule node in ascending order according to rule->location */
5391 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5392 				      struct hclge_fd_rule *rule,
5393 				      struct hclge_fd_rule *parent)
5394 {
5395 	INIT_HLIST_NODE(&rule->rule_node);
5396 
5397 	if (parent)
5398 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5399 	else
5400 		hlist_add_head(&rule->rule_node, hlist);
5401 }
5402 
5403 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5404 				     struct hclge_fd_user_def_cfg *cfg)
5405 {
5406 	struct hclge_fd_user_def_cfg_cmd *req;
5407 	struct hclge_desc desc;
5408 	u16 data = 0;
5409 	int ret;
5410 
5411 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5412 
5413 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5414 
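	/* cfg[0]/cfg[1]/cfg[2] program ol2_cfg/ol3_cfg/ol4_cfg respectively;
	 * each entry is enabled only while it has active references
	 * (ref_cnt > 0)
	 */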
5415 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5416 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5417 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5418 	req->ol2_cfg = cpu_to_le16(data);
5419 
5420 	data = 0;
5421 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5422 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5423 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5424 	req->ol3_cfg = cpu_to_le16(data);
5425 
5426 	data = 0;
5427 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5428 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5429 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5430 	req->ol4_cfg = cpu_to_le16(data);
5431 
5432 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5433 	if (ret)
5434 		dev_err(&hdev->pdev->dev,
5435 			"failed to set fd user def data, ret = %d\n", ret);
5436 	return ret;
5437 }
5438 
5439 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5440 {
5441 	int ret;
5442 
5443 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5444 		return;
5445 
5446 	if (!locked)
5447 		spin_lock_bh(&hdev->fd_rule_lock);
5448 
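	/* if programming the hardware fails, re-arm the CHANGED flag so the
	 * configuration is retried on the next sync
	 */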
5449 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5450 	if (ret)
5451 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5452 
5453 	if (!locked)
5454 		spin_unlock_bh(&hdev->fd_rule_lock);
5455 }
5456 
5457 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5458 					  struct hclge_fd_rule *rule)
5459 {
5460 	struct hlist_head *hlist = &hdev->fd_rule_list;
5461 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5462 	struct hclge_fd_user_def_info *info, *old_info;
5463 	struct hclge_fd_user_def_cfg *cfg;
5464 
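	/* a user-def rule is acceptable if its layer is currently unused,
	 * if it reuses the offset already configured for that layer, or if
	 * it replaces the only rule referencing that layer
	 */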
5465 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5466 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5467 		return 0;
5468 
5469 	/* valid layers start from 1, so minus 1 to get the cfg index */
5470 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5471 	info = &rule->ep.user_def;
5472 
5473 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5474 		return 0;
5475 
5476 	if (cfg->ref_cnt > 1)
5477 		goto error;
5478 
5479 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5480 	if (fd_rule) {
5481 		old_info = &fd_rule->ep.user_def;
5482 		if (info->layer == old_info->layer)
5483 			return 0;
5484 	}
5485 
5486 error:
5487 	dev_err(&hdev->pdev->dev,
5488 		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
5489 		info->layer + 1);
5490 	return -ENOSPC;
5491 }
5492 
5493 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5494 					 struct hclge_fd_rule *rule)
5495 {
5496 	struct hclge_fd_user_def_cfg *cfg;
5497 
5498 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5499 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5500 		return;
5501 
5502 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5503 	if (!cfg->ref_cnt) {
5504 		cfg->offset = rule->ep.user_def.offset;
5505 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5506 	}
5507 	cfg->ref_cnt++;
5508 }
5509 
5510 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5511 					 struct hclge_fd_rule *rule)
5512 {
5513 	struct hclge_fd_user_def_cfg *cfg;
5514 
5515 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5516 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5517 		return;
5518 
5519 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5520 	if (!cfg->ref_cnt)
5521 		return;
5522 
5523 	cfg->ref_cnt--;
5524 	if (!cfg->ref_cnt) {
5525 		cfg->offset = 0;
5526 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5527 	}
5528 }
5529 
5530 static void hclge_update_fd_list(struct hclge_dev *hdev,
5531 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5532 				 struct hclge_fd_rule *new_rule)
5533 {
5534 	struct hlist_head *hlist = &hdev->fd_rule_list;
5535 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5536 
5537 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5538 	if (fd_rule) {
5539 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5540 		if (state == HCLGE_FD_ACTIVE)
5541 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5542 		hclge_sync_fd_user_def_cfg(hdev, true);
5543 
5544 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5545 		return;
5546 	}
5547 
5548 	/* it's unlikely to fail here, because we have checked that the
5549 	 * rule exists before.
5550 	 */
5551 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5552 		dev_warn(&hdev->pdev->dev,
5553 			 "failed to delete fd rule %u, it does not exist\n",
5554 			 location);
5555 		return;
5556 	}
5557 
5558 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5559 	hclge_sync_fd_user_def_cfg(hdev, true);
5560 
5561 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5562 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5563 
5564 	if (state == HCLGE_FD_TO_ADD) {
5565 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5566 		hclge_task_schedule(hdev, 0);
5567 	}
5568 }
5569 
5570 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5571 {
5572 	struct hclge_get_fd_mode_cmd *req;
5573 	struct hclge_desc desc;
5574 	int ret;
5575 
5576 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5577 
5578 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5579 
5580 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5581 	if (ret) {
5582 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5583 		return ret;
5584 	}
5585 
5586 	*fd_mode = req->mode;
5587 
5588 	return ret;
5589 }
5590 
5591 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5592 				   u32 *stage1_entry_num,
5593 				   u32 *stage2_entry_num,
5594 				   u16 *stage1_counter_num,
5595 				   u16 *stage2_counter_num)
5596 {
5597 	struct hclge_get_fd_allocation_cmd *req;
5598 	struct hclge_desc desc;
5599 	int ret;
5600 
5601 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5602 
5603 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5604 
5605 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5606 	if (ret) {
5607 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5608 			ret);
5609 		return ret;
5610 	}
5611 
5612 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5613 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5614 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5615 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5616 
5617 	return ret;
5618 }
5619 
5620 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5621 				   enum HCLGE_FD_STAGE stage_num)
5622 {
5623 	struct hclge_set_fd_key_config_cmd *req;
5624 	struct hclge_fd_key_cfg *stage;
5625 	struct hclge_desc desc;
5626 	int ret;
5627 
5628 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5629 
5630 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5631 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5632 	req->stage = stage_num;
5633 	req->key_select = stage->key_sel;
5634 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5635 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5636 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5637 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5638 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5639 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5640 
5641 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5642 	if (ret)
5643 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5644 
5645 	return ret;
5646 }
5647 
5648 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5649 {
5650 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5651 
5652 	spin_lock_bh(&hdev->fd_rule_lock);
5653 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5654 	spin_unlock_bh(&hdev->fd_rule_lock);
5655 
5656 	hclge_fd_set_user_def_cmd(hdev, cfg);
5657 }
5658 
5659 static int hclge_init_fd_config(struct hclge_dev *hdev)
5660 {
5661 #define LOW_2_WORDS		0x03
5662 	struct hclge_fd_key_cfg *key_cfg;
5663 	int ret;
5664 
5665 	if (!hnae3_dev_fd_supported(hdev))
5666 		return 0;
5667 
5668 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5669 	if (ret)
5670 		return ret;
5671 
5672 	switch (hdev->fd_cfg.fd_mode) {
5673 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5674 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5675 		break;
5676 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5677 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5678 		break;
5679 	default:
5680 		dev_err(&hdev->pdev->dev,
5681 			"Unsupported flow director mode %u\n",
5682 			hdev->fd_cfg.fd_mode);
5683 		return -EOPNOTSUPP;
5684 	}
5685 
5686 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5687 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5688 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5689 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5690 	key_cfg->outer_sipv6_word_en = 0;
5691 	key_cfg->outer_dipv6_word_en = 0;
5692 
5693 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5694 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5695 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5696 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5697 
5698 	/* With the max 400-bit key, MAC address tuples are also supported */
5699 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5700 		key_cfg->tuple_active |=
5701 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5702 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5703 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5704 	}
5705 
5706 	/* roce_type is used to filter roce frames
5707 	 * dst_vport is used to specify the vport the rule applies to
5708 	 */
5709 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5710 
5711 	ret = hclge_get_fd_allocation(hdev,
5712 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5713 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5714 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5715 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5716 	if (ret)
5717 		return ret;
5718 
5719 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5720 }
5721 
5722 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5723 				int loc, u8 *key, bool is_add)
5724 {
5725 	struct hclge_fd_tcam_config_1_cmd *req1;
5726 	struct hclge_fd_tcam_config_2_cmd *req2;
5727 	struct hclge_fd_tcam_config_3_cmd *req3;
5728 	struct hclge_desc desc[3];
5729 	int ret;
5730 
5731 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5732 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5733 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5734 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5735 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5736 
5737 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5738 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5739 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5740 
5741 	req1->stage = stage;
5742 	req1->xy_sel = sel_x ? 1 : 0;
5743 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5744 	req1->index = cpu_to_le32(loc);
5745 	req1->entry_vld = sel_x ? is_add : 0;
5746 
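	/* the TCAM key is split across the tcam_data areas of the three
	 * descriptors in order
	 */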
5747 	if (key) {
5748 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5749 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5750 		       sizeof(req2->tcam_data));
5751 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5752 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5753 	}
5754 
5755 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5756 	if (ret)
5757 		dev_err(&hdev->pdev->dev,
5758 			"config tcam key fail, ret=%d\n",
5759 			ret);
5760 
5761 	return ret;
5762 }
5763 
5764 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5765 			      struct hclge_fd_ad_data *action)
5766 {
5767 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5768 	struct hclge_fd_ad_config_cmd *req;
5769 	struct hclge_desc desc;
5770 	u64 ad_data = 0;
5771 	int ret;
5772 
5773 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5774 
5775 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5776 	req->index = cpu_to_le32(loc);
5777 	req->stage = stage;
5778 
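	/* the rule-id (and, when supported, TC override) fields are built
	 * first and then shifted into the upper 32 bits of ad_data; the
	 * remaining action fields fill the lower 32 bits
	 */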
5779 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5780 		      action->write_rule_id_to_bd);
5781 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5782 			action->rule_id);
5783 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5784 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5785 			      action->override_tc);
5786 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5787 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5788 	}
5789 	ad_data <<= 32;
5790 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5791 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5792 		      action->forward_to_direct_queue);
5793 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5794 			action->queue_id);
5795 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5796 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5797 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5798 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5799 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5800 			action->counter_id);
5801 
5802 	req->ad_data = cpu_to_le64(ad_data);
5803 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5804 	if (ret)
5805 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5806 
5807 	return ret;
5808 }
5809 
5810 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5811 				   struct hclge_fd_rule *rule)
5812 {
5813 	int offset, moffset, ip_offset;
5814 	enum HCLGE_FD_KEY_OPT key_opt;
5815 	u16 tmp_x_s, tmp_y_s;
5816 	u32 tmp_x_l, tmp_y_l;
5817 	u8 *p = (u8 *)rule;
5818 	int i;
5819 
5820 	if (rule->unused_tuple & BIT(tuple_bit))
5821 		return true;
5822 
5823 	key_opt = tuple_key_info[tuple_bit].key_opt;
5824 	offset = tuple_key_info[tuple_bit].offset;
5825 	moffset = tuple_key_info[tuple_bit].moffset;
5826 
5827 	switch (key_opt) {
5828 	case KEY_OPT_U8:
5829 		calc_x(*key_x, p[offset], p[moffset]);
5830 		calc_y(*key_y, p[offset], p[moffset]);
5831 
5832 		return true;
5833 	case KEY_OPT_LE16:
5834 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5835 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5836 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5837 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5838 
5839 		return true;
5840 	case KEY_OPT_LE32:
5841 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5842 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5843 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5844 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5845 
5846 		return true;
5847 	case KEY_OPT_MAC:
5848 		for (i = 0; i < ETH_ALEN; i++) {
5849 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5850 			       p[moffset + i]);
5851 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5852 			       p[moffset + i]);
5853 		}
5854 
5855 		return true;
5856 	case KEY_OPT_IP:
5857 		ip_offset = IPV4_INDEX * sizeof(u32);
5858 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5859 		       *(u32 *)(&p[moffset + ip_offset]));
5860 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5861 		       *(u32 *)(&p[moffset + ip_offset]));
5862 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5863 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5864 
5865 		return true;
5866 	default:
5867 		return false;
5868 	}
5869 }
5870 
5871 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5872 				 u8 vf_id, u8 network_port_id)
5873 {
5874 	u32 port_number = 0;
5875 
5876 	if (port_type == HOST_PORT) {
5877 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5878 				pf_id);
5879 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5880 				vf_id);
5881 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5882 	} else {
5883 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5884 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5885 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5886 	}
5887 
5888 	return port_number;
5889 }
5890 
5891 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5892 				       __le32 *key_x, __le32 *key_y,
5893 				       struct hclge_fd_rule *rule)
5894 {
5895 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5896 	u8 cur_pos = 0, tuple_size, shift_bits;
5897 	unsigned int i;
5898 
5899 	for (i = 0; i < MAX_META_DATA; i++) {
5900 		tuple_size = meta_data_key_info[i].key_length;
5901 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5902 
5903 		switch (tuple_bit) {
5904 		case BIT(ROCE_TYPE):
5905 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5906 			cur_pos += tuple_size;
5907 			break;
5908 		case BIT(DST_VPORT):
5909 			port_number = hclge_get_port_number(HOST_PORT, 0,
5910 							    rule->vf_id, 0);
5911 			hnae3_set_field(meta_data,
5912 					GENMASK(cur_pos + tuple_size, cur_pos),
5913 					cur_pos, port_number);
5914 			cur_pos += tuple_size;
5915 			break;
5916 		default:
5917 			break;
5918 		}
5919 	}
5920 
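	/* cur_pos bits have been packed from bit 0 upwards, so shift the
	 * result left to align the packed fields with the MSB of the 32-bit
	 * meta data key
	 */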
5921 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5922 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5923 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5924 
5925 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5926 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5927 }
5928 
5929 /* A complete key consists of a meta data key and a tuple key.
5930  * The meta data key is stored in the MSB region, the tuple key in the
5931  * LSB region, and unused bits are filled with 0.
5932  */
5933 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5934 			    struct hclge_fd_rule *rule)
5935 {
5936 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5937 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5938 	u8 *cur_key_x, *cur_key_y;
5939 	u8 meta_data_region;
5940 	u8 tuple_size;
5941 	int ret;
5942 	u32 i;
5943 
5944 	memset(key_x, 0, sizeof(key_x));
5945 	memset(key_y, 0, sizeof(key_y));
5946 	cur_key_x = key_x;
5947 	cur_key_y = key_y;
5948 
5949 	for (i = 0; i < MAX_TUPLE; i++) {
5950 		bool tuple_valid;
5951 
5952 		tuple_size = tuple_key_info[i].key_length / 8;
5953 		if (!(key_cfg->tuple_active & BIT(i)))
5954 			continue;
5955 
5956 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5957 						     cur_key_y, rule);
5958 		if (tuple_valid) {
5959 			cur_key_x += tuple_size;
5960 			cur_key_y += tuple_size;
5961 		}
5962 	}
5963 
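	/* the meta data key occupies the last MAX_META_DATA_LENGTH bits of
	 * the key, so its byte offset is the key length minus the meta data
	 * length (both in bytes)
	 */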
5964 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5965 			MAX_META_DATA_LENGTH / 8;
5966 
5967 	hclge_fd_convert_meta_data(key_cfg,
5968 				   (__le32 *)(key_x + meta_data_region),
5969 				   (__le32 *)(key_y + meta_data_region),
5970 				   rule);
5971 
5972 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5973 				   true);
5974 	if (ret) {
5975 		dev_err(&hdev->pdev->dev,
5976 			"fd key_y config fail, loc=%u, ret=%d\n",
5977 			rule->location, ret);
5978 		return ret;
5979 	}
5980 
5981 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5982 				   true);
5983 	if (ret)
5984 		dev_err(&hdev->pdev->dev,
5985 			"fd key_x config fail, loc=%u, ret=%d\n",
5986 			rule->location, ret);
5987 	return ret;
5988 }
5989 
5990 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5991 			       struct hclge_fd_rule *rule)
5992 {
5993 	struct hclge_vport *vport = hdev->vport;
5994 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5995 	struct hclge_fd_ad_data ad_data;
5996 
5997 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5998 	ad_data.ad_id = rule->location;
5999 
6000 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6001 		ad_data.drop_packet = true;
6002 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6003 		ad_data.override_tc = true;
6004 		ad_data.queue_id =
6005 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6006 		ad_data.tc_size =
6007 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6008 	} else {
6009 		ad_data.forward_to_direct_queue = true;
6010 		ad_data.queue_id = rule->queue_id;
6011 	}
6012 
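	/* when stage-1 flow counters are available, pick one per rule by
	 * taking the vf_id modulo the size of the counter pool
	 */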
6013 	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6014 		ad_data.use_counter = true;
6015 		ad_data.counter_id = rule->vf_id %
6016 				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6017 	} else {
6018 		ad_data.use_counter = false;
6019 		ad_data.counter_id = 0;
6020 	}
6021 
6022 	ad_data.use_next_stage = false;
6023 	ad_data.next_input_key = 0;
6024 
6025 	ad_data.write_rule_id_to_bd = true;
6026 	ad_data.rule_id = rule->location;
6027 
6028 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6029 }
6030 
6031 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6032 				       u32 *unused_tuple)
6033 {
6034 	if (!spec || !unused_tuple)
6035 		return -EINVAL;
6036 
6037 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6038 
6039 	if (!spec->ip4src)
6040 		*unused_tuple |= BIT(INNER_SRC_IP);
6041 
6042 	if (!spec->ip4dst)
6043 		*unused_tuple |= BIT(INNER_DST_IP);
6044 
6045 	if (!spec->psrc)
6046 		*unused_tuple |= BIT(INNER_SRC_PORT);
6047 
6048 	if (!spec->pdst)
6049 		*unused_tuple |= BIT(INNER_DST_PORT);
6050 
6051 	if (!spec->tos)
6052 		*unused_tuple |= BIT(INNER_IP_TOS);
6053 
6054 	return 0;
6055 }
6056 
6057 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6058 				    u32 *unused_tuple)
6059 {
6060 	if (!spec || !unused_tuple)
6061 		return -EINVAL;
6062 
6063 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6064 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6065 
6066 	if (!spec->ip4src)
6067 		*unused_tuple |= BIT(INNER_SRC_IP);
6068 
6069 	if (!spec->ip4dst)
6070 		*unused_tuple |= BIT(INNER_DST_IP);
6071 
6072 	if (!spec->tos)
6073 		*unused_tuple |= BIT(INNER_IP_TOS);
6074 
6075 	if (!spec->proto)
6076 		*unused_tuple |= BIT(INNER_IP_PROTO);
6077 
6078 	if (spec->l4_4_bytes)
6079 		return -EOPNOTSUPP;
6080 
6081 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6082 		return -EOPNOTSUPP;
6083 
6084 	return 0;
6085 }
6086 
6087 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6088 				       u32 *unused_tuple)
6089 {
6090 	if (!spec || !unused_tuple)
6091 		return -EINVAL;
6092 
6093 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6094 
6095 	/* check whether the src/dst ip addresses are used */
6096 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6097 		*unused_tuple |= BIT(INNER_SRC_IP);
6098 
6099 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6100 		*unused_tuple |= BIT(INNER_DST_IP);
6101 
6102 	if (!spec->psrc)
6103 		*unused_tuple |= BIT(INNER_SRC_PORT);
6104 
6105 	if (!spec->pdst)
6106 		*unused_tuple |= BIT(INNER_DST_PORT);
6107 
6108 	if (!spec->tclass)
6109 		*unused_tuple |= BIT(INNER_IP_TOS);
6110 
6111 	return 0;
6112 }
6113 
6114 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6115 				    u32 *unused_tuple)
6116 {
6117 	if (!spec || !unused_tuple)
6118 		return -EINVAL;
6119 
6120 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6121 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6122 
6123 	/* check whether the src/dst ip addresses are used */
6124 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6125 		*unused_tuple |= BIT(INNER_SRC_IP);
6126 
6127 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6128 		*unused_tuple |= BIT(INNER_DST_IP);
6129 
6130 	if (!spec->l4_proto)
6131 		*unused_tuple |= BIT(INNER_IP_PROTO);
6132 
6133 	if (!spec->tclass)
6134 		*unused_tuple |= BIT(INNER_IP_TOS);
6135 
6136 	if (spec->l4_4_bytes)
6137 		return -EOPNOTSUPP;
6138 
6139 	return 0;
6140 }
6141 
6142 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6143 {
6144 	if (!spec || !unused_tuple)
6145 		return -EINVAL;
6146 
6147 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6148 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6149 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6150 
6151 	if (is_zero_ether_addr(spec->h_source))
6152 		*unused_tuple |= BIT(INNER_SRC_MAC);
6153 
6154 	if (is_zero_ether_addr(spec->h_dest))
6155 		*unused_tuple |= BIT(INNER_DST_MAC);
6156 
6157 	if (!spec->h_proto)
6158 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6159 
6160 	return 0;
6161 }
6162 
6163 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6164 				    struct ethtool_rx_flow_spec *fs,
6165 				    u32 *unused_tuple)
6166 {
6167 	if (fs->flow_type & FLOW_EXT) {
6168 		if (fs->h_ext.vlan_etype) {
6169 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6170 			return -EOPNOTSUPP;
6171 		}
6172 
6173 		if (!fs->h_ext.vlan_tci)
6174 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6175 
6176 		if (fs->m_ext.vlan_tci &&
6177 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6178 			dev_err(&hdev->pdev->dev,
6179 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6180 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6181 			return -EINVAL;
6182 		}
6183 	} else {
6184 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6185 	}
6186 
6187 	if (fs->flow_type & FLOW_MAC_EXT) {
6188 		if (hdev->fd_cfg.fd_mode !=
6189 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6190 			dev_err(&hdev->pdev->dev,
6191 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6192 			return -EOPNOTSUPP;
6193 		}
6194 
6195 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6196 			*unused_tuple |= BIT(INNER_DST_MAC);
6197 		else
6198 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6199 	}
6200 
6201 	return 0;
6202 }
6203 
6204 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6205 				       struct hclge_fd_user_def_info *info)
6206 {
6207 	switch (flow_type) {
6208 	case ETHER_FLOW:
6209 		info->layer = HCLGE_FD_USER_DEF_L2;
6210 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6211 		break;
6212 	case IP_USER_FLOW:
6213 	case IPV6_USER_FLOW:
6214 		info->layer = HCLGE_FD_USER_DEF_L3;
6215 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6216 		break;
6217 	case TCP_V4_FLOW:
6218 	case UDP_V4_FLOW:
6219 	case TCP_V6_FLOW:
6220 	case UDP_V6_FLOW:
6221 		info->layer = HCLGE_FD_USER_DEF_L4;
6222 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6223 		break;
6224 	default:
6225 		return -EOPNOTSUPP;
6226 	}
6227 
6228 	return 0;
6229 }
6230 
6231 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6232 {
6233 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6234 }
6235 
6236 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6237 					 struct ethtool_rx_flow_spec *fs,
6238 					 u32 *unused_tuple,
6239 					 struct hclge_fd_user_def_info *info)
6240 {
6241 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6242 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6243 	u16 data, offset, data_mask, offset_mask;
6244 	int ret;
6245 
6246 	info->layer = HCLGE_FD_USER_DEF_NONE;
6247 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6248 
6249 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6250 		return 0;
6251 
6252 	/* the user-def data from ethtool is a 64 bit value: bits 0~15 are
6253 	 * used for the data, and bits 32~47 for the offset.
6254 	 */
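	/* e.g. (illustrative values only) a 64 bit user-def value of
	 * 0x0000001000001234 carries offset 0x10 in bits 32~47 and match
	 * data 0x1234 in bits 0~15
	 */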
6255 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6256 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6257 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6258 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6259 
6260 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6261 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6262 		return -EOPNOTSUPP;
6263 	}
6264 
6265 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6266 		dev_err(&hdev->pdev->dev,
6267 			"user-def offset[%u] should be no more than %u\n",
6268 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6269 		return -EINVAL;
6270 	}
6271 
6272 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6273 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6274 		return -EINVAL;
6275 	}
6276 
6277 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6278 	if (ret) {
6279 		dev_err(&hdev->pdev->dev,
6280 			"unsupported flow type for user-def bytes, ret = %d\n",
6281 			ret);
6282 		return ret;
6283 	}
6284 
6285 	info->data = data;
6286 	info->data_mask = data_mask;
6287 	info->offset = offset;
6288 
6289 	return 0;
6290 }
6291 
6292 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6293 			       struct ethtool_rx_flow_spec *fs,
6294 			       u32 *unused_tuple,
6295 			       struct hclge_fd_user_def_info *info)
6296 {
6297 	u32 flow_type;
6298 	int ret;
6299 
6300 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6301 		dev_err(&hdev->pdev->dev,
6302 			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
6303 			fs->location,
6304 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6305 		return -EINVAL;
6306 	}
6307 
6308 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6309 	if (ret)
6310 		return ret;
6311 
6312 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6313 	switch (flow_type) {
6314 	case SCTP_V4_FLOW:
6315 	case TCP_V4_FLOW:
6316 	case UDP_V4_FLOW:
6317 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6318 						  unused_tuple);
6319 		break;
6320 	case IP_USER_FLOW:
6321 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6322 					       unused_tuple);
6323 		break;
6324 	case SCTP_V6_FLOW:
6325 	case TCP_V6_FLOW:
6326 	case UDP_V6_FLOW:
6327 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6328 						  unused_tuple);
6329 		break;
6330 	case IPV6_USER_FLOW:
6331 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6332 					       unused_tuple);
6333 		break;
6334 	case ETHER_FLOW:
6335 		if (hdev->fd_cfg.fd_mode !=
6336 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6337 			dev_err(&hdev->pdev->dev,
6338 				"ETHER_FLOW is not supported in current fd mode!\n");
6339 			return -EOPNOTSUPP;
6340 		}
6341 
6342 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6343 						 unused_tuple);
6344 		break;
6345 	default:
6346 		dev_err(&hdev->pdev->dev,
6347 			"unsupported protocol type, protocol type = %#x\n",
6348 			flow_type);
6349 		return -EOPNOTSUPP;
6350 	}
6351 
6352 	if (ret) {
6353 		dev_err(&hdev->pdev->dev,
6354 			"failed to check flow union tuple, ret = %d\n",
6355 			ret);
6356 		return ret;
6357 	}
6358 
6359 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6360 }
6361 
6362 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6363 				      struct ethtool_rx_flow_spec *fs,
6364 				      struct hclge_fd_rule *rule, u8 ip_proto)
6365 {
6366 	rule->tuples.src_ip[IPV4_INDEX] =
6367 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6368 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6369 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6370 
6371 	rule->tuples.dst_ip[IPV4_INDEX] =
6372 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6373 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6374 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6375 
6376 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6377 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6378 
6379 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6380 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6381 
6382 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6383 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6384 
6385 	rule->tuples.ether_proto = ETH_P_IP;
6386 	rule->tuples_mask.ether_proto = 0xFFFF;
6387 
6388 	rule->tuples.ip_proto = ip_proto;
6389 	rule->tuples_mask.ip_proto = 0xFF;
6390 }
6391 
6392 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6393 				   struct ethtool_rx_flow_spec *fs,
6394 				   struct hclge_fd_rule *rule)
6395 {
6396 	rule->tuples.src_ip[IPV4_INDEX] =
6397 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6398 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6399 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6400 
6401 	rule->tuples.dst_ip[IPV4_INDEX] =
6402 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6403 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6404 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6405 
6406 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6407 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6408 
6409 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6410 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6411 
6412 	rule->tuples.ether_proto = ETH_P_IP;
6413 	rule->tuples_mask.ether_proto = 0xFFFF;
6414 }
6415 
6416 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6417 				      struct ethtool_rx_flow_spec *fs,
6418 				      struct hclge_fd_rule *rule, u8 ip_proto)
6419 {
6420 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6421 			  IPV6_SIZE);
6422 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6423 			  IPV6_SIZE);
6424 
6425 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6426 			  IPV6_SIZE);
6427 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6428 			  IPV6_SIZE);
6429 
6430 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6431 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6432 
6433 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6434 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6435 
6436 	rule->tuples.ether_proto = ETH_P_IPV6;
6437 	rule->tuples_mask.ether_proto = 0xFFFF;
6438 
6439 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6440 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6441 
6442 	rule->tuples.ip_proto = ip_proto;
6443 	rule->tuples_mask.ip_proto = 0xFF;
6444 }
6445 
6446 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6447 				   struct ethtool_rx_flow_spec *fs,
6448 				   struct hclge_fd_rule *rule)
6449 {
6450 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6451 			  IPV6_SIZE);
6452 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6453 			  IPV6_SIZE);
6454 
6455 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6456 			  IPV6_SIZE);
6457 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6458 			  IPV6_SIZE);
6459 
6460 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6461 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6462 
6463 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6464 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6465 
6466 	rule->tuples.ether_proto = ETH_P_IPV6;
6467 	rule->tuples_mask.ether_proto = 0xFFFF;
6468 }
6469 
6470 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6471 				     struct ethtool_rx_flow_spec *fs,
6472 				     struct hclge_fd_rule *rule)
6473 {
6474 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6475 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6476 
6477 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6478 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6479 
6480 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6481 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6482 }
6483 
6484 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6485 					struct hclge_fd_rule *rule)
6486 {
6487 	switch (info->layer) {
6488 	case HCLGE_FD_USER_DEF_L2:
6489 		rule->tuples.l2_user_def = info->data;
6490 		rule->tuples_mask.l2_user_def = info->data_mask;
6491 		break;
6492 	case HCLGE_FD_USER_DEF_L3:
6493 		rule->tuples.l3_user_def = info->data;
6494 		rule->tuples_mask.l3_user_def = info->data_mask;
6495 		break;
6496 	case HCLGE_FD_USER_DEF_L4:
6497 		rule->tuples.l4_user_def = (u32)info->data << 16;
6498 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6499 		break;
6500 	default:
6501 		break;
6502 	}
6503 
6504 	rule->ep.user_def = *info;
6505 }
6506 
6507 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6508 			      struct ethtool_rx_flow_spec *fs,
6509 			      struct hclge_fd_rule *rule,
6510 			      struct hclge_fd_user_def_info *info)
6511 {
6512 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6513 
6514 	switch (flow_type) {
6515 	case SCTP_V4_FLOW:
6516 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6517 		break;
6518 	case TCP_V4_FLOW:
6519 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6520 		break;
6521 	case UDP_V4_FLOW:
6522 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6523 		break;
6524 	case IP_USER_FLOW:
6525 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6526 		break;
6527 	case SCTP_V6_FLOW:
6528 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6529 		break;
6530 	case TCP_V6_FLOW:
6531 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6532 		break;
6533 	case UDP_V6_FLOW:
6534 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6535 		break;
6536 	case IPV6_USER_FLOW:
6537 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6538 		break;
6539 	case ETHER_FLOW:
6540 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6541 		break;
6542 	default:
6543 		return -EOPNOTSUPP;
6544 	}
6545 
6546 	if (fs->flow_type & FLOW_EXT) {
6547 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6548 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6549 		hclge_fd_get_user_def_tuple(info, rule);
6550 	}
6551 
6552 	if (fs->flow_type & FLOW_MAC_EXT) {
6553 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6554 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6555 	}
6556 
6557 	return 0;
6558 }
6559 
6560 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6561 				struct hclge_fd_rule *rule)
6562 {
6563 	int ret;
6564 
6565 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6566 	if (ret)
6567 		return ret;
6568 
6569 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6570 }
6571 
6572 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6573 				     struct hclge_fd_rule *rule)
6574 {
6575 	int ret;
6576 
6577 	spin_lock_bh(&hdev->fd_rule_lock);
6578 
6579 	if (hdev->fd_active_type != rule->rule_type &&
6580 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6581 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6582 		dev_err(&hdev->pdev->dev,
6583 			"mode conflict (new type %d, active type %d), please delete existing rules first\n",
6584 			rule->rule_type, hdev->fd_active_type);
6585 		spin_unlock_bh(&hdev->fd_rule_lock);
6586 		return -EINVAL;
6587 	}
6588 
6589 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6590 	if (ret)
6591 		goto out;
6592 
6593 	ret = hclge_clear_arfs_rules(hdev);
6594 	if (ret)
6595 		goto out;
6596 
6597 	ret = hclge_fd_config_rule(hdev, rule);
6598 	if (ret)
6599 		goto out;
6600 
6601 	rule->state = HCLGE_FD_ACTIVE;
6602 	hdev->fd_active_type = rule->rule_type;
6603 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6604 
6605 out:
6606 	spin_unlock_bh(&hdev->fd_rule_lock);
6607 	return ret;
6608 }
6609 
6610 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6611 {
6612 	struct hclge_vport *vport = hclge_get_vport(handle);
6613 	struct hclge_dev *hdev = vport->back;
6614 
6615 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6616 }
6617 
6618 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6619 				      u16 *vport_id, u8 *action, u16 *queue_id)
6620 {
6621 	struct hclge_vport *vport = hdev->vport;
6622 
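	/* the ethtool ring_cookie either requests a drop (RX_CLS_FLOW_DISC)
	 * or encodes the destination VF and queue index
	 */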
6623 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6624 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6625 	} else {
6626 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6627 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6628 		u16 tqps;
6629 
6630 		if (vf > hdev->num_req_vfs) {
6631 			dev_err(&hdev->pdev->dev,
6632 				"Error: vf id (%u) > max vf num (%u)\n",
6633 				vf, hdev->num_req_vfs);
6634 			return -EINVAL;
6635 		}
6636 
6637 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6638 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6639 
6640 		if (ring >= tqps) {
6641 			dev_err(&hdev->pdev->dev,
6642 				"Error: queue id (%u) > max queue id (%u)\n",
6643 				ring, tqps - 1);
6644 			return -EINVAL;
6645 		}
6646 
6647 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6648 		*queue_id = ring;
6649 	}
6650 
6651 	return 0;
6652 }
6653 
6654 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6655 			      struct ethtool_rxnfc *cmd)
6656 {
6657 	struct hclge_vport *vport = hclge_get_vport(handle);
6658 	struct hclge_dev *hdev = vport->back;
6659 	struct hclge_fd_user_def_info info;
6660 	u16 dst_vport_id = 0, q_index = 0;
6661 	struct ethtool_rx_flow_spec *fs;
6662 	struct hclge_fd_rule *rule;
6663 	u32 unused = 0;
6664 	u8 action;
6665 	int ret;
6666 
6667 	if (!hnae3_dev_fd_supported(hdev)) {
6668 		dev_err(&hdev->pdev->dev,
6669 			"flow director is not supported\n");
6670 		return -EOPNOTSUPP;
6671 	}
6672 
6673 	if (!hdev->fd_en) {
6674 		dev_err(&hdev->pdev->dev,
6675 			"please enable flow director first\n");
6676 		return -EOPNOTSUPP;
6677 	}
6678 
6679 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6680 
6681 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6682 	if (ret)
6683 		return ret;
6684 
6685 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6686 					 &action, &q_index);
6687 	if (ret)
6688 		return ret;
6689 
6690 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6691 	if (!rule)
6692 		return -ENOMEM;
6693 
6694 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6695 	if (ret) {
6696 		kfree(rule);
6697 		return ret;
6698 	}
6699 
6700 	rule->flow_type = fs->flow_type;
6701 	rule->location = fs->location;
6702 	rule->unused_tuple = unused;
6703 	rule->vf_id = dst_vport_id;
6704 	rule->queue_id = q_index;
6705 	rule->action = action;
6706 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6707 
6708 	ret = hclge_add_fd_entry_common(hdev, rule);
6709 	if (ret)
6710 		kfree(rule);
6711 
6712 	return ret;
6713 }
6714 
6715 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6716 			      struct ethtool_rxnfc *cmd)
6717 {
6718 	struct hclge_vport *vport = hclge_get_vport(handle);
6719 	struct hclge_dev *hdev = vport->back;
6720 	struct ethtool_rx_flow_spec *fs;
6721 	int ret;
6722 
6723 	if (!hnae3_dev_fd_supported(hdev))
6724 		return -EOPNOTSUPP;
6725 
6726 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6727 
6728 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6729 		return -EINVAL;
6730 
6731 	spin_lock_bh(&hdev->fd_rule_lock);
6732 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6733 	    !test_bit(fs->location, hdev->fd_bmap)) {
6734 		dev_err(&hdev->pdev->dev,
6735 			"Delete fail, rule %u does not exist\n", fs->location);
6736 		spin_unlock_bh(&hdev->fd_rule_lock);
6737 		return -ENOENT;
6738 	}
6739 
6740 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6741 				   NULL, false);
6742 	if (ret)
6743 		goto out;
6744 
6745 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6746 
6747 out:
6748 	spin_unlock_bh(&hdev->fd_rule_lock);
6749 	return ret;
6750 }
6751 
6752 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6753 					 bool clear_list)
6754 {
6755 	struct hclge_fd_rule *rule;
6756 	struct hlist_node *node;
6757 	u16 location;
6758 
6759 	if (!hnae3_dev_fd_supported(hdev))
6760 		return;
6761 
6762 	spin_lock_bh(&hdev->fd_rule_lock);
6763 
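	/* invalidate every configured TCAM entry; the software rule list,
	 * rule count and bitmap are cleared only when clear_list is set
	 */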
6764 	for_each_set_bit(location, hdev->fd_bmap,
6765 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6766 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6767 				     NULL, false);
6768 
6769 	if (clear_list) {
6770 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6771 					  rule_node) {
6772 			hlist_del(&rule->rule_node);
6773 			kfree(rule);
6774 		}
6775 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6776 		hdev->hclge_fd_rule_num = 0;
6777 		bitmap_zero(hdev->fd_bmap,
6778 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6779 	}
6780 
6781 	spin_unlock_bh(&hdev->fd_rule_lock);
6782 }
6783 
6784 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6785 {
6786 	hclge_clear_fd_rules_in_list(hdev, true);
6787 	hclge_fd_disable_user_def(hdev);
6788 }
6789 
6790 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6791 {
6792 	struct hclge_vport *vport = hclge_get_vport(handle);
6793 	struct hclge_dev *hdev = vport->back;
6794 	struct hclge_fd_rule *rule;
6795 	struct hlist_node *node;
6796 
6797 	/* Return ok here, because reset error handling will check this
6798 	 * return value. If error is returned here, the reset process will
6799 	 * fail.
6800 	 */
6801 	if (!hnae3_dev_fd_supported(hdev))
6802 		return 0;
6803 
6804 	/* if fd is disabled, it should not be restored during reset */
6805 	if (!hdev->fd_en)
6806 		return 0;
6807 
6808 	spin_lock_bh(&hdev->fd_rule_lock);
6809 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6810 		if (rule->state == HCLGE_FD_ACTIVE)
6811 			rule->state = HCLGE_FD_TO_ADD;
6812 	}
6813 	spin_unlock_bh(&hdev->fd_rule_lock);
6814 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6815 
6816 	return 0;
6817 }
6818 
6819 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6820 				 struct ethtool_rxnfc *cmd)
6821 {
6822 	struct hclge_vport *vport = hclge_get_vport(handle);
6823 	struct hclge_dev *hdev = vport->back;
6824 
6825 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6826 		return -EOPNOTSUPP;
6827 
6828 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6829 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6830 
6831 	return 0;
6832 }
6833 
6834 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6835 				     struct ethtool_tcpip4_spec *spec,
6836 				     struct ethtool_tcpip4_spec *spec_mask)
6837 {
6838 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6839 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6840 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6841 
6842 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6843 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6844 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6845 
6846 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6847 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6848 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6849 
6850 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6851 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6852 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6853 
6854 	spec->tos = rule->tuples.ip_tos;
6855 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6856 			0 : rule->tuples_mask.ip_tos;
6857 }
6858 
6859 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6860 				  struct ethtool_usrip4_spec *spec,
6861 				  struct ethtool_usrip4_spec *spec_mask)
6862 {
6863 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6864 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6865 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6866 
6867 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6868 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6869 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6870 
6871 	spec->tos = rule->tuples.ip_tos;
6872 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6873 			0 : rule->tuples_mask.ip_tos;
6874 
6875 	spec->proto = rule->tuples.ip_proto;
6876 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6877 			0 : rule->tuples_mask.ip_proto;
6878 
6879 	spec->ip_ver = ETH_RX_NFC_IP4;
6880 }
6881 
6882 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6883 				     struct ethtool_tcpip6_spec *spec,
6884 				     struct ethtool_tcpip6_spec *spec_mask)
6885 {
6886 	cpu_to_be32_array(spec->ip6src,
6887 			  rule->tuples.src_ip, IPV6_SIZE);
6888 	cpu_to_be32_array(spec->ip6dst,
6889 			  rule->tuples.dst_ip, IPV6_SIZE);
6890 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6891 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6892 	else
6893 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6894 				  IPV6_SIZE);
6895 
6896 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6897 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6898 	else
6899 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6900 				  IPV6_SIZE);
6901 
6902 	spec->tclass = rule->tuples.ip_tos;
6903 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6904 			0 : rule->tuples_mask.ip_tos;
6905 
6906 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6907 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6908 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6909 
6910 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6911 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6912 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6913 }
6914 
6915 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6916 				  struct ethtool_usrip6_spec *spec,
6917 				  struct ethtool_usrip6_spec *spec_mask)
6918 {
6919 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6920 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6921 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6922 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6923 	else
6924 		cpu_to_be32_array(spec_mask->ip6src,
6925 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6926 
6927 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6928 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6929 	else
6930 		cpu_to_be32_array(spec_mask->ip6dst,
6931 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6932 
6933 	spec->tclass = rule->tuples.ip_tos;
6934 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6935 			0 : rule->tuples_mask.ip_tos;
6936 
6937 	spec->l4_proto = rule->tuples.ip_proto;
6938 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6939 			0 : rule->tuples_mask.ip_proto;
6940 }
6941 
6942 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6943 				    struct ethhdr *spec,
6944 				    struct ethhdr *spec_mask)
6945 {
6946 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6947 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6948 
6949 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6950 		eth_zero_addr(spec_mask->h_source);
6951 	else
6952 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6953 
6954 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6955 		eth_zero_addr(spec_mask->h_dest);
6956 	else
6957 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6958 
6959 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6960 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6961 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6962 }
6963 
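/* Report the user-defined tuple back to ethtool: if both user-defined
 * tuples are unused the ext data/mask fields are cleared, otherwise the
 * layer offset, match data and data mask of the rule are returned.
 */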
6964 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6965 				       struct hclge_fd_rule *rule)
6966 {
6967 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6968 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6969 		fs->h_ext.data[0] = 0;
6970 		fs->h_ext.data[1] = 0;
6971 		fs->m_ext.data[0] = 0;
6972 		fs->m_ext.data[1] = 0;
6973 	} else {
6974 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6975 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6976 		fs->m_ext.data[0] =
6977 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6978 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6979 	}
6980 }
6981 
6982 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6983 				  struct hclge_fd_rule *rule)
6984 {
6985 	if (fs->flow_type & FLOW_EXT) {
6986 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6987 		fs->m_ext.vlan_tci =
6988 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6989 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6990 
6991 		hclge_fd_get_user_def_info(fs, rule);
6992 	}
6993 
6994 	if (fs->flow_type & FLOW_MAC_EXT) {
6995 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6996 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6997 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6998 		else
6999 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
7000 					rule->tuples_mask.dst_mac);
7001 	}
7002 }
7003 
7004 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7005 				  struct ethtool_rxnfc *cmd)
7006 {
7007 	struct hclge_vport *vport = hclge_get_vport(handle);
7008 	struct hclge_fd_rule *rule = NULL;
7009 	struct hclge_dev *hdev = vport->back;
7010 	struct ethtool_rx_flow_spec *fs;
7011 	struct hlist_node *node2;
7012 
7013 	if (!hnae3_dev_fd_supported(hdev))
7014 		return -EOPNOTSUPP;
7015 
7016 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7017 
7018 	spin_lock_bh(&hdev->fd_rule_lock);
7019 
7020 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7021 		if (rule->location >= fs->location)
7022 			break;
7023 	}
7024 
7025 	if (!rule || fs->location != rule->location) {
7026 		spin_unlock_bh(&hdev->fd_rule_lock);
7027 
7028 		return -ENOENT;
7029 	}
7030 
7031 	fs->flow_type = rule->flow_type;
7032 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7033 	case SCTP_V4_FLOW:
7034 	case TCP_V4_FLOW:
7035 	case UDP_V4_FLOW:
7036 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7037 					 &fs->m_u.tcp_ip4_spec);
7038 		break;
7039 	case IP_USER_FLOW:
7040 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7041 				      &fs->m_u.usr_ip4_spec);
7042 		break;
7043 	case SCTP_V6_FLOW:
7044 	case TCP_V6_FLOW:
7045 	case UDP_V6_FLOW:
7046 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7047 					 &fs->m_u.tcp_ip6_spec);
7048 		break;
7049 	case IPV6_USER_FLOW:
7050 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7051 				      &fs->m_u.usr_ip6_spec);
7052 		break;
	/* The flow type of the fd rule has been checked before it was added
	 * to the rule list. As the other flow types have been handled above,
	 * it must be ETHER_FLOW for the default case
	 */
7057 	default:
7058 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7059 					&fs->m_u.ether_spec);
7060 		break;
7061 	}
7062 
7063 	hclge_fd_get_ext_info(fs, rule);
7064 
7065 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7066 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7067 	} else {
7068 		u64 vf_id;
7069 
7070 		fs->ring_cookie = rule->queue_id;
7071 		vf_id = rule->vf_id;
7072 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7073 		fs->ring_cookie |= vf_id;
7074 	}
7075 
7076 	spin_unlock_bh(&hdev->fd_rule_lock);
7077 
7078 	return 0;
7079 }
7080 
7081 static int hclge_get_all_rules(struct hnae3_handle *handle,
7082 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7083 {
7084 	struct hclge_vport *vport = hclge_get_vport(handle);
7085 	struct hclge_dev *hdev = vport->back;
7086 	struct hclge_fd_rule *rule;
7087 	struct hlist_node *node2;
7088 	int cnt = 0;
7089 
7090 	if (!hnae3_dev_fd_supported(hdev))
7091 		return -EOPNOTSUPP;
7092 
7093 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7094 
7095 	spin_lock_bh(&hdev->fd_rule_lock);
7096 	hlist_for_each_entry_safe(rule, node2,
7097 				  &hdev->fd_rule_list, rule_node) {
7098 		if (cnt == cmd->rule_cnt) {
7099 			spin_unlock_bh(&hdev->fd_rule_lock);
7100 			return -EMSGSIZE;
7101 		}
7102 
7103 		if (rule->state == HCLGE_FD_TO_DEL)
7104 			continue;
7105 
7106 		rule_locs[cnt] = rule->location;
7107 		cnt++;
7108 	}
7109 
7110 	spin_unlock_bh(&hdev->fd_rule_lock);
7111 
7112 	cmd->rule_cnt = cnt;
7113 
7114 	return 0;
7115 }
7116 
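/* Extract the aRFS tuples (ether proto, L4 proto, dst port and IP
 * addresses) from the dissected flow keys, converting them to host
 * byte order. For IPv4 only the last u32 of the IP arrays is used.
 */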
7117 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7118 				     struct hclge_fd_rule_tuples *tuples)
7119 {
7120 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7121 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7122 
7123 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7124 	tuples->ip_proto = fkeys->basic.ip_proto;
7125 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7126 
7127 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7128 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7129 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7130 	} else {
7131 		int i;
7132 
7133 		for (i = 0; i < IPV6_SIZE; i++) {
7134 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7135 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7136 		}
7137 	}
7138 }
7139 
/* traverse all rules, check whether an existing rule has the same tuples */
7141 static struct hclge_fd_rule *
7142 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7143 			  const struct hclge_fd_rule_tuples *tuples)
7144 {
7145 	struct hclge_fd_rule *rule = NULL;
7146 	struct hlist_node *node;
7147 
7148 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7149 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7150 			return rule;
7151 	}
7152 
7153 	return NULL;
7154 }
7155 
7156 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7157 				     struct hclge_fd_rule *rule)
7158 {
7159 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7160 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7161 			     BIT(INNER_SRC_PORT);
7162 	rule->action = 0;
7163 	rule->vf_id = 0;
7164 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7165 	rule->state = HCLGE_FD_TO_ADD;
7166 	if (tuples->ether_proto == ETH_P_IP) {
7167 		if (tuples->ip_proto == IPPROTO_TCP)
7168 			rule->flow_type = TCP_V4_FLOW;
7169 		else
7170 			rule->flow_type = UDP_V4_FLOW;
7171 	} else {
7172 		if (tuples->ip_proto == IPPROTO_TCP)
7173 			rule->flow_type = TCP_V6_FLOW;
7174 		else
7175 			rule->flow_type = UDP_V6_FLOW;
7176 	}
7177 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7178 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7179 }
7180 
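/* aRFS entry point: look up or create a flow director rule for the given
 * flow keys and steer it to @queue_id. Returns the rule location in the
 * TCAM, which is used as the flow id for later expiry checks.
 */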
7181 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7182 				      u16 flow_id, struct flow_keys *fkeys)
7183 {
7184 	struct hclge_vport *vport = hclge_get_vport(handle);
7185 	struct hclge_fd_rule_tuples new_tuples = {};
7186 	struct hclge_dev *hdev = vport->back;
7187 	struct hclge_fd_rule *rule;
7188 	u16 bit_id;
7189 
7190 	if (!hnae3_dev_fd_supported(hdev))
7191 		return -EOPNOTSUPP;
7192 
	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
7196 	spin_lock_bh(&hdev->fd_rule_lock);
7197 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7198 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7199 		spin_unlock_bh(&hdev->fd_rule_lock);
7200 		return -EOPNOTSUPP;
7201 	}
7202 
7203 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7204 
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
7210 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7211 	if (!rule) {
7212 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7213 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7214 			spin_unlock_bh(&hdev->fd_rule_lock);
7215 			return -ENOSPC;
7216 		}
7217 
7218 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7219 		if (!rule) {
7220 			spin_unlock_bh(&hdev->fd_rule_lock);
7221 			return -ENOMEM;
7222 		}
7223 
7224 		rule->location = bit_id;
7225 		rule->arfs.flow_id = flow_id;
7226 		rule->queue_id = queue_id;
7227 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7228 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7229 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7230 	} else if (rule->queue_id != queue_id) {
7231 		rule->queue_id = queue_id;
7232 		rule->state = HCLGE_FD_TO_ADD;
7233 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7234 		hclge_task_schedule(hdev, 0);
7235 	}
7236 	spin_unlock_bh(&hdev->fd_rule_lock);
7237 	return rule->location;
7238 }
7239 
7240 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7241 {
7242 #ifdef CONFIG_RFS_ACCEL
7243 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7244 	struct hclge_fd_rule *rule;
7245 	struct hlist_node *node;
7246 
7247 	spin_lock_bh(&hdev->fd_rule_lock);
7248 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7249 		spin_unlock_bh(&hdev->fd_rule_lock);
7250 		return;
7251 	}
7252 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7253 		if (rule->state != HCLGE_FD_ACTIVE)
7254 			continue;
7255 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7256 					rule->arfs.flow_id, rule->location)) {
7257 			rule->state = HCLGE_FD_TO_DEL;
7258 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7259 		}
7260 	}
7261 	spin_unlock_bh(&hdev->fd_rule_lock);
7262 #endif
7263 }
7264 
/* the caller must hold fd_rule_lock before calling this function */
7266 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7267 {
7268 #ifdef CONFIG_RFS_ACCEL
7269 	struct hclge_fd_rule *rule;
7270 	struct hlist_node *node;
7271 	int ret;
7272 
7273 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7274 		return 0;
7275 
7276 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7277 		switch (rule->state) {
7278 		case HCLGE_FD_TO_DEL:
7279 		case HCLGE_FD_ACTIVE:
7280 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7281 						   rule->location, NULL, false);
7282 			if (ret)
7283 				return ret;
7284 			fallthrough;
7285 		case HCLGE_FD_TO_ADD:
7286 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7287 			hlist_del(&rule->rule_node);
7288 			kfree(rule);
7289 			break;
7290 		default:
7291 			break;
7292 		}
7293 	}
7294 	hclge_sync_fd_state(hdev);
7295 
7296 #endif
7297 	return 0;
7298 }
7299 
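/* The hclge_get_cls_key_* helpers below copy the matched fields of a
 * tc-flower rule into the fd rule tuples, or mark the corresponding
 * tuple as unused when the key is not present in the match.
 */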
7300 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7301 				    struct hclge_fd_rule *rule)
7302 {
7303 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7304 		struct flow_match_basic match;
7305 		u16 ethtype_key, ethtype_mask;
7306 
7307 		flow_rule_match_basic(flow, &match);
7308 		ethtype_key = ntohs(match.key->n_proto);
7309 		ethtype_mask = ntohs(match.mask->n_proto);
7310 
7311 		if (ethtype_key == ETH_P_ALL) {
7312 			ethtype_key = 0;
7313 			ethtype_mask = 0;
7314 		}
7315 		rule->tuples.ether_proto = ethtype_key;
7316 		rule->tuples_mask.ether_proto = ethtype_mask;
7317 		rule->tuples.ip_proto = match.key->ip_proto;
7318 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7319 	} else {
7320 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7321 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7322 	}
7323 }
7324 
7325 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7326 				  struct hclge_fd_rule *rule)
7327 {
7328 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7329 		struct flow_match_eth_addrs match;
7330 
7331 		flow_rule_match_eth_addrs(flow, &match);
7332 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7333 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7334 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7335 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7336 	} else {
7337 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7338 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7339 	}
7340 }
7341 
7342 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7343 				   struct hclge_fd_rule *rule)
7344 {
7345 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7346 		struct flow_match_vlan match;
7347 
7348 		flow_rule_match_vlan(flow, &match);
7349 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7350 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7351 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7352 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7353 	} else {
7354 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7355 	}
7356 }
7357 
7358 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7359 				 struct hclge_fd_rule *rule)
7360 {
7361 	u16 addr_type = 0;
7362 
7363 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7364 		struct flow_match_control match;
7365 
7366 		flow_rule_match_control(flow, &match);
7367 		addr_type = match.key->addr_type;
7368 	}
7369 
7370 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7371 		struct flow_match_ipv4_addrs match;
7372 
7373 		flow_rule_match_ipv4_addrs(flow, &match);
7374 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7375 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7376 						be32_to_cpu(match.mask->src);
7377 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7378 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7379 						be32_to_cpu(match.mask->dst);
7380 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7381 		struct flow_match_ipv6_addrs match;
7382 
7383 		flow_rule_match_ipv6_addrs(flow, &match);
7384 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7385 				  IPV6_SIZE);
7386 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7387 				  match.mask->src.s6_addr32, IPV6_SIZE);
7388 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7389 				  IPV6_SIZE);
7390 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7391 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7392 	} else {
7393 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7394 		rule->unused_tuple |= BIT(INNER_DST_IP);
7395 	}
7396 }
7397 
7398 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7399 				   struct hclge_fd_rule *rule)
7400 {
7401 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7402 		struct flow_match_ports match;
7403 
7404 		flow_rule_match_ports(flow, &match);
7405 
7406 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7407 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7408 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7409 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7410 	} else {
7411 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7412 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7413 	}
7414 }
7415 
7416 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7417 				  struct flow_cls_offload *cls_flower,
7418 				  struct hclge_fd_rule *rule)
7419 {
7420 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7421 	struct flow_dissector *dissector = flow->match.dissector;
7422 
7423 	if (dissector->used_keys &
7424 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7425 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7426 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7427 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7428 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7429 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7430 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7431 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7432 			dissector->used_keys);
7433 		return -EOPNOTSUPP;
7434 	}
7435 
7436 	hclge_get_cls_key_basic(flow, rule);
7437 	hclge_get_cls_key_mac(flow, rule);
7438 	hclge_get_cls_key_vlan(flow, rule);
7439 	hclge_get_cls_key_ip(flow, rule);
7440 	hclge_get_cls_key_port(flow, rule);
7441 
7442 	return 0;
7443 }
7444 
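/* The tc-flower priority is mapped directly to a TCAM location (prio - 1),
 * so it must be within the stage 1 rule number and not already in use.
 */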
7445 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7446 				  struct flow_cls_offload *cls_flower, int tc)
7447 {
7448 	u32 prio = cls_flower->common.prio;
7449 
7450 	if (tc < 0 || tc > hdev->tc_max) {
7451 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7452 		return -EINVAL;
7453 	}
7454 
7455 	if (prio == 0 ||
7456 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7457 		dev_err(&hdev->pdev->dev,
7458 			"prio %u should be in range[1, %u]\n",
7459 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7460 		return -EINVAL;
7461 	}
7462 
7463 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7464 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7465 		return -EINVAL;
7466 	}
7467 	return 0;
7468 }
7469 
7470 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7471 				struct flow_cls_offload *cls_flower,
7472 				int tc)
7473 {
7474 	struct hclge_vport *vport = hclge_get_vport(handle);
7475 	struct hclge_dev *hdev = vport->back;
7476 	struct hclge_fd_rule *rule;
7477 	int ret;
7478 
7479 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7480 	if (ret) {
7481 		dev_err(&hdev->pdev->dev,
7482 			"failed to check cls flower params, ret = %d\n", ret);
7483 		return ret;
7484 	}
7485 
7486 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7487 	if (!rule)
7488 		return -ENOMEM;
7489 
7490 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7491 	if (ret) {
7492 		kfree(rule);
7493 		return ret;
7494 	}
7495 
7496 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7497 	rule->cls_flower.tc = tc;
7498 	rule->location = cls_flower->common.prio - 1;
7499 	rule->vf_id = 0;
7500 	rule->cls_flower.cookie = cls_flower->cookie;
7501 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7502 
7503 	ret = hclge_add_fd_entry_common(hdev, rule);
7504 	if (ret)
7505 		kfree(rule);
7506 
7507 	return ret;
7508 }
7509 
7510 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7511 						   unsigned long cookie)
7512 {
7513 	struct hclge_fd_rule *rule;
7514 	struct hlist_node *node;
7515 
7516 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7517 		if (rule->cls_flower.cookie == cookie)
7518 			return rule;
7519 	}
7520 
7521 	return NULL;
7522 }
7523 
7524 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7525 				struct flow_cls_offload *cls_flower)
7526 {
7527 	struct hclge_vport *vport = hclge_get_vport(handle);
7528 	struct hclge_dev *hdev = vport->back;
7529 	struct hclge_fd_rule *rule;
7530 	int ret;
7531 
7532 	spin_lock_bh(&hdev->fd_rule_lock);
7533 
7534 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7535 	if (!rule) {
7536 		spin_unlock_bh(&hdev->fd_rule_lock);
7537 		return -EINVAL;
7538 	}
7539 
7540 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7541 				   NULL, false);
7542 	if (ret) {
7543 		spin_unlock_bh(&hdev->fd_rule_lock);
7544 		return ret;
7545 	}
7546 
7547 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7548 	spin_unlock_bh(&hdev->fd_rule_lock);
7549 
7550 	return 0;
7551 }
7552 
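/* Flush pending rule state to hardware: rules marked TO_ADD are written
 * to the TCAM and become ACTIVE, rules marked TO_DEL are removed from the
 * TCAM and freed. On failure the CHANGED bit is set again so the service
 * task retries later.
 */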
7553 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7554 {
7555 	struct hclge_fd_rule *rule;
7556 	struct hlist_node *node;
7557 	int ret = 0;
7558 
7559 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7560 		return;
7561 
7562 	spin_lock_bh(&hdev->fd_rule_lock);
7563 
7564 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7565 		switch (rule->state) {
7566 		case HCLGE_FD_TO_ADD:
7567 			ret = hclge_fd_config_rule(hdev, rule);
7568 			if (ret)
7569 				goto out;
7570 			rule->state = HCLGE_FD_ACTIVE;
7571 			break;
7572 		case HCLGE_FD_TO_DEL:
7573 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7574 						   rule->location, NULL, false);
7575 			if (ret)
7576 				goto out;
7577 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7578 			hclge_fd_free_node(hdev, rule);
7579 			break;
7580 		default:
7581 			break;
7582 		}
7583 	}
7584 
7585 out:
7586 	if (ret)
7587 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7588 
7589 	spin_unlock_bh(&hdev->fd_rule_lock);
7590 }
7591 
7592 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7593 {
7594 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7595 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7596 
7597 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7598 	}
7599 
7600 	hclge_sync_fd_user_def_cfg(hdev, false);
7601 
7602 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7603 }
7604 
7605 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7606 {
7607 	struct hclge_vport *vport = hclge_get_vport(handle);
7608 	struct hclge_dev *hdev = vport->back;
7609 
7610 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7611 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7612 }
7613 
7614 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7615 {
7616 	struct hclge_vport *vport = hclge_get_vport(handle);
7617 	struct hclge_dev *hdev = vport->back;
7618 
7619 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7620 }
7621 
7622 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7623 {
7624 	struct hclge_vport *vport = hclge_get_vport(handle);
7625 	struct hclge_dev *hdev = vport->back;
7626 
7627 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7628 }
7629 
7630 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7631 {
7632 	struct hclge_vport *vport = hclge_get_vport(handle);
7633 	struct hclge_dev *hdev = vport->back;
7634 
7635 	return hdev->rst_stats.hw_reset_done_cnt;
7636 }
7637 
7638 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7639 {
7640 	struct hclge_vport *vport = hclge_get_vport(handle);
7641 	struct hclge_dev *hdev = vport->back;
7642 
7643 	hdev->fd_en = enable;
7644 
7645 	if (!enable)
7646 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7647 	else
7648 		hclge_restore_fd_entries(handle);
7649 
7650 	hclge_task_schedule(hdev, 0);
7651 }
7652 
7653 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7654 {
7655 	struct hclge_desc desc;
7656 	struct hclge_config_mac_mode_cmd *req =
7657 		(struct hclge_config_mac_mode_cmd *)desc.data;
7658 	u32 loop_en = 0;
7659 	int ret;
7660 
7661 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7662 
7663 	if (enable) {
7664 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7665 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7666 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7667 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7668 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7669 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7670 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7671 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7672 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7673 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7674 	}
7675 
7676 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7677 
7678 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7679 	if (ret)
7680 		dev_err(&hdev->pdev->dev,
7681 			"mac enable fail, ret =%d.\n", ret);
7682 }
7683 
7684 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7685 				     u8 switch_param, u8 param_mask)
7686 {
7687 	struct hclge_mac_vlan_switch_cmd *req;
7688 	struct hclge_desc desc;
7689 	u32 func_id;
7690 	int ret;
7691 
7692 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7693 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7694 
7695 	/* read current config parameter */
7696 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7697 				   true);
7698 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7699 	req->func_id = cpu_to_le32(func_id);
7700 
7701 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7702 	if (ret) {
7703 		dev_err(&hdev->pdev->dev,
7704 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7705 		return ret;
7706 	}
7707 
7708 	/* modify and write new config parameter */
7709 	hclge_cmd_reuse_desc(&desc, false);
7710 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7711 	req->param_mask = param_mask;
7712 
7713 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7714 	if (ret)
7715 		dev_err(&hdev->pdev->dev,
7716 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7717 	return ret;
7718 }
7719 
7720 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7721 				       int link_ret)
7722 {
7723 #define HCLGE_PHY_LINK_STATUS_NUM  200
7724 
7725 	struct phy_device *phydev = hdev->hw.mac.phydev;
7726 	int i = 0;
7727 	int ret;
7728 
7729 	do {
7730 		ret = phy_read_status(phydev);
7731 		if (ret) {
7732 			dev_err(&hdev->pdev->dev,
7733 				"phy update link status fail, ret = %d\n", ret);
7734 			return;
7735 		}
7736 
7737 		if (phydev->link == link_ret)
7738 			break;
7739 
7740 		msleep(HCLGE_LINK_STATUS_MS);
7741 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7742 }
7743 
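/* Poll the MAC link status until it matches the expected state, checking
 * up to HCLGE_MAC_LINK_STATUS_NUM times with HCLGE_LINK_STATUS_MS delay.
 * Returns -EBUSY on timeout.
 */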
7744 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7745 {
7746 #define HCLGE_MAC_LINK_STATUS_NUM  100
7747 
7748 	int link_status;
7749 	int i = 0;
7750 	int ret;
7751 
7752 	do {
7753 		ret = hclge_get_mac_link_status(hdev, &link_status);
7754 		if (ret)
7755 			return ret;
7756 		if (link_status == link_ret)
7757 			return 0;
7758 
7759 		msleep(HCLGE_LINK_STATUS_MS);
7760 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7761 	return -EBUSY;
7762 }
7763 
7764 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7765 					  bool is_phy)
7766 {
7767 	int link_ret;
7768 
7769 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7770 
7771 	if (is_phy)
7772 		hclge_phy_link_status_wait(hdev, link_ret);
7773 
7774 	return hclge_mac_link_status_wait(hdev, link_ret);
7775 }
7776 
7777 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7778 {
7779 	struct hclge_config_mac_mode_cmd *req;
7780 	struct hclge_desc desc;
7781 	u32 loop_en;
7782 	int ret;
7783 
7784 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
7786 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7787 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7788 	if (ret) {
7789 		dev_err(&hdev->pdev->dev,
7790 			"mac loopback get fail, ret =%d.\n", ret);
7791 		return ret;
7792 	}
7793 
7794 	/* 2 Then setup the loopback flag */
7795 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7796 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7797 
7798 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7799 
	/* 3 Config mac work mode with the loopback flag
	 * and its original configuration parameters
	 */
7803 	hclge_cmd_reuse_desc(&desc, false);
7804 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7805 	if (ret)
7806 		dev_err(&hdev->pdev->dev,
7807 			"mac loopback set fail, ret =%d.\n", ret);
7808 	return ret;
7809 }
7810 
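/* Enable or disable a serdes/phy common loopback via firmware, then poll
 * the command result until the DONE bit is reported, and check that the
 * SUCCESS bit is set.
 */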
7811 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7812 				     enum hnae3_loop loop_mode)
7813 {
7814 #define HCLGE_COMMON_LB_RETRY_MS	10
7815 #define HCLGE_COMMON_LB_RETRY_NUM	100
7816 
7817 	struct hclge_common_lb_cmd *req;
7818 	struct hclge_desc desc;
7819 	int ret, i = 0;
7820 	u8 loop_mode_b;
7821 
7822 	req = (struct hclge_common_lb_cmd *)desc.data;
7823 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7824 
7825 	switch (loop_mode) {
7826 	case HNAE3_LOOP_SERIAL_SERDES:
7827 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7828 		break;
7829 	case HNAE3_LOOP_PARALLEL_SERDES:
7830 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7831 		break;
7832 	case HNAE3_LOOP_PHY:
7833 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7834 		break;
7835 	default:
7836 		dev_err(&hdev->pdev->dev,
7837 			"unsupported common loopback mode %d\n", loop_mode);
7838 		return -ENOTSUPP;
7839 	}
7840 
7841 	if (en) {
7842 		req->enable = loop_mode_b;
7843 		req->mask = loop_mode_b;
7844 	} else {
7845 		req->mask = loop_mode_b;
7846 	}
7847 
7848 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7849 	if (ret) {
7850 		dev_err(&hdev->pdev->dev,
7851 			"common loopback set fail, ret = %d\n", ret);
7852 		return ret;
7853 	}
7854 
7855 	do {
7856 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7857 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7858 					   true);
7859 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7860 		if (ret) {
7861 			dev_err(&hdev->pdev->dev,
7862 				"common loopback get, ret = %d\n", ret);
7863 			return ret;
7864 		}
7865 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7866 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7867 
7868 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7869 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7870 		return -EBUSY;
7871 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7872 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7873 		return -EIO;
7874 	}
7875 	return ret;
7876 }
7877 
7878 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7879 				     enum hnae3_loop loop_mode)
7880 {
7881 	int ret;
7882 
7883 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7884 	if (ret)
7885 		return ret;
7886 
7887 	hclge_cfg_mac_mode(hdev, en);
7888 
7889 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7890 	if (ret)
7891 		dev_err(&hdev->pdev->dev,
7892 			"serdes loopback config mac mode timeout\n");
7893 
7894 	return ret;
7895 }
7896 
7897 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7898 				     struct phy_device *phydev)
7899 {
7900 	int ret;
7901 
7902 	if (!phydev->suspended) {
7903 		ret = phy_suspend(phydev);
7904 		if (ret)
7905 			return ret;
7906 	}
7907 
7908 	ret = phy_resume(phydev);
7909 	if (ret)
7910 		return ret;
7911 
7912 	return phy_loopback(phydev, true);
7913 }
7914 
7915 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7916 				      struct phy_device *phydev)
7917 {
7918 	int ret;
7919 
7920 	ret = phy_loopback(phydev, false);
7921 	if (ret)
7922 		return ret;
7923 
7924 	return phy_suspend(phydev);
7925 }
7926 
7927 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7928 {
7929 	struct phy_device *phydev = hdev->hw.mac.phydev;
7930 	int ret;
7931 
7932 	if (!phydev) {
7933 		if (hnae3_dev_phy_imp_supported(hdev))
7934 			return hclge_set_common_loopback(hdev, en,
7935 							 HNAE3_LOOP_PHY);
7936 		return -ENOTSUPP;
7937 	}
7938 
7939 	if (en)
7940 		ret = hclge_enable_phy_loopback(hdev, phydev);
7941 	else
7942 		ret = hclge_disable_phy_loopback(hdev, phydev);
7943 	if (ret) {
7944 		dev_err(&hdev->pdev->dev,
7945 			"set phy loopback fail, ret = %d\n", ret);
7946 		return ret;
7947 	}
7948 
7949 	hclge_cfg_mac_mode(hdev, en);
7950 
7951 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7952 	if (ret)
7953 		dev_err(&hdev->pdev->dev,
7954 			"phy loopback config mac mode timeout\n");
7955 
7956 	return ret;
7957 }
7958 
7959 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7960 				     u16 stream_id, bool enable)
7961 {
7962 	struct hclge_desc desc;
7963 	struct hclge_cfg_com_tqp_queue_cmd *req =
7964 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7965 
7966 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7967 	req->tqp_id = cpu_to_le16(tqp_id);
7968 	req->stream_id = cpu_to_le16(stream_id);
7969 	if (enable)
7970 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7971 
7972 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7973 }
7974 
7975 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7976 {
7977 	struct hclge_vport *vport = hclge_get_vport(handle);
7978 	struct hclge_dev *hdev = vport->back;
7979 	int ret;
7980 	u16 i;
7981 
7982 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7983 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7984 		if (ret)
7985 			return ret;
7986 	}
7987 	return 0;
7988 }
7989 
7990 static int hclge_set_loopback(struct hnae3_handle *handle,
7991 			      enum hnae3_loop loop_mode, bool en)
7992 {
7993 	struct hclge_vport *vport = hclge_get_vport(handle);
7994 	struct hclge_dev *hdev = vport->back;
7995 	int ret;
7996 
7997 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7998 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7999 	 * the same, the packets are looped back in the SSU. If SSU loopback
8000 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8001 	 */
8002 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8003 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8004 
8005 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8006 						HCLGE_SWITCH_ALW_LPBK_MASK);
8007 		if (ret)
8008 			return ret;
8009 	}
8010 
8011 	switch (loop_mode) {
8012 	case HNAE3_LOOP_APP:
8013 		ret = hclge_set_app_loopback(hdev, en);
8014 		break;
8015 	case HNAE3_LOOP_SERIAL_SERDES:
8016 	case HNAE3_LOOP_PARALLEL_SERDES:
8017 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
8018 		break;
8019 	case HNAE3_LOOP_PHY:
8020 		ret = hclge_set_phy_loopback(hdev, en);
8021 		break;
8022 	default:
8023 		ret = -ENOTSUPP;
8024 		dev_err(&hdev->pdev->dev,
8025 			"loop_mode %d is not supported\n", loop_mode);
8026 		break;
8027 	}
8028 
8029 	if (ret)
8030 		return ret;
8031 
8032 	ret = hclge_tqp_enable(handle, en);
8033 	if (ret)
8034 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8035 			en ? "enable" : "disable", ret);
8036 
8037 	return ret;
8038 }
8039 
8040 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8041 {
8042 	int ret;
8043 
8044 	ret = hclge_set_app_loopback(hdev, false);
8045 	if (ret)
8046 		return ret;
8047 
8048 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8049 	if (ret)
8050 		return ret;
8051 
8052 	return hclge_cfg_common_loopback(hdev, false,
8053 					 HNAE3_LOOP_PARALLEL_SERDES);
8054 }
8055 
8056 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8057 {
8058 	struct hclge_vport *vport = hclge_get_vport(handle);
8059 	struct hnae3_knic_private_info *kinfo;
8060 	struct hnae3_queue *queue;
8061 	struct hclge_tqp *tqp;
8062 	int i;
8063 
8064 	kinfo = &vport->nic.kinfo;
8065 	for (i = 0; i < kinfo->num_tqps; i++) {
8066 		queue = handle->kinfo.tqp[i];
8067 		tqp = container_of(queue, struct hclge_tqp, q);
8068 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8069 	}
8070 }
8071 
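/* Wait for an in-flight link status update in the service task to finish,
 * bounded by HCLGE_FLUSH_LINK_TIMEOUT iterations or one completed service
 * cycle (serv_processed_cnt change).
 */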
8072 static void hclge_flush_link_update(struct hclge_dev *hdev)
8073 {
8074 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8075 
8076 	unsigned long last = hdev->serv_processed_cnt;
8077 	int i = 0;
8078 
8079 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8080 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8081 	       last == hdev->serv_processed_cnt)
8082 		usleep_range(1, 1);
8083 }
8084 
8085 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8086 {
8087 	struct hclge_vport *vport = hclge_get_vport(handle);
8088 	struct hclge_dev *hdev = vport->back;
8089 
8090 	if (enable) {
8091 		hclge_task_schedule(hdev, 0);
8092 	} else {
8093 		/* Set the DOWN flag here to disable link updating */
8094 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8095 
8096 		/* flush memory to make sure DOWN is seen by service task */
8097 		smp_mb__before_atomic();
8098 		hclge_flush_link_update(hdev);
8099 	}
8100 }
8101 
8102 static int hclge_ae_start(struct hnae3_handle *handle)
8103 {
8104 	struct hclge_vport *vport = hclge_get_vport(handle);
8105 	struct hclge_dev *hdev = vport->back;
8106 
8107 	/* mac enable */
8108 	hclge_cfg_mac_mode(hdev, true);
8109 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8110 	hdev->hw.mac.link = 0;
8111 
8112 	/* reset tqp stats */
8113 	hclge_reset_tqp_stats(handle);
8114 
8115 	hclge_mac_start_phy(hdev);
8116 
8117 	return 0;
8118 }
8119 
8120 static void hclge_ae_stop(struct hnae3_handle *handle)
8121 {
8122 	struct hclge_vport *vport = hclge_get_vport(handle);
8123 	struct hclge_dev *hdev = vport->back;
8124 
8125 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8126 	spin_lock_bh(&hdev->fd_rule_lock);
8127 	hclge_clear_arfs_rules(hdev);
8128 	spin_unlock_bh(&hdev->fd_rule_lock);
8129 
	/* If it is not PF reset or FLR, the firmware will disable the MAC,
	 * so it only needs to stop the PHY here.
	 */
8133 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8134 	    hdev->reset_type != HNAE3_FUNC_RESET &&
8135 	    hdev->reset_type != HNAE3_FLR_RESET) {
8136 		hclge_mac_stop_phy(hdev);
8137 		hclge_update_link_status(hdev);
8138 		return;
8139 	}
8140 
8141 	hclge_reset_tqp(handle);
8142 
8143 	hclge_config_mac_tnl_int(hdev, false);
8144 
8145 	/* Mac disable */
8146 	hclge_cfg_mac_mode(hdev, false);
8147 
8148 	hclge_mac_stop_phy(hdev);
8149 
8150 	/* reset tqp stats */
8151 	hclge_reset_tqp_stats(handle);
8152 	hclge_update_link_status(hdev);
8153 }
8154 
8155 int hclge_vport_start(struct hclge_vport *vport)
8156 {
8157 	struct hclge_dev *hdev = vport->back;
8158 
8159 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8160 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8161 	vport->last_active_jiffies = jiffies;
8162 
8163 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8164 		if (vport->vport_id) {
8165 			hclge_restore_mac_table_common(vport);
8166 			hclge_restore_vport_vlan_table(vport);
8167 		} else {
8168 			hclge_restore_hw_table(hdev);
8169 		}
8170 	}
8171 
8172 	clear_bit(vport->vport_id, hdev->vport_config_block);
8173 
8174 	return 0;
8175 }
8176 
8177 void hclge_vport_stop(struct hclge_vport *vport)
8178 {
8179 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8180 }
8181 
8182 static int hclge_client_start(struct hnae3_handle *handle)
8183 {
8184 	struct hclge_vport *vport = hclge_get_vport(handle);
8185 
8186 	return hclge_vport_start(vport);
8187 }
8188 
8189 static void hclge_client_stop(struct hnae3_handle *handle)
8190 {
8191 	struct hclge_vport *vport = hclge_get_vport(handle);
8192 
8193 	hclge_vport_stop(vport);
8194 }
8195 
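/* Translate the command queue status and per-entry response code of a
 * MAC-VLAN table command into an errno, depending on whether the
 * operation was an add, remove or lookup.
 */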
8196 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8197 					 u16 cmdq_resp, u8  resp_code,
8198 					 enum hclge_mac_vlan_tbl_opcode op)
8199 {
8200 	struct hclge_dev *hdev = vport->back;
8201 
8202 	if (cmdq_resp) {
8203 		dev_err(&hdev->pdev->dev,
8204 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8205 			cmdq_resp);
8206 		return -EIO;
8207 	}
8208 
8209 	if (op == HCLGE_MAC_VLAN_ADD) {
8210 		if (!resp_code || resp_code == 1)
8211 			return 0;
8212 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8213 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8214 			return -ENOSPC;
8215 
8216 		dev_err(&hdev->pdev->dev,
8217 			"add mac addr failed for undefined, code=%u.\n",
8218 			resp_code);
8219 		return -EIO;
8220 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8221 		if (!resp_code) {
8222 			return 0;
8223 		} else if (resp_code == 1) {
8224 			dev_dbg(&hdev->pdev->dev,
8225 				"remove mac addr failed for miss.\n");
8226 			return -ENOENT;
8227 		}
8228 
8229 		dev_err(&hdev->pdev->dev,
8230 			"remove mac addr failed for undefined, code=%u.\n",
8231 			resp_code);
8232 		return -EIO;
8233 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8234 		if (!resp_code) {
8235 			return 0;
8236 		} else if (resp_code == 1) {
8237 			dev_dbg(&hdev->pdev->dev,
8238 				"lookup mac addr failed for miss.\n");
8239 			return -ENOENT;
8240 		}
8241 
8242 		dev_err(&hdev->pdev->dev,
8243 			"lookup mac addr failed for undefined, code=%u.\n",
8244 			resp_code);
8245 		return -EIO;
8246 	}
8247 
8248 	dev_err(&hdev->pdev->dev,
8249 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8250 
8251 	return -EINVAL;
8252 }
8253 
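/* Set or clear the bit for @vfid in the function id bitmap of a MAC-VLAN
 * table entry. The bitmap spans desc[1] (the first 192 function ids) and
 * desc[2] (the rest).
 */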
8254 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8255 {
8256 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8257 
8258 	unsigned int word_num;
8259 	unsigned int bit_num;
8260 
8261 	if (vfid > 255 || vfid < 0)
8262 		return -EIO;
8263 
8264 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8265 		word_num = vfid / 32;
8266 		bit_num  = vfid % 32;
8267 		if (clr)
8268 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8269 		else
8270 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8271 	} else {
8272 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8273 		bit_num  = vfid % 32;
8274 		if (clr)
8275 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8276 		else
8277 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8278 	}
8279 
8280 	return 0;
8281 }
8282 
8283 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8284 {
8285 #define HCLGE_DESC_NUMBER 3
8286 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8287 	int i, j;
8288 
8289 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8290 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8291 			if (desc[i].data[j])
8292 				return false;
8293 
8294 	return true;
8295 }
8296 
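/* Pack the 6-byte MAC address into the table entry: bytes 0-3 go into
 * mac_addr_hi32 and bytes 4-5 into mac_addr_lo16, both little-endian.
 * The entry enable bit is set, plus the multicast bits when requested.
 */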
8297 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8298 				   const u8 *addr, bool is_mc)
8299 {
8300 	const unsigned char *mac_addr = addr;
8301 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8302 		       (mac_addr[0]) | (mac_addr[1] << 8);
8303 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8304 
8305 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8306 	if (is_mc) {
8307 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8308 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8309 	}
8310 
8311 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8312 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8313 }
8314 
8315 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8316 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8317 {
8318 	struct hclge_dev *hdev = vport->back;
8319 	struct hclge_desc desc;
8320 	u8 resp_code;
8321 	u16 retval;
8322 	int ret;
8323 
8324 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8325 
8326 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8327 
8328 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8329 	if (ret) {
8330 		dev_err(&hdev->pdev->dev,
8331 			"del mac addr failed for cmd_send, ret =%d.\n",
8332 			ret);
8333 		return ret;
8334 	}
8335 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8336 	retval = le16_to_cpu(desc.retval);
8337 
8338 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8339 					     HCLGE_MAC_VLAN_REMOVE);
8340 }
8341 
8342 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8343 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8344 				     struct hclge_desc *desc,
8345 				     bool is_mc)
8346 {
8347 	struct hclge_dev *hdev = vport->back;
8348 	u8 resp_code;
8349 	u16 retval;
8350 	int ret;
8351 
8352 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8353 	if (is_mc) {
8354 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8355 		memcpy(desc[0].data,
8356 		       req,
8357 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8358 		hclge_cmd_setup_basic_desc(&desc[1],
8359 					   HCLGE_OPC_MAC_VLAN_ADD,
8360 					   true);
8361 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8362 		hclge_cmd_setup_basic_desc(&desc[2],
8363 					   HCLGE_OPC_MAC_VLAN_ADD,
8364 					   true);
8365 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8366 	} else {
8367 		memcpy(desc[0].data,
8368 		       req,
8369 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8370 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8371 	}
8372 	if (ret) {
8373 		dev_err(&hdev->pdev->dev,
8374 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8375 			ret);
8376 		return ret;
8377 	}
8378 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8379 	retval = le16_to_cpu(desc[0].retval);
8380 
8381 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8382 					     HCLGE_MAC_VLAN_LKUP);
8383 }
8384 
8385 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8386 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8387 				  struct hclge_desc *mc_desc)
8388 {
8389 	struct hclge_dev *hdev = vport->back;
8390 	int cfg_status;
8391 	u8 resp_code;
8392 	u16 retval;
8393 	int ret;
8394 
8395 	if (!mc_desc) {
8396 		struct hclge_desc desc;
8397 
8398 		hclge_cmd_setup_basic_desc(&desc,
8399 					   HCLGE_OPC_MAC_VLAN_ADD,
8400 					   false);
8401 		memcpy(desc.data, req,
8402 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8403 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8404 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8405 		retval = le16_to_cpu(desc.retval);
8406 
8407 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8408 							   resp_code,
8409 							   HCLGE_MAC_VLAN_ADD);
8410 	} else {
8411 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8412 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8413 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8414 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8415 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8416 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8417 		memcpy(mc_desc[0].data, req,
8418 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8419 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8420 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8421 		retval = le16_to_cpu(mc_desc[0].retval);
8422 
8423 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8424 							   resp_code,
8425 							   HCLGE_MAC_VLAN_ADD);
8426 	}
8427 
8428 	if (ret) {
8429 		dev_err(&hdev->pdev->dev,
8430 			"add mac addr failed for cmd_send, ret =%d.\n",
8431 			ret);
8432 		return ret;
8433 	}
8434 
8435 	return cfg_status;
8436 }
8437 
8438 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8439 			       u16 *allocated_size)
8440 {
8441 	struct hclge_umv_spc_alc_cmd *req;
8442 	struct hclge_desc desc;
8443 	int ret;
8444 
8445 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8446 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8447 
8448 	req->space_size = cpu_to_le32(space_size);
8449 
8450 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8451 	if (ret) {
8452 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8453 			ret);
8454 		return ret;
8455 	}
8456 
8457 	*allocated_size = le32_to_cpu(desc.data[1]);
8458 
8459 	return 0;
8460 }
8461 
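/* Ask the firmware for the wanted unicast MAC (UMV) table space and split
 * what is granted into a per-vport private quota, with the remainder going
 * to a shared pool used once a vport exhausts its private quota.
 */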
8462 static int hclge_init_umv_space(struct hclge_dev *hdev)
8463 {
8464 	u16 allocated_size = 0;
8465 	int ret;
8466 
8467 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8468 	if (ret)
8469 		return ret;
8470 
8471 	if (allocated_size < hdev->wanted_umv_size)
8472 		dev_warn(&hdev->pdev->dev,
8473 			 "failed to alloc umv space, want %u, get %u\n",
8474 			 hdev->wanted_umv_size, allocated_size);
8475 
8476 	hdev->max_umv_size = allocated_size;
8477 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8478 	hdev->share_umv_size = hdev->priv_umv_size +
8479 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8480 
8481 	return 0;
8482 }
8483 
8484 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8485 {
8486 	struct hclge_vport *vport;
8487 	int i;
8488 
8489 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8490 		vport = &hdev->vport[i];
8491 		vport->used_umv_num = 0;
8492 	}
8493 
8494 	mutex_lock(&hdev->vport_lock);
8495 	hdev->share_umv_size = hdev->priv_umv_size +
8496 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8497 	mutex_unlock(&hdev->vport_lock);
8498 }
8499 
8500 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8501 {
8502 	struct hclge_dev *hdev = vport->back;
8503 	bool is_full;
8504 
8505 	if (need_lock)
8506 		mutex_lock(&hdev->vport_lock);
8507 
8508 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8509 		   hdev->share_umv_size == 0);
8510 
8511 	if (need_lock)
8512 		mutex_unlock(&hdev->vport_lock);
8513 
8514 	return is_full;
8515 }
8516 
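/* Account a unicast MAC entry against the vport's private UMV quota,
 * spilling into (or giving back to) the shared pool once the private
 * quota is exhausted.
 */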
8517 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8518 {
8519 	struct hclge_dev *hdev = vport->back;
8520 
8521 	if (is_free) {
8522 		if (vport->used_umv_num > hdev->priv_umv_size)
8523 			hdev->share_umv_size++;
8524 
8525 		if (vport->used_umv_num > 0)
8526 			vport->used_umv_num--;
8527 	} else {
8528 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8529 		    hdev->share_umv_size > 0)
8530 			hdev->share_umv_size--;
8531 		vport->used_umv_num++;
8532 	}
8533 }
8534 
8535 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8536 						  const u8 *mac_addr)
8537 {
8538 	struct hclge_mac_node *mac_node, *tmp;
8539 
8540 	list_for_each_entry_safe(mac_node, tmp, list, node)
8541 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8542 			return mac_node;
8543 
8544 	return NULL;
8545 }
8546 
8547 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8548 				  enum HCLGE_MAC_NODE_STATE state)
8549 {
8550 	switch (state) {
8551 	/* from set_rx_mode or tmp_add_list */
8552 	case HCLGE_MAC_TO_ADD:
8553 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8554 			mac_node->state = HCLGE_MAC_ACTIVE;
8555 		break;
8556 	/* only from set_rx_mode */
8557 	case HCLGE_MAC_TO_DEL:
8558 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8559 			list_del(&mac_node->node);
8560 			kfree(mac_node);
8561 		} else {
8562 			mac_node->state = HCLGE_MAC_TO_DEL;
8563 		}
8564 		break;
8565 	/* only from tmp_add_list, the mac_node->state won't be
8566 	 * ACTIVE.
8567 	 */
8568 	case HCLGE_MAC_ACTIVE:
8569 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8570 			mac_node->state = HCLGE_MAC_ACTIVE;
8571 
8572 		break;
8573 	}
8574 }
8575 
8576 int hclge_update_mac_list(struct hclge_vport *vport,
8577 			  enum HCLGE_MAC_NODE_STATE state,
8578 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8579 			  const unsigned char *addr)
8580 {
8581 	struct hclge_dev *hdev = vport->back;
8582 	struct hclge_mac_node *mac_node;
8583 	struct list_head *list;
8584 
8585 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8586 		&vport->uc_mac_list : &vport->mc_mac_list;
8587 
8588 	spin_lock_bh(&vport->mac_list_lock);
8589 
	/* if the mac addr is already in the mac list, there is no need to add
	 * a new one; just check the mac addr state, convert it to a new state,
	 * remove it, or do nothing.
	 */
8594 	mac_node = hclge_find_mac_node(list, addr);
8595 	if (mac_node) {
8596 		hclge_update_mac_node(mac_node, state);
8597 		spin_unlock_bh(&vport->mac_list_lock);
8598 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8599 		return 0;
8600 	}
8601 
	/* if this address was never added, there is no need to delete it */
8603 	if (state == HCLGE_MAC_TO_DEL) {
8604 		spin_unlock_bh(&vport->mac_list_lock);
8605 		dev_err(&hdev->pdev->dev,
8606 			"failed to delete address %pM from mac list\n",
8607 			addr);
8608 		return -ENOENT;
8609 	}
8610 
8611 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8612 	if (!mac_node) {
8613 		spin_unlock_bh(&vport->mac_list_lock);
8614 		return -ENOMEM;
8615 	}
8616 
8617 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8618 
8619 	mac_node->state = state;
8620 	ether_addr_copy(mac_node->mac_addr, addr);
8621 	list_add_tail(&mac_node->node, list);
8622 
8623 	spin_unlock_bh(&vport->mac_list_lock);
8624 
8625 	return 0;
8626 }
8627 
8628 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8629 			     const unsigned char *addr)
8630 {
8631 	struct hclge_vport *vport = hclge_get_vport(handle);
8632 
8633 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8634 				     addr);
8635 }
8636 
8637 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8638 			     const unsigned char *addr)
8639 {
8640 	struct hclge_dev *hdev = vport->back;
8641 	struct hclge_mac_vlan_tbl_entry_cmd req;
8642 	struct hclge_desc desc;
8643 	u16 egress_port = 0;
8644 	int ret;
8645 
8646 	/* mac addr check */
8647 	if (is_zero_ether_addr(addr) ||
8648 	    is_broadcast_ether_addr(addr) ||
8649 	    is_multicast_ether_addr(addr)) {
8650 		dev_err(&hdev->pdev->dev,
8651 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8652 			 addr, is_zero_ether_addr(addr),
8653 			 is_broadcast_ether_addr(addr),
8654 			 is_multicast_ether_addr(addr));
8655 		return -EINVAL;
8656 	}
8657 
8658 	memset(&req, 0, sizeof(req));
8659 
8660 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8661 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8662 
8663 	req.egress_port = cpu_to_le16(egress_port);
8664 
8665 	hclge_prepare_mac_addr(&req, addr, false);
8666 
	/* Look up the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac vlan table.
	 */
8671 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8672 	if (ret == -ENOENT) {
8673 		mutex_lock(&hdev->vport_lock);
8674 		if (!hclge_is_umv_space_full(vport, false)) {
8675 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8676 			if (!ret)
8677 				hclge_update_umv_space(vport, false);
8678 			mutex_unlock(&hdev->vport_lock);
8679 			return ret;
8680 		}
8681 		mutex_unlock(&hdev->vport_lock);
8682 
8683 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8684 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8685 				hdev->priv_umv_size);
8686 
8687 		return -ENOSPC;
8688 	}
8689 
	/* check if we just hit a duplicate entry */
8691 	if (!ret) {
8692 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8693 			 vport->vport_id, addr);
8694 		return 0;
8695 	}
8696 
8697 	dev_err(&hdev->pdev->dev,
8698 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8699 		addr);
8700 
8701 	return ret;
8702 }
8703 
8704 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8705 			    const unsigned char *addr)
8706 {
8707 	struct hclge_vport *vport = hclge_get_vport(handle);
8708 
8709 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8710 				     addr);
8711 }
8712 
8713 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8714 			    const unsigned char *addr)
8715 {
8716 	struct hclge_dev *hdev = vport->back;
8717 	struct hclge_mac_vlan_tbl_entry_cmd req;
8718 	int ret;
8719 
8720 	/* mac addr check */
8721 	if (is_zero_ether_addr(addr) ||
8722 	    is_broadcast_ether_addr(addr) ||
8723 	    is_multicast_ether_addr(addr)) {
8724 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8725 			addr);
8726 		return -EINVAL;
8727 	}
8728 
8729 	memset(&req, 0, sizeof(req));
8730 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8731 	hclge_prepare_mac_addr(&req, addr, false);
8732 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8733 	if (!ret) {
8734 		mutex_lock(&hdev->vport_lock);
8735 		hclge_update_umv_space(vport, true);
8736 		mutex_unlock(&hdev->vport_lock);
8737 	} else if (ret == -ENOENT) {
8738 		ret = 0;
8739 	}
8740 
8741 	return ret;
8742 }
8743 
8744 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8745 			     const unsigned char *addr)
8746 {
8747 	struct hclge_vport *vport = hclge_get_vport(handle);
8748 
8749 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8750 				     addr);
8751 }
8752 
8753 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8754 			     const unsigned char *addr)
8755 {
8756 	struct hclge_dev *hdev = vport->back;
8757 	struct hclge_mac_vlan_tbl_entry_cmd req;
8758 	struct hclge_desc desc[3];
8759 	int status;
8760 
8761 	/* mac addr check */
8762 	if (!is_multicast_ether_addr(addr)) {
8763 		dev_err(&hdev->pdev->dev,
8764 			"Add mc mac err! invalid mac:%pM.\n",
8765 			 addr);
8766 		return -EINVAL;
8767 	}
8768 	memset(&req, 0, sizeof(req));
8769 	hclge_prepare_mac_addr(&req, addr, true);
8770 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8771 	if (status) {
8772 		/* This mac addr does not exist, add a new entry for it */
8773 		memset(desc[0].data, 0, sizeof(desc[0].data));
8774 		memset(desc[1].data, 0, sizeof(desc[1].data));
8775 		memset(desc[2].data, 0, sizeof(desc[2].data));
8776 	}
8777 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8778 	if (status)
8779 		return status;
8780 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8781 	/* if the table has already overflowed, do not print each time */
8782 	if (status == -ENOSPC &&
8783 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8784 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8785 
8786 	return status;
8787 }
8788 
8789 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8790 			    const unsigned char *addr)
8791 {
8792 	struct hclge_vport *vport = hclge_get_vport(handle);
8793 
8794 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8795 				     addr);
8796 }
8797 
8798 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8799 			    const unsigned char *addr)
8800 {
8801 	struct hclge_dev *hdev = vport->back;
8802 	struct hclge_mac_vlan_tbl_entry_cmd req;
8803 	struct hclge_desc desc[3];
8804 	int status;
8805 
8806 	/* mac addr check */
8807 	if (!is_multicast_ether_addr(addr)) {
8808 		dev_dbg(&hdev->pdev->dev,
8809 			"Remove mc mac err! invalid mac:%pM.\n",
8810 			 addr);
8811 		return -EINVAL;
8812 	}
8813 
8814 	memset(&req, 0, sizeof(req));
8815 	hclge_prepare_mac_addr(&req, addr, true);
8816 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8817 	if (!status) {
8818 		/* This mac addr exists, remove this handle's VFID from it */
8819 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8820 		if (status)
8821 			return status;
8822 
8823 		if (hclge_is_all_function_id_zero(desc))
8824 			/* All the vfid bits are zero, so delete this entry */
8825 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8826 		else
8827 			/* Not all the vfid bits are zero, so update the entry */
8828 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8829 	} else if (status == -ENOENT) {
8830 		status = 0;
8831 	}
8832 
8833 	return status;
8834 }
8835 
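/* Try to add each mac address in @list to the hardware mac table by
 * calling @sync. Entries added successfully are marked ACTIVE; on the
 * first failure, flag the vport for a later retry and stop.
 */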
8836 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8837 				      struct list_head *list,
8838 				      int (*sync)(struct hclge_vport *,
8839 						  const unsigned char *))
8840 {
8841 	struct hclge_mac_node *mac_node, *tmp;
8842 	int ret;
8843 
8844 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8845 		ret = sync(vport, mac_node->mac_addr);
8846 		if (!ret) {
8847 			mac_node->state = HCLGE_MAC_ACTIVE;
8848 		} else {
8849 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8850 				&vport->state);
8851 			break;
8852 		}
8853 	}
8854 }
8855 
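/* Try to remove each mac address in @list from the hardware mac table by
 * calling @unsync. Entries removed successfully (or already absent) are
 * deleted from the list; on any other failure, flag the vport for a later
 * retry and stop.
 */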
8856 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8857 					struct list_head *list,
8858 					int (*unsync)(struct hclge_vport *,
8859 						      const unsigned char *))
8860 {
8861 	struct hclge_mac_node *mac_node, *tmp;
8862 	int ret;
8863 
8864 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8865 		ret = unsync(vport, mac_node->mac_addr);
8866 		if (!ret || ret == -ENOENT) {
8867 			list_del(&mac_node->node);
8868 			kfree(mac_node);
8869 		} else {
8870 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8871 				&vport->state);
8872 			break;
8873 		}
8874 	}
8875 }
8876 
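/* Merge the entries of @add_list back into @mac_list after the hardware
 * update. Returns true only if every entry had actually been added, i.e.
 * none of them is still in the TO_ADD state.
 */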
8877 static bool hclge_sync_from_add_list(struct list_head *add_list,
8878 				     struct list_head *mac_list)
8879 {
8880 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8881 	bool all_added = true;
8882 
8883 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8884 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8885 			all_added = false;
8886 
8887 		/* If the mac address from tmp_add_list is not in the
8888 		 * uc/mc_mac_list, a TO_DEL request was received during the
8889 		 * time window of adding the mac address into the mac table.
8890 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
8891 		 * will be removed next time. Otherwise it must be TO_ADD and
8892 		 * this address has not been added into the mac table yet,
8893 		 * so just remove the mac node.
8894 		 */
8895 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8896 		if (new_node) {
8897 			hclge_update_mac_node(new_node, mac_node->state);
8898 			list_del(&mac_node->node);
8899 			kfree(mac_node);
8900 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8901 			mac_node->state = HCLGE_MAC_TO_DEL;
8902 			list_move_tail(&mac_node->node, mac_list);
8903 		} else {
8904 			list_del(&mac_node->node);
8905 			kfree(mac_node);
8906 		}
8907 	}
8908 
8909 	return all_added;
8910 }
8911 
8912 static void hclge_sync_from_del_list(struct list_head *del_list,
8913 				     struct list_head *mac_list)
8914 {
8915 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8916 
8917 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8918 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8919 		if (new_node) {
8920 			/* If the mac addr exists in the mac list, a new
8921 			 * TO_ADD request was received during the time window
8922 			 * of configuring the mac address. The mac node state
8923 			 * is TO_ADD, and the address is still present in the
8924 			 * hardware (because the delete failed), so just
8925 			 * change the mac node state to ACTIVE.
8926 			 */
8927 			new_node->state = HCLGE_MAC_ACTIVE;
8928 			list_del(&mac_node->node);
8929 			kfree(mac_node);
8930 		} else {
8931 			list_move_tail(&mac_node->node, mac_list);
8932 		}
8933 	}
8934 }
8935 
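/* Set or clear the unicast/multicast overflow promisc flag of @vport,
 * depending on whether all mac addresses of @mac_type were successfully
 * added to the hardware mac table.
 */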
8936 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8937 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8938 					bool is_all_added)
8939 {
8940 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8941 		if (is_all_added)
8942 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8943 		else
8944 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8945 	} else {
8946 		if (is_all_added)
8947 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8948 		else
8949 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8950 	}
8951 }
8952 
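/* Write the pending mac address changes of @vport for @mac_type to the
 * hardware mac table: entries marked TO_DEL are removed first to free up
 * table space, entries marked TO_ADD are added afterwards, and any
 * failed entries are merged back into the list to be retried later.
 */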
8953 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8954 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8955 {
8956 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8957 	struct list_head tmp_add_list, tmp_del_list;
8958 	struct list_head *list;
8959 	bool all_added;
8960 
8961 	INIT_LIST_HEAD(&tmp_add_list);
8962 	INIT_LIST_HEAD(&tmp_del_list);
8963 
8964 	/* move the mac addrs to the tmp_add_list and tmp_del_list, so they
8965 	 * can be added/deleted outside the spin lock
8966 	 */
8967 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8968 		&vport->uc_mac_list : &vport->mc_mac_list;
8969 
8970 	spin_lock_bh(&vport->mac_list_lock);
8971 
8972 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8973 		switch (mac_node->state) {
8974 		case HCLGE_MAC_TO_DEL:
8975 			list_move_tail(&mac_node->node, &tmp_del_list);
8976 			break;
8977 		case HCLGE_MAC_TO_ADD:
8978 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8979 			if (!new_node)
8980 				goto stop_traverse;
8981 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8982 			new_node->state = mac_node->state;
8983 			list_add_tail(&new_node->node, &tmp_add_list);
8984 			break;
8985 		default:
8986 			break;
8987 		}
8988 	}
8989 
8990 stop_traverse:
8991 	spin_unlock_bh(&vport->mac_list_lock);
8992 
8993 	/* delete first, in order to get max mac table space for adding */
8994 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8995 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8996 					    hclge_rm_uc_addr_common);
8997 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8998 					  hclge_add_uc_addr_common);
8999 	} else {
9000 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9001 					    hclge_rm_mc_addr_common);
9002 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
9003 					  hclge_add_mc_addr_common);
9004 	}
9005 
9006 	/* if adding/deleting some mac addresses failed, move them back to
9007 	 * the mac_list, and retry next time.
9008 	 */
9009 	spin_lock_bh(&vport->mac_list_lock);
9010 
9011 	hclge_sync_from_del_list(&tmp_del_list, list);
9012 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9013 
9014 	spin_unlock_bh(&vport->mac_list_lock);
9015 
9016 	hclge_update_overflow_flags(vport, mac_type, all_added);
9017 }
9018 
9019 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9020 {
9021 	struct hclge_dev *hdev = vport->back;
9022 
9023 	if (test_bit(vport->vport_id, hdev->vport_config_block))
9024 		return false;
9025 
9026 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9027 		return true;
9028 
9029 	return false;
9030 }
9031 
9032 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9033 {
9034 	int i;
9035 
9036 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9037 		struct hclge_vport *vport = &hdev->vport[i];
9038 
9039 		if (!hclge_need_sync_mac_table(vport))
9040 			continue;
9041 
9042 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9043 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9044 	}
9045 }
9046 
9047 static void hclge_build_del_list(struct list_head *list,
9048 				 bool is_del_list,
9049 				 struct list_head *tmp_del_list)
9050 {
9051 	struct hclge_mac_node *mac_cfg, *tmp;
9052 
9053 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9054 		switch (mac_cfg->state) {
9055 		case HCLGE_MAC_TO_DEL:
9056 		case HCLGE_MAC_ACTIVE:
9057 			list_move_tail(&mac_cfg->node, tmp_del_list);
9058 			break;
9059 		case HCLGE_MAC_TO_ADD:
9060 			if (is_del_list) {
9061 				list_del(&mac_cfg->node);
9062 				kfree(mac_cfg);
9063 			}
9064 			break;
9065 		}
9066 	}
9067 }
9068 
9069 static void hclge_unsync_del_list(struct hclge_vport *vport,
9070 				  int (*unsync)(struct hclge_vport *vport,
9071 						const unsigned char *addr),
9072 				  bool is_del_list,
9073 				  struct list_head *tmp_del_list)
9074 {
9075 	struct hclge_mac_node *mac_cfg, *tmp;
9076 	int ret;
9077 
9078 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9079 		ret = unsync(vport, mac_cfg->mac_addr);
9080 		if (!ret || ret == -ENOENT) {
9081 			/* clear all mac addrs from hardware, but keep these
9082 			 * mac addrs in the mac list, and restore them after
9083 			 * the vf reset has finished.
9084 			 */
9085 			if (!is_del_list &&
9086 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9087 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9088 			} else {
9089 				list_del(&mac_cfg->node);
9090 				kfree(mac_cfg);
9091 			}
9092 		} else if (is_del_list) {
9093 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9094 		}
9095 	}
9096 }
9097 
9098 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9099 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9100 {
9101 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9102 	struct hclge_dev *hdev = vport->back;
9103 	struct list_head tmp_del_list, *list;
9104 
9105 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9106 		list = &vport->uc_mac_list;
9107 		unsync = hclge_rm_uc_addr_common;
9108 	} else {
9109 		list = &vport->mc_mac_list;
9110 		unsync = hclge_rm_mc_addr_common;
9111 	}
9112 
9113 	INIT_LIST_HEAD(&tmp_del_list);
9114 
9115 	if (!is_del_list)
9116 		set_bit(vport->vport_id, hdev->vport_config_block);
9117 
9118 	spin_lock_bh(&vport->mac_list_lock);
9119 
9120 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9121 
9122 	spin_unlock_bh(&vport->mac_list_lock);
9123 
9124 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9125 
9126 	spin_lock_bh(&vport->mac_list_lock);
9127 
9128 	hclge_sync_from_del_list(&tmp_del_list, list);
9129 
9130 	spin_unlock_bh(&vport->mac_list_lock);
9131 }
9132 
9133 /* remove all mac addresses when uninitializing */
9134 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9135 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9136 {
9137 	struct hclge_mac_node *mac_node, *tmp;
9138 	struct hclge_dev *hdev = vport->back;
9139 	struct list_head tmp_del_list, *list;
9140 
9141 	INIT_LIST_HEAD(&tmp_del_list);
9142 
9143 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9144 		&vport->uc_mac_list : &vport->mc_mac_list;
9145 
9146 	spin_lock_bh(&vport->mac_list_lock);
9147 
9148 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9149 		switch (mac_node->state) {
9150 		case HCLGE_MAC_TO_DEL:
9151 		case HCLGE_MAC_ACTIVE:
9152 			list_move_tail(&mac_node->node, &tmp_del_list);
9153 			break;
9154 		case HCLGE_MAC_TO_ADD:
9155 			list_del(&mac_node->node);
9156 			kfree(mac_node);
9157 			break;
9158 		}
9159 	}
9160 
9161 	spin_unlock_bh(&vport->mac_list_lock);
9162 
9163 	if (mac_type == HCLGE_MAC_ADDR_UC)
9164 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9165 					    hclge_rm_uc_addr_common);
9166 	else
9167 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9168 					    hclge_rm_mc_addr_common);
9169 
9170 	if (!list_empty(&tmp_del_list))
9171 		dev_warn(&hdev->pdev->dev,
9172 			 "failed to completely uninit %s mac list for vport %u\n",
9173 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9174 			 vport->vport_id);
9175 
9176 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9177 		list_del(&mac_node->node);
9178 		kfree(mac_node);
9179 	}
9180 }
9181 
9182 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9183 {
9184 	struct hclge_vport *vport;
9185 	int i;
9186 
9187 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9188 		vport = &hdev->vport[i];
9189 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9190 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9191 	}
9192 }
9193 
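/* Convert the firmware response of a mac ethertype (manager table) add
 * command into an errno: success and "already added" map to 0, while
 * table overflow, key conflict and unknown codes map to -EIO.
 */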
9194 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9195 					      u16 cmdq_resp, u8 resp_code)
9196 {
9197 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9198 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9199 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9200 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9201 
9202 	int return_status;
9203 
9204 	if (cmdq_resp) {
9205 		dev_err(&hdev->pdev->dev,
9206 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9207 			cmdq_resp);
9208 		return -EIO;
9209 	}
9210 
9211 	switch (resp_code) {
9212 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9213 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9214 		return_status = 0;
9215 		break;
9216 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9217 		dev_err(&hdev->pdev->dev,
9218 			"add mac ethertype failed for manager table overflow.\n");
9219 		return_status = -EIO;
9220 		break;
9221 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9222 		dev_err(&hdev->pdev->dev,
9223 			"add mac ethertype failed for key conflict.\n");
9224 		return_status = -EIO;
9225 		break;
9226 	default:
9227 		dev_err(&hdev->pdev->dev,
9228 			"add mac ethertype failed for undefined, code=%u.\n",
9229 			resp_code);
9230 		return_status = -EIO;
9231 	}
9232 
9233 	return return_status;
9234 }
9235 
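/* Return true if @mac_addr is already in use, either as an entry in the
 * hardware mac_vlan table or as the configured mac address of another VF.
 */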
9236 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9237 				     u8 *mac_addr)
9238 {
9239 	struct hclge_mac_vlan_tbl_entry_cmd req;
9240 	struct hclge_dev *hdev = vport->back;
9241 	struct hclge_desc desc;
9242 	u16 egress_port = 0;
9243 	int i;
9244 
9245 	if (is_zero_ether_addr(mac_addr))
9246 		return false;
9247 
9248 	memset(&req, 0, sizeof(req));
9249 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9250 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9251 	req.egress_port = cpu_to_le16(egress_port);
9252 	hclge_prepare_mac_addr(&req, mac_addr, false);
9253 
9254 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9255 		return true;
9256 
9257 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9258 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9259 		if (i != vf_idx &&
9260 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9261 			return true;
9262 
9263 	return false;
9264 }
9265 
9266 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9267 			    u8 *mac_addr)
9268 {
9269 	struct hclge_vport *vport = hclge_get_vport(handle);
9270 	struct hclge_dev *hdev = vport->back;
9271 
9272 	vport = hclge_get_vf_vport(hdev, vf);
9273 	if (!vport)
9274 		return -EINVAL;
9275 
9276 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9277 		dev_info(&hdev->pdev->dev,
9278 			 "Specified MAC(=%pM) is the same as before, no change committed!\n",
9279 			 mac_addr);
9280 		return 0;
9281 	}
9282 
9283 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9284 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9285 			mac_addr);
9286 		return -EEXIST;
9287 	}
9288 
9289 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9290 
9291 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9292 		dev_info(&hdev->pdev->dev,
9293 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9294 			 vf, mac_addr);
9295 		return hclge_inform_reset_assert_to_vf(vport);
9296 	}
9297 
9298 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9299 		 vf, mac_addr);
9300 	return 0;
9301 }
9302 
9303 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9304 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9305 {
9306 	struct hclge_desc desc;
9307 	u8 resp_code;
9308 	u16 retval;
9309 	int ret;
9310 
9311 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9312 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9313 
9314 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9315 	if (ret) {
9316 		dev_err(&hdev->pdev->dev,
9317 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9318 			ret);
9319 		return ret;
9320 	}
9321 
9322 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9323 	retval = le16_to_cpu(desc.retval);
9324 
9325 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9326 }
9327 
9328 static int init_mgr_tbl(struct hclge_dev *hdev)
9329 {
9330 	int ret;
9331 	int i;
9332 
9333 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9334 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9335 		if (ret) {
9336 			dev_err(&hdev->pdev->dev,
9337 				"add mac ethertype failed, ret =%d.\n",
9338 				ret);
9339 			return ret;
9340 		}
9341 	}
9342 
9343 	return 0;
9344 }
9345 
9346 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9347 {
9348 	struct hclge_vport *vport = hclge_get_vport(handle);
9349 	struct hclge_dev *hdev = vport->back;
9350 
9351 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9352 }
9353 
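/* Update the unicast mac list of @vport when the device address changes:
 * queue @new_addr for addition (kept at the list head so it is restored
 * first after a reset) and, if @old_addr differs, queue it for deletion,
 * then flag the vport so the service task applies the change to hardware.
 */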
9354 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9355 				       const u8 *old_addr, const u8 *new_addr)
9356 {
9357 	struct list_head *list = &vport->uc_mac_list;
9358 	struct hclge_mac_node *old_node, *new_node;
9359 
9360 	new_node = hclge_find_mac_node(list, new_addr);
9361 	if (!new_node) {
9362 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9363 		if (!new_node)
9364 			return -ENOMEM;
9365 
9366 		new_node->state = HCLGE_MAC_TO_ADD;
9367 		ether_addr_copy(new_node->mac_addr, new_addr);
9368 		list_add(&new_node->node, list);
9369 	} else {
9370 		if (new_node->state == HCLGE_MAC_TO_DEL)
9371 			new_node->state = HCLGE_MAC_ACTIVE;
9372 
9373 		/* make sure the new addr is at the list head, so the dev
9374 		 * addr is not left out of the mac table due to the umv space
9375 		 * limitation after a global/imp reset, which clears the mac
9376 		 * table in hardware.
9377 		 */
9378 		list_move(&new_node->node, list);
9379 	}
9380 
9381 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9382 		old_node = hclge_find_mac_node(list, old_addr);
9383 		if (old_node) {
9384 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9385 				list_del(&old_node->node);
9386 				kfree(old_node);
9387 			} else {
9388 				old_node->state = HCLGE_MAC_TO_DEL;
9389 			}
9390 		}
9391 	}
9392 
9393 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9394 
9395 	return 0;
9396 }
9397 
9398 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9399 			      bool is_first)
9400 {
9401 	const unsigned char *new_addr = (const unsigned char *)p;
9402 	struct hclge_vport *vport = hclge_get_vport(handle);
9403 	struct hclge_dev *hdev = vport->back;
9404 	unsigned char *old_addr = NULL;
9405 	int ret;
9406 
9407 	/* mac addr check */
9408 	if (is_zero_ether_addr(new_addr) ||
9409 	    is_broadcast_ether_addr(new_addr) ||
9410 	    is_multicast_ether_addr(new_addr)) {
9411 		dev_err(&hdev->pdev->dev,
9412 			"change uc mac err! invalid mac: %pM.\n",
9413 			 new_addr);
9414 		return -EINVAL;
9415 	}
9416 
9417 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9418 	if (ret) {
9419 		dev_err(&hdev->pdev->dev,
9420 			"failed to configure mac pause address, ret = %d\n",
9421 			ret);
9422 		return ret;
9423 	}
9424 
9425 	if (!is_first)
9426 		old_addr = hdev->hw.mac.mac_addr;
9427 
9428 	spin_lock_bh(&vport->mac_list_lock);
9429 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9430 	if (ret) {
9431 		dev_err(&hdev->pdev->dev,
9432 			"failed to change the mac addr:%pM, ret = %d\n",
9433 			new_addr, ret);
9434 		spin_unlock_bh(&vport->mac_list_lock);
9435 
9436 		if (!is_first)
9437 			hclge_pause_addr_cfg(hdev, old_addr);
9438 
9439 		return ret;
9440 	}
9441 	/* we must update the dev addr under the spin lock, to prevent it
9442 	 * from being removed by the set_rx_mode path.
9443 	 */
9444 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9445 	spin_unlock_bh(&vport->mac_list_lock);
9446 
9447 	hclge_task_schedule(hdev, 0);
9448 
9449 	return 0;
9450 }
9451 
9452 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9453 {
9454 	struct mii_ioctl_data *data = if_mii(ifr);
9455 
9456 	if (!hnae3_dev_phy_imp_supported(hdev))
9457 		return -EOPNOTSUPP;
9458 
9459 	switch (cmd) {
9460 	case SIOCGMIIPHY:
9461 		data->phy_id = hdev->hw.mac.phy_addr;
9462 		/* this command reads phy id and register at the same time */
9463 		fallthrough;
9464 	case SIOCGMIIREG:
9465 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9466 		return 0;
9467 
9468 	case SIOCSMIIREG:
9469 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9470 	default:
9471 		return -EOPNOTSUPP;
9472 	}
9473 }
9474 
9475 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9476 			  int cmd)
9477 {
9478 	struct hclge_vport *vport = hclge_get_vport(handle);
9479 	struct hclge_dev *hdev = vport->back;
9480 
9481 	switch (cmd) {
9482 	case SIOCGHWTSTAMP:
9483 		return hclge_ptp_get_cfg(hdev, ifr);
9484 	case SIOCSHWTSTAMP:
9485 		return hclge_ptp_set_cfg(hdev, ifr);
9486 	default:
9487 		if (!hdev->hw.mac.phydev)
9488 			return hclge_mii_ioctl(hdev, ifr, cmd);
9489 	}
9490 
9491 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9492 }
9493 
9494 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9495 					     bool bypass_en)
9496 {
9497 	struct hclge_port_vlan_filter_bypass_cmd *req;
9498 	struct hclge_desc desc;
9499 	int ret;
9500 
9501 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9502 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9503 	req->vf_id = vf_id;
9504 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9505 		      bypass_en ? 1 : 0);
9506 
9507 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9508 	if (ret)
9509 		dev_err(&hdev->pdev->dev,
9510 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9511 			vf_id, ret);
9512 
9513 	return ret;
9514 }
9515 
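/* Read the current vlan filter control configuration for @vlan_type/@vf_id
 * from firmware, set or clear the filter enable bits given by @fe_type
 * according to @filter_en, and write the result back.
 */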
9516 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9517 				      u8 fe_type, bool filter_en, u8 vf_id)
9518 {
9519 	struct hclge_vlan_filter_ctrl_cmd *req;
9520 	struct hclge_desc desc;
9521 	int ret;
9522 
9523 	/* read current vlan filter parameter */
9524 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9525 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9526 	req->vlan_type = vlan_type;
9527 	req->vf_id = vf_id;
9528 
9529 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9530 	if (ret) {
9531 		dev_err(&hdev->pdev->dev,
9532 			"failed to get vlan filter config, ret = %d.\n", ret);
9533 		return ret;
9534 	}
9535 
9536 	/* modify and write new config parameter */
9537 	hclge_cmd_reuse_desc(&desc, false);
9538 	req->vlan_fe = filter_en ?
9539 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9540 
9541 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9542 	if (ret)
9543 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9544 			ret);
9545 
9546 	return ret;
9547 }
9548 
9549 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9550 {
9551 	struct hclge_dev *hdev = vport->back;
9552 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9553 	int ret;
9554 
9555 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9556 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9557 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9558 						  enable, vport->vport_id);
9559 
9560 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9561 					 HCLGE_FILTER_FE_EGRESS, enable,
9562 					 vport->vport_id);
9563 	if (ret)
9564 		return ret;
9565 
9566 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9567 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9568 							!enable);
9569 	} else if (!vport->vport_id) {
9570 		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9571 			enable = false;
9572 
9573 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9574 						 HCLGE_FILTER_FE_INGRESS,
9575 						 enable, 0);
9576 	}
9577 
9578 	return ret;
9579 }
9580 
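/* Decide whether the vlan filter should currently be enabled for @vport,
 * based on the port based vlan state, the promiscuous/trusted settings,
 * the requested filter state and the vlans configured in the vlan list.
 */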
9581 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9582 {
9583 	struct hnae3_handle *handle = &vport->nic;
9584 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9585 	struct hclge_dev *hdev = vport->back;
9586 
9587 	if (vport->vport_id) {
9588 		if (vport->port_base_vlan_cfg.state !=
9589 			HNAE3_PORT_BASE_VLAN_DISABLE)
9590 			return true;
9591 
9592 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9593 			return false;
9594 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9595 		return false;
9596 	}
9597 
9598 	if (!vport->req_vlan_fltr_en)
9599 		return false;
9600 
9601 	/* for compatibility with old devices, always enable vlan filter */
9602 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9603 		return true;
9604 
9605 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9606 		if (vlan->vlan_id != 0)
9607 			return true;
9608 
9609 	return false;
9610 }
9611 
9612 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9613 {
9614 	struct hclge_dev *hdev = vport->back;
9615 	bool need_en;
9616 	int ret;
9617 
9618 	mutex_lock(&hdev->vport_lock);
9619 
9620 	vport->req_vlan_fltr_en = request_en;
9621 
9622 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9623 	if (need_en == vport->cur_vlan_fltr_en) {
9624 		mutex_unlock(&hdev->vport_lock);
9625 		return 0;
9626 	}
9627 
9628 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9629 	if (ret) {
9630 		mutex_unlock(&hdev->vport_lock);
9631 		return ret;
9632 	}
9633 
9634 	vport->cur_vlan_fltr_en = need_en;
9635 
9636 	mutex_unlock(&hdev->vport_lock);
9637 
9638 	return 0;
9639 }
9640 
9641 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9642 {
9643 	struct hclge_vport *vport = hclge_get_vport(handle);
9644 
9645 	return hclge_enable_vport_vlan_filter(vport, enable);
9646 }
9647 
9648 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9649 					bool is_kill, u16 vlan,
9650 					struct hclge_desc *desc)
9651 {
9652 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9653 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9654 	u8 vf_byte_val;
9655 	u8 vf_byte_off;
9656 	int ret;
9657 
9658 	hclge_cmd_setup_basic_desc(&desc[0],
9659 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9660 	hclge_cmd_setup_basic_desc(&desc[1],
9661 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9662 
9663 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9664 
9665 	vf_byte_off = vfid / 8;
9666 	vf_byte_val = 1 << (vfid % 8);
9667 
9668 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9669 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9670 
9671 	req0->vlan_id  = cpu_to_le16(vlan);
9672 	req0->vlan_cfg = is_kill;
9673 
9674 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9675 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9676 	else
9677 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9678 
9679 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9680 	if (ret) {
9681 		dev_err(&hdev->pdev->dev,
9682 			"Send vf vlan command fail, ret =%d.\n",
9683 			ret);
9684 		return ret;
9685 	}
9686 
9687 	return 0;
9688 }
9689 
9690 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9691 					  bool is_kill, struct hclge_desc *desc)
9692 {
9693 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9694 
9695 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9696 
9697 	if (!is_kill) {
9698 #define HCLGE_VF_VLAN_NO_ENTRY	2
9699 		if (!req->resp_code || req->resp_code == 1)
9700 			return 0;
9701 
9702 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9703 			set_bit(vfid, hdev->vf_vlan_full);
9704 			dev_warn(&hdev->pdev->dev,
9705 				 "vf vlan table is full, vf vlan filter is disabled\n");
9706 			return 0;
9707 		}
9708 
9709 		dev_err(&hdev->pdev->dev,
9710 			"Add vf vlan filter fail, ret =%u.\n",
9711 			req->resp_code);
9712 	} else {
9713 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9714 		if (!req->resp_code)
9715 			return 0;
9716 
9717 		/* the vf vlan filter is disabled when the vf vlan table is
9718 		 * full, so new vlan ids will not be added into it. Just
9719 		 * return 0 without a warning, to avoid massive verbose
9720 		 * logs when unloading.
9721 		 */
9722 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9723 			return 0;
9724 
9725 		dev_err(&hdev->pdev->dev,
9726 			"Kill vf vlan filter fail, ret =%u.\n",
9727 			req->resp_code);
9728 	}
9729 
9730 	return -EIO;
9731 }
9732 
9733 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9734 				    bool is_kill, u16 vlan)
9735 {
9736 	struct hclge_vport *vport = &hdev->vport[vfid];
9737 	struct hclge_desc desc[2];
9738 	int ret;
9739 
9740 	/* if the vf vlan table is full, firmware will close the vf vlan
9741 	 * filter, so it is unable and unnecessary to add new vlan ids.
9742 	 * If spoof check is enabled and the vf vlan table is full, new
9743 	 * vlans shouldn't be added, since their tx packets will be dropped.
9744 	 */
9745 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9746 		if (vport->vf_info.spoofchk && vlan) {
9747 			dev_err(&hdev->pdev->dev,
9748 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
9749 			return -EPERM;
9750 		}
9751 		return 0;
9752 	}
9753 
9754 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9755 	if (ret)
9756 		return ret;
9757 
9758 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9759 }
9760 
9761 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9762 				      u16 vlan_id, bool is_kill)
9763 {
9764 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9765 	struct hclge_desc desc;
9766 	u8 vlan_offset_byte_val;
9767 	u8 vlan_offset_byte;
9768 	u8 vlan_offset_160;
9769 	int ret;
9770 
9771 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9772 
9773 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9774 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9775 			   HCLGE_VLAN_BYTE_SIZE;
9776 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9777 
9778 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9779 	req->vlan_offset = vlan_offset_160;
9780 	req->vlan_cfg = is_kill;
9781 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9782 
9783 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9784 	if (ret)
9785 		dev_err(&hdev->pdev->dev,
9786 			"port vlan command, send fail, ret =%d.\n", ret);
9787 	return ret;
9788 }
9789 
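/* Add or remove @vlan_id for @vport_id in hardware: update the vf vlan
 * filter first, then update the port vlan filter when the first vport
 * joins the vlan or the last vport leaves it.
 */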
9790 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9791 				    u16 vport_id, u16 vlan_id,
9792 				    bool is_kill)
9793 {
9794 	u16 vport_idx, vport_num = 0;
9795 	int ret;
9796 
9797 	if (is_kill && !vlan_id)
9798 		return 0;
9799 
9800 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9801 	if (ret) {
9802 		dev_err(&hdev->pdev->dev,
9803 			"Set %u vport vlan filter config fail, ret =%d.\n",
9804 			vport_id, ret);
9805 		return ret;
9806 	}
9807 
9808 	/* vlan 0 may be added twice when 8021q module is enabled */
9809 	if (!is_kill && !vlan_id &&
9810 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9811 		return 0;
9812 
9813 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9814 		dev_err(&hdev->pdev->dev,
9815 			"Add port vlan failed, vport %u is already in vlan %u\n",
9816 			vport_id, vlan_id);
9817 		return -EINVAL;
9818 	}
9819 
9820 	if (is_kill &&
9821 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9822 		dev_err(&hdev->pdev->dev,
9823 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9824 			vport_id, vlan_id);
9825 		return -EINVAL;
9826 	}
9827 
9828 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9829 		vport_num++;
9830 
9831 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9832 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9833 						 is_kill);
9834 
9835 	return ret;
9836 }
9837 
9838 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9839 {
9840 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9841 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9842 	struct hclge_dev *hdev = vport->back;
9843 	struct hclge_desc desc;
9844 	u16 bmap_index;
9845 	int status;
9846 
9847 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9848 
9849 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9850 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9851 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9852 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9853 		      vcfg->accept_tag1 ? 1 : 0);
9854 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9855 		      vcfg->accept_untag1 ? 1 : 0);
9856 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9857 		      vcfg->accept_tag2 ? 1 : 0);
9858 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9859 		      vcfg->accept_untag2 ? 1 : 0);
9860 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9861 		      vcfg->insert_tag1_en ? 1 : 0);
9862 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9863 		      vcfg->insert_tag2_en ? 1 : 0);
9864 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9865 		      vcfg->tag_shift_mode_en ? 1 : 0);
9866 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9867 
9868 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9869 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9870 			HCLGE_VF_NUM_PER_BYTE;
9871 	req->vf_bitmap[bmap_index] =
9872 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9873 
9874 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9875 	if (status)
9876 		dev_err(&hdev->pdev->dev,
9877 			"Send port txvlan cfg command fail, ret =%d\n",
9878 			status);
9879 
9880 	return status;
9881 }
9882 
9883 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9884 {
9885 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9886 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9887 	struct hclge_dev *hdev = vport->back;
9888 	struct hclge_desc desc;
9889 	u16 bmap_index;
9890 	int status;
9891 
9892 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9893 
9894 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9895 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9896 		      vcfg->strip_tag1_en ? 1 : 0);
9897 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9898 		      vcfg->strip_tag2_en ? 1 : 0);
9899 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9900 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9901 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9902 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9903 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9904 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9905 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9906 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9907 
9908 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9909 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9910 			HCLGE_VF_NUM_PER_BYTE;
9911 	req->vf_bitmap[bmap_index] =
9912 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9913 
9914 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9915 	if (status)
9916 		dev_err(&hdev->pdev->dev,
9917 			"Send port rxvlan cfg command fail, ret =%d\n",
9918 			status);
9919 
9920 	return status;
9921 }
9922 
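/* Configure the tx and rx vlan tag offload of @vport according to the
 * given port base vlan state and the port vlan tag/qos, then write both
 * configurations to hardware.
 */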
9923 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9924 				  u16 port_base_vlan_state,
9925 				  u16 vlan_tag, u8 qos)
9926 {
9927 	int ret;
9928 
9929 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9930 		vport->txvlan_cfg.accept_tag1 = true;
9931 		vport->txvlan_cfg.insert_tag1_en = false;
9932 		vport->txvlan_cfg.default_tag1 = 0;
9933 	} else {
9934 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9935 
9936 		vport->txvlan_cfg.accept_tag1 =
9937 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9938 		vport->txvlan_cfg.insert_tag1_en = true;
9939 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9940 						 vlan_tag;
9941 	}
9942 
9943 	vport->txvlan_cfg.accept_untag1 = true;
9944 
9945 	/* accept_tag2 and accept_untag2 are not supported on
9946 	 * pdev revision(0x20); newer revisions support them, but
9947 	 * these two fields can not be configured by the user.
9948 	 */
9949 	vport->txvlan_cfg.accept_tag2 = true;
9950 	vport->txvlan_cfg.accept_untag2 = true;
9951 	vport->txvlan_cfg.insert_tag2_en = false;
9952 	vport->txvlan_cfg.default_tag2 = 0;
9953 	vport->txvlan_cfg.tag_shift_mode_en = true;
9954 
9955 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9956 		vport->rxvlan_cfg.strip_tag1_en = false;
9957 		vport->rxvlan_cfg.strip_tag2_en =
9958 				vport->rxvlan_cfg.rx_vlan_offload_en;
9959 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9960 	} else {
9961 		vport->rxvlan_cfg.strip_tag1_en =
9962 				vport->rxvlan_cfg.rx_vlan_offload_en;
9963 		vport->rxvlan_cfg.strip_tag2_en = true;
9964 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9965 	}
9966 
9967 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9968 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9969 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9970 
9971 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9972 	if (ret)
9973 		return ret;
9974 
9975 	return hclge_set_vlan_rx_offload_cfg(vport);
9976 }
9977 
9978 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9979 {
9980 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9981 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9982 	struct hclge_desc desc;
9983 	int status;
9984 
9985 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9986 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9987 	rx_req->ot_fst_vlan_type =
9988 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9989 	rx_req->ot_sec_vlan_type =
9990 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9991 	rx_req->in_fst_vlan_type =
9992 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9993 	rx_req->in_sec_vlan_type =
9994 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9995 
9996 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9997 	if (status) {
9998 		dev_err(&hdev->pdev->dev,
9999 			"Send rxvlan protocol type command fail, ret =%d\n",
10000 			status);
10001 		return status;
10002 	}
10003 
10004 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10005 
10006 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10007 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10008 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10009 
10010 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
10011 	if (status)
10012 		dev_err(&hdev->pdev->dev,
10013 			"Send txvlan protocol type command fail, ret =%d\n",
10014 			status);
10015 
10016 	return status;
10017 }
10018 
10019 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10020 {
10021 #define HCLGE_DEF_VLAN_TYPE		0x8100
10022 
10023 	struct hnae3_handle *handle = &hdev->vport[0].nic;
10024 	struct hclge_vport *vport;
10025 	int ret;
10026 	int i;
10027 
10028 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10029 		/* for revision 0x21, vf vlan filter is per function */
10030 		for (i = 0; i < hdev->num_alloc_vport; i++) {
10031 			vport = &hdev->vport[i];
10032 			ret = hclge_set_vlan_filter_ctrl(hdev,
10033 							 HCLGE_FILTER_TYPE_VF,
10034 							 HCLGE_FILTER_FE_EGRESS,
10035 							 true,
10036 							 vport->vport_id);
10037 			if (ret)
10038 				return ret;
10039 			vport->cur_vlan_fltr_en = true;
10040 		}
10041 
10042 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10043 						 HCLGE_FILTER_FE_INGRESS, true,
10044 						 0);
10045 		if (ret)
10046 			return ret;
10047 	} else {
10048 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10049 						 HCLGE_FILTER_FE_EGRESS_V1_B,
10050 						 true, 0);
10051 		if (ret)
10052 			return ret;
10053 	}
10054 
10055 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10056 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10057 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10058 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10059 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10060 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10061 
10062 	ret = hclge_set_vlan_protocol_type(hdev);
10063 	if (ret)
10064 		return ret;
10065 
10066 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10067 		u16 vlan_tag;
10068 		u8 qos;
10069 
10070 		vport = &hdev->vport[i];
10071 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10072 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10073 
10074 		ret = hclge_vlan_offload_cfg(vport,
10075 					     vport->port_base_vlan_cfg.state,
10076 					     vlan_tag, qos);
10077 		if (ret)
10078 			return ret;
10079 	}
10080 
10081 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10082 }
10083 
10084 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10085 				       bool writen_to_tbl)
10086 {
10087 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10088 
10089 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10090 		if (vlan->vlan_id == vlan_id)
10091 			return;
10092 
10093 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10094 	if (!vlan)
10095 		return;
10096 
10097 	vlan->hd_tbl_status = writen_to_tbl;
10098 	vlan->vlan_id = vlan_id;
10099 
10100 	list_add_tail(&vlan->node, &vport->vlan_list);
10101 }
10102 
10103 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10104 {
10105 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10106 	struct hclge_dev *hdev = vport->back;
10107 	int ret;
10108 
10109 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10110 		if (!vlan->hd_tbl_status) {
10111 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10112 						       vport->vport_id,
10113 						       vlan->vlan_id, false);
10114 			if (ret) {
10115 				dev_err(&hdev->pdev->dev,
10116 					"restore vport vlan list failed, ret=%d\n",
10117 					ret);
10118 				return ret;
10119 			}
10120 		}
10121 		vlan->hd_tbl_status = true;
10122 	}
10123 
10124 	return 0;
10125 }
10126 
10127 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10128 				      bool is_write_tbl)
10129 {
10130 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10131 	struct hclge_dev *hdev = vport->back;
10132 
10133 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10134 		if (vlan->vlan_id == vlan_id) {
10135 			if (is_write_tbl && vlan->hd_tbl_status)
10136 				hclge_set_vlan_filter_hw(hdev,
10137 							 htons(ETH_P_8021Q),
10138 							 vport->vport_id,
10139 							 vlan_id,
10140 							 true);
10141 
10142 			list_del(&vlan->node);
10143 			kfree(vlan);
10144 			break;
10145 		}
10146 	}
10147 }
10148 
10149 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10150 {
10151 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10152 	struct hclge_dev *hdev = vport->back;
10153 
10154 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10155 		if (vlan->hd_tbl_status)
10156 			hclge_set_vlan_filter_hw(hdev,
10157 						 htons(ETH_P_8021Q),
10158 						 vport->vport_id,
10159 						 vlan->vlan_id,
10160 						 true);
10161 
10162 		vlan->hd_tbl_status = false;
10163 		if (is_del_list) {
10164 			list_del(&vlan->node);
10165 			kfree(vlan);
10166 		}
10167 	}
10168 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10169 }
10170 
10171 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10172 {
10173 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10174 	struct hclge_vport *vport;
10175 	int i;
10176 
10177 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10178 		vport = &hdev->vport[i];
10179 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10180 			list_del(&vlan->node);
10181 			kfree(vlan);
10182 		}
10183 	}
10184 }
10185 
10186 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10187 {
10188 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10189 	struct hclge_dev *hdev = vport->back;
10190 	u16 vlan_proto;
10191 	u16 vlan_id;
10192 	u16 state;
10193 	int ret;
10194 
10195 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10196 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10197 	state = vport->port_base_vlan_cfg.state;
10198 
10199 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10200 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10201 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10202 					 vport->vport_id, vlan_id,
10203 					 false);
10204 		return;
10205 	}
10206 
10207 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10208 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10209 					       vport->vport_id,
10210 					       vlan->vlan_id, false);
10211 		if (ret)
10212 			break;
10213 		vlan->hd_tbl_status = true;
10214 	}
10215 }
10216 
10217 /* For global reset and imp reset, hardware will clear the mac table,
10218  * so we change the mac address state from ACTIVE to TO_ADD, then they
10219  * can be restored in the service task after the reset completes. Further,
10220  * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
10221  * restored after reset, so just remove these mac nodes from mac_list.
10222  */
10223 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10224 {
10225 	struct hclge_mac_node *mac_node, *tmp;
10226 
10227 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10228 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10229 			mac_node->state = HCLGE_MAC_TO_ADD;
10230 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10231 			list_del(&mac_node->node);
10232 			kfree(mac_node);
10233 		}
10234 	}
10235 }
10236 
10237 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10238 {
10239 	spin_lock_bh(&vport->mac_list_lock);
10240 
10241 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10242 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10243 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10244 
10245 	spin_unlock_bh(&vport->mac_list_lock);
10246 }
10247 
10248 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10249 {
10250 	struct hclge_vport *vport = &hdev->vport[0];
10251 	struct hnae3_handle *handle = &vport->nic;
10252 
10253 	hclge_restore_mac_table_common(vport);
10254 	hclge_restore_vport_vlan_table(vport);
10255 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10256 	hclge_restore_fd_entries(handle);
10257 }
10258 
10259 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10260 {
10261 	struct hclge_vport *vport = hclge_get_vport(handle);
10262 
10263 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10264 		vport->rxvlan_cfg.strip_tag1_en = false;
10265 		vport->rxvlan_cfg.strip_tag2_en = enable;
10266 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10267 	} else {
10268 		vport->rxvlan_cfg.strip_tag1_en = enable;
10269 		vport->rxvlan_cfg.strip_tag2_en = true;
10270 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10271 	}
10272 
10273 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10274 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10275 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10276 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10277 
10278 	return hclge_set_vlan_rx_offload_cfg(vport);
10279 }
10280 
10281 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10282 {
10283 	struct hclge_dev *hdev = vport->back;
10284 
10285 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10286 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10287 }
10288 
10289 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10290 					    u16 port_base_vlan_state,
10291 					    struct hclge_vlan_info *new_info,
10292 					    struct hclge_vlan_info *old_info)
10293 {
10294 	struct hclge_dev *hdev = vport->back;
10295 	int ret;
10296 
10297 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10298 		hclge_rm_vport_all_vlan_table(vport, false);
10299 		/* force clear VLAN 0 */
10300 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10301 		if (ret)
10302 			return ret;
10303 		return hclge_set_vlan_filter_hw(hdev,
10304 						 htons(new_info->vlan_proto),
10305 						 vport->vport_id,
10306 						 new_info->vlan_tag,
10307 						 false);
10308 	}
10309 
10310 	/* force add VLAN 0 */
10311 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10312 	if (ret)
10313 		return ret;
10314 
10315 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10316 				       vport->vport_id, old_info->vlan_tag,
10317 				       true);
10318 	if (ret)
10319 		return ret;
10320 
10321 	return hclge_add_vport_all_vlan_table(vport);
10322 }
10323 
10324 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10325 					  const struct hclge_vlan_info *old_cfg)
10326 {
10327 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10328 		return true;
10329 
10330 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10331 		return true;
10332 
10333 	return false;
10334 }
10335 
10336 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10337 				    struct hclge_vlan_info *vlan_info)
10338 {
10339 	struct hnae3_handle *nic = &vport->nic;
10340 	struct hclge_vlan_info *old_vlan_info;
10341 	struct hclge_dev *hdev = vport->back;
10342 	int ret;
10343 
10344 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10345 
10346 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10347 				     vlan_info->qos);
10348 	if (ret)
10349 		return ret;
10350 
10351 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10352 		goto out;
10353 
10354 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10355 		/* add new VLAN tag */
10356 		ret = hclge_set_vlan_filter_hw(hdev,
10357 					       htons(vlan_info->vlan_proto),
10358 					       vport->vport_id,
10359 					       vlan_info->vlan_tag,
10360 					       false);
10361 		if (ret)
10362 			return ret;
10363 
10364 		/* remove old VLAN tag */
10365 		if (old_vlan_info->vlan_tag == 0)
10366 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10367 						       true, 0);
10368 		else
10369 			ret = hclge_set_vlan_filter_hw(hdev,
10370 						       htons(ETH_P_8021Q),
10371 						       vport->vport_id,
10372 						       old_vlan_info->vlan_tag,
10373 						       true);
10374 		if (ret) {
10375 			dev_err(&hdev->pdev->dev,
10376 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10377 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10378 			return ret;
10379 		}
10380 
10381 		goto out;
10382 	}
10383 
10384 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10385 					       old_vlan_info);
10386 	if (ret)
10387 		return ret;
10388 
10389 out:
10390 	vport->port_base_vlan_cfg.state = state;
10391 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10392 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10393 	else
10394 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10395 
10396 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10397 	hclge_set_vport_vlan_fltr_change(vport);
10398 
10399 	return 0;
10400 }
10401 
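/* Work out how the port base vlan state of @vport should change for the
 * requested @vlan/@qos: enable, disable, modify, or no change when the
 * new setting matches the current one.
 */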
10402 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10403 					  enum hnae3_port_base_vlan_state state,
10404 					  u16 vlan, u8 qos)
10405 {
10406 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10407 		if (!vlan && !qos)
10408 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10409 
10410 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10411 	}
10412 
10413 	if (!vlan && !qos)
10414 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10415 
10416 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10417 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10418 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10419 
10420 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10421 }
10422 
10423 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10424 				    u16 vlan, u8 qos, __be16 proto)
10425 {
10426 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10427 	struct hclge_vport *vport = hclge_get_vport(handle);
10428 	struct hclge_dev *hdev = vport->back;
10429 	struct hclge_vlan_info vlan_info;
10430 	u16 state;
10431 	int ret;
10432 
10433 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10434 		return -EOPNOTSUPP;
10435 
10436 	vport = hclge_get_vf_vport(hdev, vfid);
10437 	if (!vport)
10438 		return -EINVAL;
10439 
10440 	/* qos is a 3-bit value, so it can not be bigger than 7 */
10441 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10442 		return -EINVAL;
10443 	if (proto != htons(ETH_P_8021Q))
10444 		return -EPROTONOSUPPORT;
10445 
10446 	state = hclge_get_port_base_vlan_state(vport,
10447 					       vport->port_base_vlan_cfg.state,
10448 					       vlan, qos);
10449 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10450 		return 0;
10451 
10452 	vlan_info.vlan_tag = vlan;
10453 	vlan_info.qos = qos;
10454 	vlan_info.vlan_proto = ntohs(proto);
10455 
10456 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10457 	if (ret) {
10458 		dev_err(&hdev->pdev->dev,
10459 			"failed to update port base vlan for vf %d, ret = %d\n",
10460 			vfid, ret);
10461 		return ret;
10462 	}
10463 
10464 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10465 	 * VLAN state.
10466 	 */
10467 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10468 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10469 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10470 						  vport->vport_id, state,
10471 						  &vlan_info);
10472 
10473 	return 0;
10474 }
10475 
10476 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10477 {
10478 	struct hclge_vlan_info *vlan_info;
10479 	struct hclge_vport *vport;
10480 	int ret;
10481 	int vf;
10482 
10483 	/* clear port base vlan for all vf */
10484 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10485 		vport = &hdev->vport[vf];
10486 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10487 
10488 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10489 					       vport->vport_id,
10490 					       vlan_info->vlan_tag, true);
10491 		if (ret)
10492 			dev_err(&hdev->pdev->dev,
10493 				"failed to clear vf vlan for vf%d, ret = %d\n",
10494 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10495 	}
10496 }
10497 
10498 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10499 			  u16 vlan_id, bool is_kill)
10500 {
10501 	struct hclge_vport *vport = hclge_get_vport(handle);
10502 	struct hclge_dev *hdev = vport->back;
10503 	bool writen_to_tbl = false;
10504 	int ret = 0;
10505 
10506 	/* When the device is resetting or reset has failed, firmware is
10507 	 * unable to handle the mailbox. Just record the vlan id, and remove
10508 	 * it after the reset has finished.
10509 	 */
10510 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10511 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10512 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10513 		return -EBUSY;
10514 	}
10515 
	/* When port base vlan is enabled, we use the port base vlan as the
	 * vlan filter entry. In this case, we don't update the vlan filter
	 * table when the user adds a new vlan or removes an existing one; we
	 * just update the vport vlan list. The vlan ids in the vlan list are
	 * not written to the vlan filter table until port base vlan is
	 * disabled.
	 */
10522 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10523 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10524 					       vlan_id, is_kill);
		written_to_tbl = true;
10526 	}
10527 
10528 	if (!ret) {
10529 		if (is_kill)
10530 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10531 		else
10532 			hclge_add_vport_vlan_table(vport, vlan_id,
						   written_to_tbl);
10534 	} else if (is_kill) {
		/* When removing the hw vlan filter failed, record the vlan id
		 * and try to remove it from hw later, to stay consistent with
		 * the stack.
		 */
10539 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10540 	}
10541 
10542 	hclge_set_vport_vlan_fltr_change(vport);
10543 
10544 	return ret;
10545 }
10546 
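/* Apply any pending per-vport vlan filter enable/disable requests. On
 * failure the change bit is set again so that a later sync pass retries.
 */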
10547 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10548 {
10549 	struct hclge_vport *vport;
10550 	int ret;
10551 	u16 i;
10552 
10553 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10554 		vport = &hdev->vport[i];
10555 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10556 					&vport->state))
10557 			continue;
10558 
10559 		ret = hclge_enable_vport_vlan_filter(vport,
10560 						     vport->req_vlan_fltr_en);
10561 		if (ret) {
10562 			dev_err(&hdev->pdev->dev,
10563 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10564 				vport->vport_id, ret);
10565 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10566 				&vport->state);
10567 			return;
10568 		}
10569 	}
10570 }
10571 
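/* Retry the vlan deletions that previously failed (recorded in
 * vlan_del_fail_bmap), handling at most HCLGE_MAX_SYNC_COUNT deletions per
 * call to keep a single pass bounded, then sync the vlan filter state.
 */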
10572 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10573 {
10574 #define HCLGE_MAX_SYNC_COUNT	60
10575 
10576 	int i, ret, sync_cnt = 0;
10577 	u16 vlan_id;
10578 
10579 	/* start from vport 1 for PF is always alive */
10580 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10581 		struct hclge_vport *vport = &hdev->vport[i];
10582 
10583 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10584 					 VLAN_N_VID);
10585 		while (vlan_id != VLAN_N_VID) {
10586 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10587 						       vport->vport_id, vlan_id,
10588 						       true);
10589 			if (ret && ret != -EINVAL)
10590 				return;
10591 
10592 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10593 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10594 			hclge_set_vport_vlan_fltr_change(vport);
10595 
10596 			sync_cnt++;
10597 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10598 				return;
10599 
10600 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10601 						 VLAN_N_VID);
10602 		}
10603 	}
10604 
10605 	hclge_sync_vlan_fltr_state(hdev);
10606 }
10607 
10608 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10609 {
10610 	struct hclge_config_max_frm_size_cmd *req;
10611 	struct hclge_desc desc;
10612 
10613 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10614 
10615 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10616 	req->max_frm_size = cpu_to_le16(new_mps);
10617 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10618 
10619 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10620 }
10621 
10622 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10623 {
10624 	struct hclge_vport *vport = hclge_get_vport(handle);
10625 
10626 	return hclge_set_vport_mtu(vport, new_mtu);
10627 }
10628 
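/* The MTU is converted to a max frame size before it is programmed, e.g.
 * an MTU of 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 2 * 4 (VLAN_HLEN) = 1526 bytes. The result is clamped to at least
 * HCLGE_MAC_DEFAULT_FRAME, and a VF's frame size must not exceed the PF's.
 */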
10629 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10630 {
10631 	struct hclge_dev *hdev = vport->back;
10632 	int i, max_frm_size, ret;
10633 
	/* HW supports 2 layers of VLAN tags */
10635 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10636 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10637 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10638 		return -EINVAL;
10639 
10640 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10641 	mutex_lock(&hdev->vport_lock);
10642 	/* VF's mps must fit within hdev->mps */
10643 	if (vport->vport_id && max_frm_size > hdev->mps) {
10644 		mutex_unlock(&hdev->vport_lock);
10645 		return -EINVAL;
10646 	} else if (vport->vport_id) {
10647 		vport->mps = max_frm_size;
10648 		mutex_unlock(&hdev->vport_lock);
10649 		return 0;
10650 	}
10651 
	/* PF's mps must be no less than any VF's mps */
10653 	for (i = 1; i < hdev->num_alloc_vport; i++)
10654 		if (max_frm_size < hdev->vport[i].mps) {
10655 			mutex_unlock(&hdev->vport_lock);
10656 			return -EINVAL;
10657 		}
10658 
10659 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10660 
10661 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10662 	if (ret) {
10663 		dev_err(&hdev->pdev->dev,
10664 			"Change mtu fail, ret =%d\n", ret);
10665 		goto out;
10666 	}
10667 
10668 	hdev->mps = max_frm_size;
10669 	vport->mps = max_frm_size;
10670 
10671 	ret = hclge_buffer_alloc(hdev);
10672 	if (ret)
10673 		dev_err(&hdev->pdev->dev,
10674 			"Allocate buffer fail, ret =%d\n", ret);
10675 
10676 out:
10677 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10678 	mutex_unlock(&hdev->vport_lock);
10679 	return ret;
10680 }
10681 
10682 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10683 				    bool enable)
10684 {
10685 	struct hclge_reset_tqp_queue_cmd *req;
10686 	struct hclge_desc desc;
10687 	int ret;
10688 
10689 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10690 
10691 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10692 	req->tqp_id = cpu_to_le16(queue_id);
10693 	if (enable)
10694 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10695 
10696 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10697 	if (ret) {
10698 		dev_err(&hdev->pdev->dev,
10699 			"Send tqp reset cmd error, status =%d\n", ret);
10700 		return ret;
10701 	}
10702 
10703 	return 0;
10704 }
10705 
10706 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10707 {
10708 	struct hclge_reset_tqp_queue_cmd *req;
10709 	struct hclge_desc desc;
10710 	int ret;
10711 
10712 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10713 
10714 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10715 	req->tqp_id = cpu_to_le16(queue_id);
10716 
10717 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10718 	if (ret) {
10719 		dev_err(&hdev->pdev->dev,
10720 			"Get reset status error, status =%d\n", ret);
10721 		return ret;
10722 	}
10723 
10724 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10725 }
10726 
10727 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10728 {
10729 	struct hnae3_queue *queue;
10730 	struct hclge_tqp *tqp;
10731 
10732 	queue = handle->kinfo.tqp[queue_id];
10733 	tqp = container_of(queue, struct hclge_tqp, q);
10734 
10735 	return tqp->index;
10736 }
10737 
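/* Reset each of the handle's tqps one by one: assert the soft reset, poll
 * the ready-to-reset status for up to HCLGE_TQP_RESET_TRY_TIMES iterations
 * (roughly 1ms apart), then deassert the reset.
 */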
10738 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10739 {
10740 	struct hclge_vport *vport = hclge_get_vport(handle);
10741 	struct hclge_dev *hdev = vport->back;
10742 	u16 reset_try_times = 0;
10743 	int reset_status;
10744 	u16 queue_gid;
10745 	int ret;
10746 	u16 i;
10747 
10748 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10749 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10750 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10751 		if (ret) {
10752 			dev_err(&hdev->pdev->dev,
10753 				"failed to send reset tqp cmd, ret = %d\n",
10754 				ret);
10755 			return ret;
10756 		}
10757 
10758 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10759 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10760 			if (reset_status)
10761 				break;
10762 
10763 			/* Wait for tqp hw reset */
10764 			usleep_range(1000, 1200);
10765 		}
10766 
10767 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10768 			dev_err(&hdev->pdev->dev,
10769 				"wait for tqp hw reset timeout\n");
10770 			return -ETIME;
10771 		}
10772 
10773 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10774 		if (ret) {
10775 			dev_err(&hdev->pdev->dev,
10776 				"failed to deassert soft reset, ret = %d\n",
10777 				ret);
10778 			return ret;
10779 		}
10780 		reset_try_times = 0;
10781 	}
10782 	return 0;
10783 }
10784 
10785 static int hclge_reset_rcb(struct hnae3_handle *handle)
10786 {
10787 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10788 #define HCLGE_RESET_RCB_SUCCESS		1U
10789 
10790 	struct hclge_vport *vport = hclge_get_vport(handle);
10791 	struct hclge_dev *hdev = vport->back;
10792 	struct hclge_reset_cmd *req;
10793 	struct hclge_desc desc;
10794 	u8 return_status;
10795 	u16 queue_gid;
10796 	int ret;
10797 
10798 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10799 
10800 	req = (struct hclge_reset_cmd *)desc.data;
10801 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10802 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10803 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10804 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10805 
10806 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10807 	if (ret) {
10808 		dev_err(&hdev->pdev->dev,
10809 			"failed to send rcb reset cmd, ret = %d\n", ret);
10810 		return ret;
10811 	}
10812 
10813 	return_status = req->fun_reset_rcb_return_status;
10814 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10815 		return 0;
10816 
10817 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10818 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10819 			return_status);
10820 		return -EIO;
10821 	}
10822 
10823 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10824 	 * again to reset all tqps
10825 	 */
10826 	return hclge_reset_tqp_cmd(handle);
10827 }
10828 
10829 int hclge_reset_tqp(struct hnae3_handle *handle)
10830 {
10831 	struct hclge_vport *vport = hclge_get_vport(handle);
10832 	struct hclge_dev *hdev = vport->back;
10833 	int ret;
10834 
10835 	/* only need to disable PF's tqp */
10836 	if (!vport->vport_id) {
10837 		ret = hclge_tqp_enable(handle, false);
10838 		if (ret) {
10839 			dev_err(&hdev->pdev->dev,
10840 				"failed to disable tqp, ret = %d\n", ret);
10841 			return ret;
10842 		}
10843 	}
10844 
10845 	return hclge_reset_rcb(handle);
10846 }
10847 
10848 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10849 {
10850 	struct hclge_vport *vport = hclge_get_vport(handle);
10851 	struct hclge_dev *hdev = vport->back;
10852 
10853 	return hdev->fw_version;
10854 }
10855 
10856 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10857 {
10858 	struct phy_device *phydev = hdev->hw.mac.phydev;
10859 
10860 	if (!phydev)
10861 		return;
10862 
10863 	phy_set_asym_pause(phydev, rx_en, tx_en);
10864 }
10865 
10866 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10867 {
10868 	int ret;
10869 
10870 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10871 		return 0;
10872 
10873 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10874 	if (ret)
10875 		dev_err(&hdev->pdev->dev,
10876 			"configure pauseparam error, ret = %d.\n", ret);
10877 
10878 	return ret;
10879 }
10880 
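/* Resolve flow control for a PHY link with autoneg enabled: combine the
 * local and link-partner pause advertisements with
 * mii_resolve_flowctrl_fdx() and program the resulting rx/tx pause state
 * (pause is disabled for half duplex).
 */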
10881 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10882 {
10883 	struct phy_device *phydev = hdev->hw.mac.phydev;
10884 	u16 remote_advertising = 0;
10885 	u16 local_advertising;
10886 	u32 rx_pause, tx_pause;
10887 	u8 flowctl;
10888 
10889 	if (!phydev->link || !phydev->autoneg)
10890 		return 0;
10891 
10892 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10893 
10894 	if (phydev->pause)
10895 		remote_advertising = LPA_PAUSE_CAP;
10896 
10897 	if (phydev->asym_pause)
10898 		remote_advertising |= LPA_PAUSE_ASYM;
10899 
10900 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10901 					   remote_advertising);
10902 	tx_pause = flowctl & FLOW_CTRL_TX;
10903 	rx_pause = flowctl & FLOW_CTRL_RX;
10904 
10905 	if (phydev->duplex == HCLGE_MAC_HALF) {
10906 		tx_pause = 0;
10907 		rx_pause = 0;
10908 	}
10909 
10910 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10911 }
10912 
10913 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10914 				 u32 *rx_en, u32 *tx_en)
10915 {
10916 	struct hclge_vport *vport = hclge_get_vport(handle);
10917 	struct hclge_dev *hdev = vport->back;
10918 	u8 media_type = hdev->hw.mac.media_type;
10919 
10920 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10921 		    hclge_get_autoneg(handle) : 0;
10922 
10923 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10924 		*rx_en = 0;
10925 		*tx_en = 0;
10926 		return;
10927 	}
10928 
10929 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10930 		*rx_en = 1;
10931 		*tx_en = 0;
10932 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10933 		*tx_en = 1;
10934 		*rx_en = 0;
10935 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10936 		*rx_en = 1;
10937 		*tx_en = 1;
10938 	} else {
10939 		*rx_en = 0;
10940 		*tx_en = 0;
10941 	}
10942 }
10943 
10944 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10945 					 u32 rx_en, u32 tx_en)
10946 {
10947 	if (rx_en && tx_en)
10948 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10949 	else if (rx_en && !tx_en)
10950 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10951 	else if (!rx_en && tx_en)
10952 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10953 	else
10954 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10955 
10956 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10957 }
10958 
10959 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10960 				u32 rx_en, u32 tx_en)
10961 {
10962 	struct hclge_vport *vport = hclge_get_vport(handle);
10963 	struct hclge_dev *hdev = vport->back;
10964 	struct phy_device *phydev = hdev->hw.mac.phydev;
10965 	u32 fc_autoneg;
10966 
10967 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10968 		fc_autoneg = hclge_get_autoneg(handle);
10969 		if (auto_neg != fc_autoneg) {
10970 			dev_info(&hdev->pdev->dev,
10971 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10972 			return -EOPNOTSUPP;
10973 		}
10974 	}
10975 
10976 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10977 		dev_info(&hdev->pdev->dev,
10978 			 "Priority flow control enabled. Cannot set link flow control.\n");
10979 		return -EOPNOTSUPP;
10980 	}
10981 
10982 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10983 
10984 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10985 
10986 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10987 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10988 
10989 	if (phydev)
10990 		return phy_start_aneg(phydev);
10991 
10992 	return -EOPNOTSUPP;
10993 }
10994 
10995 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10996 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10997 {
10998 	struct hclge_vport *vport = hclge_get_vport(handle);
10999 	struct hclge_dev *hdev = vport->back;
11000 
11001 	if (speed)
11002 		*speed = hdev->hw.mac.speed;
11003 	if (duplex)
11004 		*duplex = hdev->hw.mac.duplex;
11005 	if (auto_neg)
11006 		*auto_neg = hdev->hw.mac.autoneg;
11007 }
11008 
11009 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11010 				 u8 *module_type)
11011 {
11012 	struct hclge_vport *vport = hclge_get_vport(handle);
11013 	struct hclge_dev *hdev = vport->back;
11014 
	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port
	 * information before returning the media type to ensure the media
	 * information is correct.
	 */
11019 	hclge_update_port_info(hdev);
11020 
11021 	if (media_type)
11022 		*media_type = hdev->hw.mac.media_type;
11023 
11024 	if (module_type)
11025 		*module_type = hdev->hw.mac.module_type;
11026 }
11027 
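/* Report the MDI/MDI-X control and status of the PHY: switch to the MDIX
 * register page, read the control and status registers, then switch back
 * to the copper page before translating the result for ethtool.
 */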
11028 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11029 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11030 {
11031 	struct hclge_vport *vport = hclge_get_vport(handle);
11032 	struct hclge_dev *hdev = vport->back;
11033 	struct phy_device *phydev = hdev->hw.mac.phydev;
11034 	int mdix_ctrl, mdix, is_resolved;
11035 	unsigned int retval;
11036 
11037 	if (!phydev) {
11038 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11039 		*tp_mdix = ETH_TP_MDI_INVALID;
11040 		return;
11041 	}
11042 
11043 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11044 
11045 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11046 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11047 				    HCLGE_PHY_MDIX_CTRL_S);
11048 
11049 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11050 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11051 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11052 
11053 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11054 
11055 	switch (mdix_ctrl) {
11056 	case 0x0:
11057 		*tp_mdix_ctrl = ETH_TP_MDI;
11058 		break;
11059 	case 0x1:
11060 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11061 		break;
11062 	case 0x3:
11063 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11064 		break;
11065 	default:
11066 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11067 		break;
11068 	}
11069 
11070 	if (!is_resolved)
11071 		*tp_mdix = ETH_TP_MDI_INVALID;
11072 	else if (mdix)
11073 		*tp_mdix = ETH_TP_MDI_X;
11074 	else
11075 		*tp_mdix = ETH_TP_MDI;
11076 }
11077 
11078 static void hclge_info_show(struct hclge_dev *hdev)
11079 {
11080 	struct device *dev = &hdev->pdev->dev;
11081 
11082 	dev_info(dev, "PF info begin:\n");
11083 
11084 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11085 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11086 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11087 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11088 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11089 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11090 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11091 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11092 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11093 	dev_info(dev, "This is %s PF\n",
11094 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11095 	dev_info(dev, "DCB %s\n",
11096 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11097 	dev_info(dev, "MQPRIO %s\n",
11098 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11099 	dev_info(dev, "Default tx spare buffer size: %u\n",
11100 		 hdev->tx_spare_buf_size);
11101 
11102 	dev_info(dev, "PF info end.\n");
11103 }
11104 
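/* Initialize the nic client instance. The reset counter is sampled before
 * init; if a reset is in progress or the counter has changed, the instance
 * is rolled back and -EBUSY is returned.
 */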
11105 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11106 					  struct hclge_vport *vport)
11107 {
11108 	struct hnae3_client *client = vport->nic.client;
11109 	struct hclge_dev *hdev = ae_dev->priv;
11110 	int rst_cnt = hdev->rst_stats.reset_cnt;
11111 	int ret;
11112 
11113 	ret = client->ops->init_instance(&vport->nic);
11114 	if (ret)
11115 		return ret;
11116 
11117 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11118 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11119 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11120 		ret = -EBUSY;
11121 		goto init_nic_err;
11122 	}
11123 
11124 	/* Enable nic hw error interrupts */
11125 	ret = hclge_config_nic_hw_error(hdev, true);
11126 	if (ret) {
11127 		dev_err(&ae_dev->pdev->dev,
11128 			"fail(%d) to enable hw error interrupts\n", ret);
11129 		goto init_nic_err;
11130 	}
11131 
11132 	hnae3_set_client_init_flag(client, ae_dev, 1);
11133 
11134 	if (netif_msg_drv(&hdev->vport->nic))
11135 		hclge_info_show(hdev);
11136 
11137 	return ret;
11138 
11139 init_nic_err:
11140 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11141 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11142 		msleep(HCLGE_WAIT_RESET_DONE);
11143 
11144 	client->ops->uninit_instance(&vport->nic, 0);
11145 
11146 	return ret;
11147 }
11148 
11149 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11150 					   struct hclge_vport *vport)
11151 {
11152 	struct hclge_dev *hdev = ae_dev->priv;
11153 	struct hnae3_client *client;
11154 	int rst_cnt;
11155 	int ret;
11156 
11157 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11158 	    !hdev->nic_client)
11159 		return 0;
11160 
11161 	client = hdev->roce_client;
11162 	ret = hclge_init_roce_base_info(vport);
11163 	if (ret)
11164 		return ret;
11165 
11166 	rst_cnt = hdev->rst_stats.reset_cnt;
11167 	ret = client->ops->init_instance(&vport->roce);
11168 	if (ret)
11169 		return ret;
11170 
11171 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11172 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11173 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11174 		ret = -EBUSY;
11175 		goto init_roce_err;
11176 	}
11177 
11178 	/* Enable roce ras interrupts */
11179 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11180 	if (ret) {
11181 		dev_err(&ae_dev->pdev->dev,
11182 			"fail(%d) to enable roce ras interrupts\n", ret);
11183 		goto init_roce_err;
11184 	}
11185 
11186 	hnae3_set_client_init_flag(client, ae_dev, 1);
11187 
11188 	return 0;
11189 
11190 init_roce_err:
11191 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11192 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11193 		msleep(HCLGE_WAIT_RESET_DONE);
11194 
11195 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11196 
11197 	return ret;
11198 }
11199 
11200 static int hclge_init_client_instance(struct hnae3_client *client,
11201 				      struct hnae3_ae_dev *ae_dev)
11202 {
11203 	struct hclge_dev *hdev = ae_dev->priv;
11204 	struct hclge_vport *vport = &hdev->vport[0];
11205 	int ret;
11206 
11207 	switch (client->type) {
11208 	case HNAE3_CLIENT_KNIC:
11209 		hdev->nic_client = client;
11210 		vport->nic.client = client;
11211 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11212 		if (ret)
11213 			goto clear_nic;
11214 
11215 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11216 		if (ret)
11217 			goto clear_roce;
11218 
11219 		break;
11220 	case HNAE3_CLIENT_ROCE:
11221 		if (hnae3_dev_roce_supported(hdev)) {
11222 			hdev->roce_client = client;
11223 			vport->roce.client = client;
11224 		}
11225 
11226 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11227 		if (ret)
11228 			goto clear_roce;
11229 
11230 		break;
11231 	default:
11232 		return -EINVAL;
11233 	}
11234 
11235 	return 0;
11236 
11237 clear_nic:
11238 	hdev->nic_client = NULL;
11239 	vport->nic.client = NULL;
11240 	return ret;
11241 clear_roce:
11242 	hdev->roce_client = NULL;
11243 	vport->roce.client = NULL;
11244 	return ret;
11245 }
11246 
11247 static void hclge_uninit_client_instance(struct hnae3_client *client,
11248 					 struct hnae3_ae_dev *ae_dev)
11249 {
11250 	struct hclge_dev *hdev = ae_dev->priv;
11251 	struct hclge_vport *vport = &hdev->vport[0];
11252 
11253 	if (hdev->roce_client) {
11254 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11255 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11256 			msleep(HCLGE_WAIT_RESET_DONE);
11257 
11258 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11259 		hdev->roce_client = NULL;
11260 		vport->roce.client = NULL;
11261 	}
11262 	if (client->type == HNAE3_CLIENT_ROCE)
11263 		return;
11264 	if (hdev->nic_client && client->ops->uninit_instance) {
11265 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11266 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11267 			msleep(HCLGE_WAIT_RESET_DONE);
11268 
11269 		client->ops->uninit_instance(&vport->nic, 0);
11270 		hdev->nic_client = NULL;
11271 		vport->nic.client = NULL;
11272 	}
11273 }
11274 
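/* Map the optional device memory BAR (BAR 4) as write-combined; devices
 * without this BAR skip the mapping and return 0.
 */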
11275 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11276 {
11277 #define HCLGE_MEM_BAR		4
11278 
11279 	struct pci_dev *pdev = hdev->pdev;
11280 	struct hclge_hw *hw = &hdev->hw;
11281 
	/* if the device does not have device memory, return directly */
11283 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11284 		return 0;
11285 
11286 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11287 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11288 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11289 	if (!hw->mem_base) {
11290 		dev_err(&pdev->dev, "failed to map device memory\n");
11291 		return -EFAULT;
11292 	}
11293 
11294 	return 0;
11295 }
11296 
11297 static int hclge_pci_init(struct hclge_dev *hdev)
11298 {
11299 	struct pci_dev *pdev = hdev->pdev;
11300 	struct hclge_hw *hw;
11301 	int ret;
11302 
11303 	ret = pci_enable_device(pdev);
11304 	if (ret) {
11305 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11306 		return ret;
11307 	}
11308 
11309 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11310 	if (ret) {
11311 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11312 		if (ret) {
11313 			dev_err(&pdev->dev,
11314 				"can't set consistent PCI DMA");
11315 			goto err_disable_device;
11316 		}
11317 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11318 	}
11319 
11320 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11321 	if (ret) {
11322 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11323 		goto err_disable_device;
11324 	}
11325 
11326 	pci_set_master(pdev);
11327 	hw = &hdev->hw;
11328 	hw->io_base = pcim_iomap(pdev, 2, 0);
11329 	if (!hw->io_base) {
11330 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11331 		ret = -ENOMEM;
11332 		goto err_clr_master;
11333 	}
11334 
11335 	ret = hclge_dev_mem_map(hdev);
11336 	if (ret)
11337 		goto err_unmap_io_base;
11338 
11339 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11340 
11341 	return 0;
11342 
11343 err_unmap_io_base:
11344 	pcim_iounmap(pdev, hdev->hw.io_base);
11345 err_clr_master:
11346 	pci_clear_master(pdev);
11347 	pci_release_regions(pdev);
11348 err_disable_device:
11349 	pci_disable_device(pdev);
11350 
11351 	return ret;
11352 }
11353 
11354 static void hclge_pci_uninit(struct hclge_dev *hdev)
11355 {
11356 	struct pci_dev *pdev = hdev->pdev;
11357 
11358 	if (hdev->hw.mem_base)
11359 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11360 
11361 	pcim_iounmap(pdev, hdev->hw.io_base);
11362 	pci_free_irq_vectors(pdev);
11363 	pci_clear_master(pdev);
11364 	pci_release_mem_regions(pdev);
11365 	pci_disable_device(pdev);
11366 }
11367 
11368 static void hclge_state_init(struct hclge_dev *hdev)
11369 {
11370 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11371 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11372 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11373 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11374 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11375 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11376 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11377 }
11378 
11379 static void hclge_state_uninit(struct hclge_dev *hdev)
11380 {
11381 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11382 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11383 
11384 	if (hdev->reset_timer.function)
11385 		del_timer_sync(&hdev->reset_timer);
11386 	if (hdev->service_task.work.func)
11387 		cancel_delayed_work_sync(&hdev->service_task);
11388 }
11389 
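/* Prepare the device for the requested reset type (e.g. FLR). If the
 * prepare step fails or another reset is pending, back off for
 * HCLGE_RESET_RETRY_WAIT_MS and retry (bounded by HCLGE_RESET_RETRY_CNT
 * unless a reset is still pending) before disabling the misc vector and
 * marking the command queue disabled.
 */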
11390 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11391 					enum hnae3_reset_type rst_type)
11392 {
11393 #define HCLGE_RESET_RETRY_WAIT_MS	500
11394 #define HCLGE_RESET_RETRY_CNT	5
11395 
11396 	struct hclge_dev *hdev = ae_dev->priv;
11397 	int retry_cnt = 0;
11398 	int ret;
11399 
11400 retry:
11401 	down(&hdev->reset_sem);
11402 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11403 	hdev->reset_type = rst_type;
11404 	ret = hclge_reset_prepare(hdev);
11405 	if (ret || hdev->reset_pending) {
11406 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11407 			ret);
11408 		if (hdev->reset_pending ||
11409 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11410 			dev_err(&hdev->pdev->dev,
11411 				"reset_pending:0x%lx, retry_cnt:%d\n",
11412 				hdev->reset_pending, retry_cnt);
11413 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11414 			up(&hdev->reset_sem);
11415 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11416 			goto retry;
11417 		}
11418 	}
11419 
	/* disable misc vector before reset is done */
11421 	hclge_enable_vector(&hdev->misc_vector, false);
11422 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11423 
11424 	if (hdev->reset_type == HNAE3_FLR_RESET)
11425 		hdev->rst_stats.flr_rst_cnt++;
11426 }
11427 
11428 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11429 {
11430 	struct hclge_dev *hdev = ae_dev->priv;
11431 	int ret;
11432 
11433 	hclge_enable_vector(&hdev->misc_vector, true);
11434 
11435 	ret = hclge_reset_rebuild(hdev);
11436 	if (ret)
11437 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11438 
11439 	hdev->reset_type = HNAE3_NONE_RESET;
11440 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11441 	up(&hdev->reset_sem);
11442 }
11443 
11444 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11445 {
11446 	u16 i;
11447 
11448 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11449 		struct hclge_vport *vport = &hdev->vport[i];
11450 		int ret;
11451 
		/* Send cmd to clear VF's FUNC_RST_ING */
11453 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11454 		if (ret)
11455 			dev_warn(&hdev->pdev->dev,
11456 				 "clear vf(%u) rst failed %d!\n",
11457 				 vport->vport_id, ret);
11458 	}
11459 }
11460 
11461 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11462 {
11463 	struct hclge_desc desc;
11464 	int ret;
11465 
11466 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11467 
11468 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware; it will fail
	 * with older firmware. The error value -EOPNOTSUPP can only be
	 * returned by older firmware running this command, so to keep the
	 * code backward compatible we override this value and return
	 * success.
	 */
11475 	if (ret && ret != -EOPNOTSUPP) {
11476 		dev_err(&hdev->pdev->dev,
11477 			"failed to clear hw resource, ret = %d\n", ret);
11478 		return ret;
11479 	}
11480 	return 0;
11481 }
11482 
11483 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11484 {
11485 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11486 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11487 }
11488 
11489 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11490 {
11491 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11492 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11493 }
11494 
11495 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11496 {
11497 	struct pci_dev *pdev = ae_dev->pdev;
11498 	struct hclge_dev *hdev;
11499 	int ret;
11500 
11501 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11502 	if (!hdev)
11503 		return -ENOMEM;
11504 
11505 	hdev->pdev = pdev;
11506 	hdev->ae_dev = ae_dev;
11507 	hdev->reset_type = HNAE3_NONE_RESET;
11508 	hdev->reset_level = HNAE3_FUNC_RESET;
11509 	ae_dev->priv = hdev;
11510 
	/* HW supports 2 layers of VLAN tags */
11512 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11513 
11514 	mutex_init(&hdev->vport_lock);
11515 	spin_lock_init(&hdev->fd_rule_lock);
11516 	sema_init(&hdev->reset_sem, 1);
11517 
11518 	ret = hclge_pci_init(hdev);
11519 	if (ret)
11520 		goto out;
11521 
11522 	ret = hclge_devlink_init(hdev);
11523 	if (ret)
11524 		goto err_pci_uninit;
11525 
	/* Initialize the firmware command queue */
11527 	ret = hclge_cmd_queue_init(hdev);
11528 	if (ret)
11529 		goto err_devlink_uninit;
11530 
	/* Initialize firmware commands */
11532 	ret = hclge_cmd_init(hdev);
11533 	if (ret)
11534 		goto err_cmd_uninit;
11535 
11536 	ret  = hclge_clear_hw_resource(hdev);
11537 	if (ret)
11538 		goto err_cmd_uninit;
11539 
11540 	ret = hclge_get_cap(hdev);
11541 	if (ret)
11542 		goto err_cmd_uninit;
11543 
11544 	ret = hclge_query_dev_specs(hdev);
11545 	if (ret) {
11546 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11547 			ret);
11548 		goto err_cmd_uninit;
11549 	}
11550 
11551 	ret = hclge_configure(hdev);
11552 	if (ret) {
11553 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11554 		goto err_cmd_uninit;
11555 	}
11556 
11557 	ret = hclge_init_msi(hdev);
11558 	if (ret) {
11559 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11560 		goto err_cmd_uninit;
11561 	}
11562 
11563 	ret = hclge_misc_irq_init(hdev);
11564 	if (ret)
11565 		goto err_msi_uninit;
11566 
11567 	ret = hclge_alloc_tqps(hdev);
11568 	if (ret) {
11569 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11570 		goto err_msi_irq_uninit;
11571 	}
11572 
11573 	ret = hclge_alloc_vport(hdev);
11574 	if (ret)
11575 		goto err_msi_irq_uninit;
11576 
11577 	ret = hclge_map_tqp(hdev);
11578 	if (ret)
11579 		goto err_msi_irq_uninit;
11580 
11581 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11582 	    !hnae3_dev_phy_imp_supported(hdev)) {
11583 		ret = hclge_mac_mdio_config(hdev);
11584 		if (ret)
11585 			goto err_msi_irq_uninit;
11586 	}
11587 
11588 	ret = hclge_init_umv_space(hdev);
11589 	if (ret)
11590 		goto err_mdiobus_unreg;
11591 
11592 	ret = hclge_mac_init(hdev);
11593 	if (ret) {
11594 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11595 		goto err_mdiobus_unreg;
11596 	}
11597 
11598 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11599 	if (ret) {
11600 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11601 		goto err_mdiobus_unreg;
11602 	}
11603 
11604 	ret = hclge_config_gro(hdev);
11605 	if (ret)
11606 		goto err_mdiobus_unreg;
11607 
11608 	ret = hclge_init_vlan_config(hdev);
11609 	if (ret) {
11610 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11611 		goto err_mdiobus_unreg;
11612 	}
11613 
11614 	ret = hclge_tm_schd_init(hdev);
11615 	if (ret) {
11616 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11617 		goto err_mdiobus_unreg;
11618 	}
11619 
11620 	ret = hclge_rss_init_cfg(hdev);
11621 	if (ret) {
11622 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11623 		goto err_mdiobus_unreg;
11624 	}
11625 
11626 	ret = hclge_rss_init_hw(hdev);
11627 	if (ret) {
11628 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11629 		goto err_mdiobus_unreg;
11630 	}
11631 
11632 	ret = init_mgr_tbl(hdev);
11633 	if (ret) {
11634 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11635 		goto err_mdiobus_unreg;
11636 	}
11637 
11638 	ret = hclge_init_fd_config(hdev);
11639 	if (ret) {
11640 		dev_err(&pdev->dev,
11641 			"fd table init fail, ret=%d\n", ret);
11642 		goto err_mdiobus_unreg;
11643 	}
11644 
11645 	ret = hclge_ptp_init(hdev);
11646 	if (ret)
11647 		goto err_mdiobus_unreg;
11648 
11649 	INIT_KFIFO(hdev->mac_tnl_log);
11650 
11651 	hclge_dcb_ops_set(hdev);
11652 
11653 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11654 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11655 
	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify callback.
	 */
11659 	hclge_misc_affinity_setup(hdev);
11660 
11661 	hclge_clear_all_event_cause(hdev);
11662 	hclge_clear_resetting_state(hdev);
11663 
	/* Log and clear the hw errors that have already occurred */
11665 	if (hnae3_dev_ras_imp_supported(hdev))
11666 		hclge_handle_occurred_error(hdev);
11667 	else
11668 		hclge_handle_all_hns_hw_errors(ae_dev);
11669 
	/* Request a delayed reset for error recovery, because an immediate
	 * global reset on a PF may affect the pending initialization of
	 * other PFs.
	 */
11673 	if (ae_dev->hw_err_reset_req) {
11674 		enum hnae3_reset_type reset_level;
11675 
11676 		reset_level = hclge_get_reset_level(ae_dev,
11677 						    &ae_dev->hw_err_reset_req);
11678 		hclge_set_def_reset_request(ae_dev, reset_level);
11679 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11680 	}
11681 
11682 	hclge_init_rxd_adv_layout(hdev);
11683 
11684 	/* Enable MISC vector(vector0) */
11685 	hclge_enable_vector(&hdev->misc_vector, true);
11686 
11687 	hclge_state_init(hdev);
11688 	hdev->last_reset_time = jiffies;
11689 
11690 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11691 		 HCLGE_DRIVER_NAME);
11692 
11693 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11694 
11695 	return 0;
11696 
11697 err_mdiobus_unreg:
11698 	if (hdev->hw.mac.phydev)
11699 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11700 err_msi_irq_uninit:
11701 	hclge_misc_irq_uninit(hdev);
11702 err_msi_uninit:
11703 	pci_free_irq_vectors(pdev);
11704 err_cmd_uninit:
11705 	hclge_cmd_uninit(hdev);
11706 err_devlink_uninit:
11707 	hclge_devlink_uninit(hdev);
11708 err_pci_uninit:
11709 	pcim_iounmap(pdev, hdev->hw.io_base);
11710 	pci_clear_master(pdev);
11711 	pci_release_regions(pdev);
11712 	pci_disable_device(pdev);
11713 out:
11714 	mutex_destroy(&hdev->vport_lock);
11715 	return ret;
11716 }
11717 
11718 static void hclge_stats_clear(struct hclge_dev *hdev)
11719 {
11720 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11721 }
11722 
11723 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11724 {
11725 	return hclge_config_switch_param(hdev, vf, enable,
11726 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11727 }
11728 
11729 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11730 {
11731 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11732 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11733 					  enable, vf);
11734 }
11735 
11736 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11737 {
11738 	int ret;
11739 
11740 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11741 	if (ret) {
11742 		dev_err(&hdev->pdev->dev,
11743 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11744 			vf, enable ? "on" : "off", ret);
11745 		return ret;
11746 	}
11747 
11748 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11749 	if (ret)
11750 		dev_err(&hdev->pdev->dev,
11751 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11752 			vf, enable ? "on" : "off", ret);
11753 
11754 	return ret;
11755 }
11756 
11757 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11758 				 bool enable)
11759 {
11760 	struct hclge_vport *vport = hclge_get_vport(handle);
11761 	struct hclge_dev *hdev = vport->back;
11762 	u32 new_spoofchk = enable ? 1 : 0;
11763 	int ret;
11764 
11765 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11766 		return -EOPNOTSUPP;
11767 
11768 	vport = hclge_get_vf_vport(hdev, vf);
11769 	if (!vport)
11770 		return -EINVAL;
11771 
11772 	if (vport->vf_info.spoofchk == new_spoofchk)
11773 		return 0;
11774 
11775 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11776 		dev_warn(&hdev->pdev->dev,
11777 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11778 			 vf);
11779 	else if (enable && hclge_is_umv_space_full(vport, true))
11780 		dev_warn(&hdev->pdev->dev,
11781 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11782 			 vf);
11783 
11784 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11785 	if (ret)
11786 		return ret;
11787 
11788 	vport->vf_info.spoofchk = new_spoofchk;
11789 	return 0;
11790 }
11791 
11792 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11793 {
11794 	struct hclge_vport *vport = hdev->vport;
11795 	int ret;
11796 	int i;
11797 
11798 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11799 		return 0;
11800 
11801 	/* resume the vf spoof check state after reset */
11802 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11803 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11804 					       vport->vf_info.spoofchk);
11805 		if (ret)
11806 			return ret;
11807 
11808 		vport++;
11809 	}
11810 
11811 	return 0;
11812 }
11813 
11814 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11815 {
11816 	struct hclge_vport *vport = hclge_get_vport(handle);
11817 	struct hclge_dev *hdev = vport->back;
11818 	u32 new_trusted = enable ? 1 : 0;
11819 
11820 	vport = hclge_get_vf_vport(hdev, vf);
11821 	if (!vport)
11822 		return -EINVAL;
11823 
11824 	if (vport->vf_info.trusted == new_trusted)
11825 		return 0;
11826 
11827 	vport->vf_info.trusted = new_trusted;
11828 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11829 	hclge_task_schedule(hdev, 0);
11830 
11831 	return 0;
11832 }
11833 
11834 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11835 {
11836 	int ret;
11837 	int vf;
11838 
11839 	/* reset vf rate to default value */
11840 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11841 		struct hclge_vport *vport = &hdev->vport[vf];
11842 
11843 		vport->vf_info.max_tx_rate = 0;
11844 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11845 		if (ret)
11846 			dev_err(&hdev->pdev->dev,
11847 				"vf%d failed to reset to default, ret=%d\n",
11848 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11849 	}
11850 }
11851 
11852 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11853 				     int min_tx_rate, int max_tx_rate)
11854 {
11855 	if (min_tx_rate != 0 ||
11856 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11857 		dev_err(&hdev->pdev->dev,
11858 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11859 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11860 		return -EINVAL;
11861 	}
11862 
11863 	return 0;
11864 }
11865 
11866 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11867 			     int min_tx_rate, int max_tx_rate, bool force)
11868 {
11869 	struct hclge_vport *vport = hclge_get_vport(handle);
11870 	struct hclge_dev *hdev = vport->back;
11871 	int ret;
11872 
11873 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11874 	if (ret)
11875 		return ret;
11876 
11877 	vport = hclge_get_vf_vport(hdev, vf);
11878 	if (!vport)
11879 		return -EINVAL;
11880 
11881 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11882 		return 0;
11883 
11884 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11885 	if (ret)
11886 		return ret;
11887 
11888 	vport->vf_info.max_tx_rate = max_tx_rate;
11889 
11890 	return 0;
11891 }
11892 
11893 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11894 {
11895 	struct hnae3_handle *handle = &hdev->vport->nic;
11896 	struct hclge_vport *vport;
11897 	int ret;
11898 	int vf;
11899 
11900 	/* resume the vf max_tx_rate after reset */
11901 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11902 		vport = hclge_get_vf_vport(hdev, vf);
11903 		if (!vport)
11904 			return -EINVAL;
11905 
		/* Zero means max rate. After reset, the firmware has already
		 * set it to max rate, so just continue.
		 */
11909 		if (!vport->vf_info.max_tx_rate)
11910 			continue;
11911 
11912 		ret = hclge_set_vf_rate(handle, vf, 0,
11913 					vport->vf_info.max_tx_rate, true);
11914 		if (ret) {
11915 			dev_err(&hdev->pdev->dev,
11916 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11917 				vf, vport->vf_info.max_tx_rate, ret);
11918 			return ret;
11919 		}
11920 	}
11921 
11922 	return 0;
11923 }
11924 
11925 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11926 {
11927 	struct hclge_vport *vport = hdev->vport;
11928 	int i;
11929 
11930 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11931 		hclge_vport_stop(vport);
11932 		vport++;
11933 	}
11934 }
11935 
11936 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11937 {
11938 	struct hclge_dev *hdev = ae_dev->priv;
11939 	struct pci_dev *pdev = ae_dev->pdev;
11940 	int ret;
11941 
11942 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11943 
11944 	hclge_stats_clear(hdev);
	/* NOTE: PF reset does not need to clear or restore the PF and VF
	 * table entries, so the tables in memory should not be cleaned here.
	 */
11948 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11949 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11950 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11951 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11952 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11953 		hclge_reset_umv_space(hdev);
11954 	}
11955 
11956 	ret = hclge_cmd_init(hdev);
11957 	if (ret) {
11958 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11959 		return ret;
11960 	}
11961 
11962 	ret = hclge_map_tqp(hdev);
11963 	if (ret) {
11964 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11965 		return ret;
11966 	}
11967 
11968 	ret = hclge_mac_init(hdev);
11969 	if (ret) {
11970 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11971 		return ret;
11972 	}
11973 
11974 	ret = hclge_tp_port_init(hdev);
11975 	if (ret) {
11976 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11977 			ret);
11978 		return ret;
11979 	}
11980 
11981 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11982 	if (ret) {
11983 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11984 		return ret;
11985 	}
11986 
11987 	ret = hclge_config_gro(hdev);
11988 	if (ret)
11989 		return ret;
11990 
11991 	ret = hclge_init_vlan_config(hdev);
11992 	if (ret) {
11993 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11994 		return ret;
11995 	}
11996 
11997 	ret = hclge_tm_init_hw(hdev, true);
11998 	if (ret) {
11999 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12000 		return ret;
12001 	}
12002 
12003 	ret = hclge_rss_init_hw(hdev);
12004 	if (ret) {
12005 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12006 		return ret;
12007 	}
12008 
12009 	ret = init_mgr_tbl(hdev);
12010 	if (ret) {
12011 		dev_err(&pdev->dev,
12012 			"failed to reinit manager table, ret = %d\n", ret);
12013 		return ret;
12014 	}
12015 
12016 	ret = hclge_init_fd_config(hdev);
12017 	if (ret) {
12018 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12019 		return ret;
12020 	}
12021 
12022 	ret = hclge_ptp_init(hdev);
12023 	if (ret)
12024 		return ret;
12025 
	/* Log and clear the hw errors that have already occurred */
12027 	if (hnae3_dev_ras_imp_supported(hdev))
12028 		hclge_handle_occurred_error(hdev);
12029 	else
12030 		hclge_handle_all_hns_hw_errors(ae_dev);
12031 
12032 	/* Re-enable the hw error interrupts because
12033 	 * the interrupts get disabled on global reset.
12034 	 */
12035 	ret = hclge_config_nic_hw_error(hdev, true);
12036 	if (ret) {
12037 		dev_err(&pdev->dev,
12038 			"fail(%d) to re-enable NIC hw error interrupts\n",
12039 			ret);
12040 		return ret;
12041 	}
12042 
12043 	if (hdev->roce_client) {
12044 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
12045 		if (ret) {
12046 			dev_err(&pdev->dev,
12047 				"fail(%d) to re-enable roce ras interrupts\n",
12048 				ret);
12049 			return ret;
12050 		}
12051 	}
12052 
12053 	hclge_reset_vport_state(hdev);
12054 	ret = hclge_reset_vport_spoofchk(hdev);
12055 	if (ret)
12056 		return ret;
12057 
12058 	ret = hclge_resume_vf_rate(hdev);
12059 	if (ret)
12060 		return ret;
12061 
12062 	hclge_init_rxd_adv_layout(hdev);
12063 
12064 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12065 		 HCLGE_DRIVER_NAME);
12066 
12067 	return 0;
12068 }
12069 
12070 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12071 {
12072 	struct hclge_dev *hdev = ae_dev->priv;
12073 	struct hclge_mac *mac = &hdev->hw.mac;
12074 
12075 	hclge_reset_vf_rate(hdev);
12076 	hclge_clear_vf_vlan(hdev);
12077 	hclge_misc_affinity_teardown(hdev);
12078 	hclge_state_uninit(hdev);
12079 	hclge_ptp_uninit(hdev);
12080 	hclge_uninit_rxd_adv_layout(hdev);
12081 	hclge_uninit_mac_table(hdev);
12082 	hclge_del_all_fd_entries(hdev);
12083 
12084 	if (mac->phydev)
12085 		mdiobus_unregister(mac->mdio_bus);
12086 
12087 	/* Disable MISC vector(vector0) */
12088 	hclge_enable_vector(&hdev->misc_vector, false);
12089 	synchronize_irq(hdev->misc_vector.vector_irq);
12090 
12091 	/* Disable all hw interrupts */
12092 	hclge_config_mac_tnl_int(hdev, false);
12093 	hclge_config_nic_hw_error(hdev, false);
12094 	hclge_config_rocee_ras_interrupt(hdev, false);
12095 
12096 	hclge_cmd_uninit(hdev);
12097 	hclge_misc_irq_uninit(hdev);
12098 	hclge_devlink_uninit(hdev);
12099 	hclge_pci_uninit(hdev);
12100 	mutex_destroy(&hdev->vport_lock);
12101 	hclge_uninit_vport_vlan_table(hdev);
12102 	ae_dev->priv = NULL;
12103 }
12104 
12105 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12106 {
12107 	struct hclge_vport *vport = hclge_get_vport(handle);
12108 	struct hclge_dev *hdev = vport->back;
12109 
12110 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12111 }
12112 
12113 static void hclge_get_channels(struct hnae3_handle *handle,
12114 			       struct ethtool_channels *ch)
12115 {
12116 	ch->max_combined = hclge_get_max_channels(handle);
12117 	ch->other_count = 1;
12118 	ch->max_other = 1;
12119 	ch->combined_count = handle->kinfo.rss_size;
12120 }
12121 
12122 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12123 					u16 *alloc_tqps, u16 *max_rss_size)
12124 {
12125 	struct hclge_vport *vport = hclge_get_vport(handle);
12126 	struct hclge_dev *hdev = vport->back;
12127 
12128 	*alloc_tqps = vport->alloc_tqps;
12129 	*max_rss_size = hdev->pf_rss_size_max;
12130 }
12131 
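/* Change the number of channels: update the requested rss_size, remap the
 * vport tqps, and reprogram the RSS TC mode with tc_size =
 * ilog2(roundup_pow_of_two(rss_size)). Unless the user has configured the
 * RSS indirection table, it is rebuilt as rss_indir[i] = i % rss_size.
 */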
12132 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12133 			      bool rxfh_configured)
12134 {
12135 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12136 	struct hclge_vport *vport = hclge_get_vport(handle);
12137 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12138 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12139 	struct hclge_dev *hdev = vport->back;
12140 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12141 	u16 cur_rss_size = kinfo->rss_size;
12142 	u16 cur_tqps = kinfo->num_tqps;
12143 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12144 	u16 roundup_size;
12145 	u32 *rss_indir;
12146 	unsigned int i;
12147 	int ret;
12148 
12149 	kinfo->req_rss_size = new_tqps_num;
12150 
12151 	ret = hclge_tm_vport_map_update(hdev);
12152 	if (ret) {
12153 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12154 		return ret;
12155 	}
12156 
12157 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12158 	roundup_size = ilog2(roundup_size);
12159 	/* Set the RSS TC mode according to the new RSS size */
12160 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12161 		tc_valid[i] = 0;
12162 
12163 		if (!(hdev->hw_tc_map & BIT(i)))
12164 			continue;
12165 
12166 		tc_valid[i] = 1;
12167 		tc_size[i] = roundup_size;
12168 		tc_offset[i] = kinfo->rss_size * i;
12169 	}
12170 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12171 	if (ret)
12172 		return ret;
12173 
12174 	/* RSS indirection table has been configured by user */
12175 	if (rxfh_configured)
12176 		goto out;
12177 
	/* Reinitialize the RSS indirection table according to the new RSS size */
12179 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12180 			    GFP_KERNEL);
12181 	if (!rss_indir)
12182 		return -ENOMEM;
12183 
12184 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12185 		rss_indir[i] = i % kinfo->rss_size;
12186 
12187 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12188 	if (ret)
12189 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12190 			ret);
12191 
12192 	kfree(rss_indir);
12193 
12194 out:
12195 	if (!ret)
12196 		dev_info(&hdev->pdev->dev,
12197 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12198 			 cur_rss_size, kinfo->rss_size,
12199 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12200 
12201 	return ret;
12202 }
12203 
12204 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12205 			      u32 *regs_num_64_bit)
12206 {
12207 	struct hclge_desc desc;
12208 	u32 total_num;
12209 	int ret;
12210 
12211 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12212 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12213 	if (ret) {
12214 		dev_err(&hdev->pdev->dev,
12215 			"Query register number cmd failed, ret = %d.\n", ret);
12216 		return ret;
12217 	}
12218 
12219 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12220 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12221 
12222 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12223 	if (!total_num)
12224 		return -EINVAL;
12225 
12226 	return 0;
12227 }
12228 
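/* Read the 32-bit register dump. The first descriptor reserves
 * HCLGE_32_BIT_DESC_NODATA_LEN words for non-register data, so the number
 * of descriptors is sized for regs_num plus that overhead; the values are
 * then unpacked descriptor by descriptor into the caller's buffer.
 */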
12229 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12230 				 void *data)
12231 {
12232 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12233 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12234 
12235 	struct hclge_desc *desc;
12236 	u32 *reg_val = data;
12237 	__le32 *desc_data;
12238 	int nodata_num;
12239 	int cmd_num;
12240 	int i, k, n;
12241 	int ret;
12242 
12243 	if (regs_num == 0)
12244 		return 0;
12245 
12246 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12247 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12248 			       HCLGE_32_BIT_REG_RTN_DATANUM);
12249 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12250 	if (!desc)
12251 		return -ENOMEM;
12252 
12253 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12254 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12255 	if (ret) {
12256 		dev_err(&hdev->pdev->dev,
12257 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12258 		kfree(desc);
12259 		return ret;
12260 	}
12261 
12262 	for (i = 0; i < cmd_num; i++) {
12263 		if (i == 0) {
12264 			desc_data = (__le32 *)(&desc[i].data[0]);
12265 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12266 		} else {
12267 			desc_data = (__le32 *)(&desc[i]);
12268 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12269 		}
12270 		for (k = 0; k < n; k++) {
12271 			*reg_val++ = le32_to_cpu(*desc_data++);
12272 
12273 			regs_num--;
12274 			if (!regs_num)
12275 				break;
12276 		}
12277 	}
12278 
12279 	kfree(desc);
12280 	return 0;
12281 }
12282 
12283 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12284 				 void *data)
12285 {
12286 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12287 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12288 
12289 	struct hclge_desc *desc;
12290 	u64 *reg_val = data;
12291 	__le64 *desc_data;
12292 	int nodata_len;
12293 	int cmd_num;
12294 	int i, k, n;
12295 	int ret;
12296 
12297 	if (regs_num == 0)
12298 		return 0;
12299 
12300 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12301 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12302 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12303 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12304 	if (!desc)
12305 		return -ENOMEM;
12306 
12307 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12308 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12309 	if (ret) {
12310 		dev_err(&hdev->pdev->dev,
12311 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12312 		kfree(desc);
12313 		return ret;
12314 	}
12315 
12316 	for (i = 0; i < cmd_num; i++) {
12317 		if (i == 0) {
12318 			desc_data = (__le64 *)(&desc[i].data[0]);
12319 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12320 		} else {
12321 			desc_data = (__le64 *)(&desc[i]);
12322 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12323 		}
12324 		for (k = 0; k < n; k++) {
12325 			*reg_val++ = le64_to_cpu(*desc_data++);
12326 
12327 			regs_num--;
12328 			if (!regs_num)
12329 				break;
12330 		}
12331 	}
12332 
12333 	kfree(desc);
12334 	return 0;
12335 }
12336 
12337 #define MAX_SEPARATE_NUM	4
12338 #define SEPARATOR_VALUE		0xFDFCFBFA
12339 #define REG_NUM_PER_LINE	4
12340 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12341 #define REG_SEPARATOR_LINE	1
12342 #define REG_NUM_REMAIN_MASK	3
12343 
12344 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12345 {
12346 	int i;
12347 
12348 	/* initialize command BD except the last one */
12349 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12350 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12351 					   true);
12352 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12353 	}
12354 
12355 	/* initialize the last command BD */
12356 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12357 
12358 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12359 }
12360 
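/* Query firmware for the BD count of each DFX register type and store
 * one entry per type in @bd_num_list.
 */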
12361 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12362 				    int *bd_num_list,
12363 				    u32 type_num)
12364 {
12365 	u32 entries_per_desc, desc_index, index, offset, i;
12366 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12367 	int ret;
12368 
12369 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12370 	if (ret) {
12371 		dev_err(&hdev->pdev->dev,
12372 			"Get dfx bd num fail, status is %d.\n", ret);
12373 		return ret;
12374 	}
12375 
12376 	entries_per_desc = ARRAY_SIZE(desc[0].data);
12377 	for (i = 0; i < type_num; i++) {
12378 		offset = hclge_dfx_bd_offset_list[i];
12379 		index = offset % entries_per_desc;
12380 		desc_index = offset / entries_per_desc;
12381 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12382 	}
12383 
12384 	return ret;
12385 }
12386 
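/* Chain @bd_num descriptors for the DFX query opcode @cmd and send them,
 * letting firmware return one block of register data per descriptor.
 */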
12387 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12388 				  struct hclge_desc *desc_src, int bd_num,
12389 				  enum hclge_opcode_type cmd)
12390 {
12391 	struct hclge_desc *desc = desc_src;
12392 	int i, ret;
12393 
12394 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12395 	for (i = 0; i < bd_num - 1; i++) {
12396 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12397 		desc++;
12398 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12399 	}
12400 
12401 	desc = desc_src;
12402 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12403 	if (ret)
12404 		dev_err(&hdev->pdev->dev,
12405 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12406 			cmd, ret);
12407 
12408 	return ret;
12409 }
12410 
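/* Copy the register values returned in @desc_src to @data and append
 * SEPARATOR_VALUE padding words so the block ends on a dump-line
 * boundary. Returns the number of u32 words written.
 */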
12411 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12412 				    void *data)
12413 {
12414 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12415 	struct hclge_desc *desc = desc_src;
12416 	u32 *reg = data;
12417 
12418 	entries_per_desc = ARRAY_SIZE(desc->data);
12419 	reg_num = entries_per_desc * bd_num;
12420 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12421 	for (i = 0; i < reg_num; i++) {
12422 		index = i % entries_per_desc;
12423 		desc_index = i / entries_per_desc;
12424 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12425 	}
12426 	for (i = 0; i < separator_num; i++)
12427 		*reg++ = SEPARATOR_VALUE;
12428 
12429 	return reg_num + separator_num;
12430 }
12431 
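/* Compute the buffer length in bytes needed to dump all DFX registers,
 * with each register type rounded up to whole dump lines to leave room
 * for separator padding.
 */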
12432 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12433 {
12434 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12435 	int data_len_per_desc, bd_num, i;
12436 	int *bd_num_list;
12437 	u32 data_len;
12438 	int ret;
12439 
12440 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12441 	if (!bd_num_list)
12442 		return -ENOMEM;
12443 
12444 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12445 	if (ret) {
12446 		dev_err(&hdev->pdev->dev,
12447 			"Get dfx reg bd num fail, status is %d.\n", ret);
12448 		goto out;
12449 	}
12450 
12451 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12452 	*len = 0;
12453 	for (i = 0; i < dfx_reg_type_num; i++) {
12454 		bd_num = bd_num_list[i];
12455 		data_len = data_len_per_desc * bd_num;
12456 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12457 	}
12458 
12459 out:
12460 	kfree(bd_num_list);
12461 	return ret;
12462 }
12463 
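/* Dump all DFX register types into @data, issuing one firmware query per
 * type and reusing a descriptor buffer sized for the largest BD count.
 */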
12464 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12465 {
12466 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12467 	int bd_num, bd_num_max, buf_len, i;
12468 	struct hclge_desc *desc_src;
12469 	int *bd_num_list;
12470 	u32 *reg = data;
12471 	int ret;
12472 
12473 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12474 	if (!bd_num_list)
12475 		return -ENOMEM;
12476 
12477 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12478 	if (ret) {
12479 		dev_err(&hdev->pdev->dev,
12480 			"Get dfx reg bd num fail, status is %d.\n", ret);
12481 		goto out;
12482 	}
12483 
12484 	bd_num_max = bd_num_list[0];
12485 	for (i = 1; i < dfx_reg_type_num; i++)
12486 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12487 
12488 	buf_len = sizeof(*desc_src) * bd_num_max;
12489 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12490 	if (!desc_src) {
12491 		ret = -ENOMEM;
12492 		goto out;
12493 	}
12494 
12495 	for (i = 0; i < dfx_reg_type_num; i++) {
12496 		bd_num = bd_num_list[i];
12497 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12498 					     hclge_dfx_reg_opcode_list[i]);
12499 		if (ret) {
12500 			dev_err(&hdev->pdev->dev,
12501 				"Get dfx reg fail, status is %d.\n", ret);
12502 			break;
12503 		}
12504 
12505 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12506 	}
12507 
12508 	kfree(desc_src);
12509 out:
12510 	kfree(bd_num_list);
12511 	return ret;
12512 }
12513 
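/* Read the directly accessible PF registers (cmdq, common, per-ring and
 * per-vector interrupt registers) into @data, appending separator
 * padding after each group. Returns the number of u32 words written.
 */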
12514 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12515 			      struct hnae3_knic_private_info *kinfo)
12516 {
12517 #define HCLGE_RING_REG_OFFSET		0x200
12518 #define HCLGE_RING_INT_REG_OFFSET	0x4
12519 
12520 	int i, j, reg_num, separator_num;
12521 	int data_num_sum;
12522 	u32 *reg = data;
12523 
	/* fetch the per-PF register values from the PF PCIe register space */
12525 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12526 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12527 	for (i = 0; i < reg_num; i++)
12528 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12529 	for (i = 0; i < separator_num; i++)
12530 		*reg++ = SEPARATOR_VALUE;
12531 	data_num_sum = reg_num + separator_num;
12532 
12533 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12534 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12535 	for (i = 0; i < reg_num; i++)
12536 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12537 	for (i = 0; i < separator_num; i++)
12538 		*reg++ = SEPARATOR_VALUE;
12539 	data_num_sum += reg_num + separator_num;
12540 
12541 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12542 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12543 	for (j = 0; j < kinfo->num_tqps; j++) {
12544 		for (i = 0; i < reg_num; i++)
12545 			*reg++ = hclge_read_dev(&hdev->hw,
12546 						ring_reg_addr_list[i] +
12547 						HCLGE_RING_REG_OFFSET * j);
12548 		for (i = 0; i < separator_num; i++)
12549 			*reg++ = SEPARATOR_VALUE;
12550 	}
12551 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12552 
12553 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12554 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12555 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12556 		for (i = 0; i < reg_num; i++)
12557 			*reg++ = hclge_read_dev(&hdev->hw,
12558 						tqp_intr_reg_addr_list[i] +
12559 						HCLGE_RING_INT_REG_OFFSET * j);
12560 		for (i = 0; i < separator_num; i++)
12561 			*reg++ = SEPARATOR_VALUE;
12562 	}
12563 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12564 
12565 	return data_num_sum;
12566 }
12567 
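/* Return the total register dump length in bytes: the fixed PF register
 * groups, the 32-bit and 64-bit firmware register blocks and the DFX
 * registers, each padded to whole dump lines.
 */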
12568 static int hclge_get_regs_len(struct hnae3_handle *handle)
12569 {
12570 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12571 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12572 	struct hclge_vport *vport = hclge_get_vport(handle);
12573 	struct hclge_dev *hdev = vport->back;
12574 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12575 	int regs_lines_32_bit, regs_lines_64_bit;
12576 	int ret;
12577 
12578 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12579 	if (ret) {
12580 		dev_err(&hdev->pdev->dev,
12581 			"Get register number failed, ret = %d.\n", ret);
12582 		return ret;
12583 	}
12584 
12585 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12586 	if (ret) {
12587 		dev_err(&hdev->pdev->dev,
12588 			"Get dfx reg len failed, ret = %d.\n", ret);
12589 		return ret;
12590 	}
12591 
12592 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12593 		REG_SEPARATOR_LINE;
12594 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12595 		REG_SEPARATOR_LINE;
12596 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12597 		REG_SEPARATOR_LINE;
12598 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12599 		REG_SEPARATOR_LINE;
12600 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12601 		REG_SEPARATOR_LINE;
12602 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12603 		REG_SEPARATOR_LINE;
12604 
12605 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12606 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12607 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12608 }
12609 
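/* Fill @data with the full register dump sized by hclge_get_regs_len()
 * and report the firmware version through @version.
 */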
12610 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12611 			   void *data)
12612 {
12613 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12614 	struct hclge_vport *vport = hclge_get_vport(handle);
12615 	struct hclge_dev *hdev = vport->back;
12616 	u32 regs_num_32_bit, regs_num_64_bit;
12617 	int i, reg_num, separator_num, ret;
12618 	u32 *reg = data;
12619 
12620 	*version = hdev->fw_version;
12621 
12622 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12623 	if (ret) {
12624 		dev_err(&hdev->pdev->dev,
12625 			"Get register number failed, ret = %d.\n", ret);
12626 		return;
12627 	}
12628 
12629 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12630 
12631 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12632 	if (ret) {
12633 		dev_err(&hdev->pdev->dev,
12634 			"Get 32 bit register failed, ret = %d.\n", ret);
12635 		return;
12636 	}
12637 	reg_num = regs_num_32_bit;
12638 	reg += reg_num;
12639 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12640 	for (i = 0; i < separator_num; i++)
12641 		*reg++ = SEPARATOR_VALUE;
12642 
12643 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12644 	if (ret) {
12645 		dev_err(&hdev->pdev->dev,
12646 			"Get 64 bit register failed, ret = %d.\n", ret);
12647 		return;
12648 	}
12649 	reg_num = regs_num_64_bit * 2;
12650 	reg += reg_num;
12651 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12652 	for (i = 0; i < separator_num; i++)
12653 		*reg++ = SEPARATOR_VALUE;
12654 
12655 	ret = hclge_get_dfx_reg(hdev, reg);
12656 	if (ret)
12657 		dev_err(&hdev->pdev->dev,
12658 			"Get dfx register failed, ret = %d.\n", ret);
12659 }
12660 
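/* Set the locate (port identification) LED state with the LED status
 * configuration command.
 */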
12661 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12662 {
12663 	struct hclge_set_led_state_cmd *req;
12664 	struct hclge_desc desc;
12665 	int ret;
12666 
12667 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12668 
12669 	req = (struct hclge_set_led_state_cmd *)desc.data;
12670 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12671 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12672 
12673 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12674 	if (ret)
12675 		dev_err(&hdev->pdev->dev,
12676 			"Send set led state cmd error, ret =%d\n", ret);
12677 
12678 	return ret;
12679 }
12680 
12681 enum hclge_led_status {
12682 	HCLGE_LED_OFF,
12683 	HCLGE_LED_ON,
12684 	HCLGE_LED_NO_CHANGE = 0xFF,
12685 };
12686 
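/* ethtool set_phys_id handler: drive the locate LED for port
 * identification.
 */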
12687 static int hclge_set_led_id(struct hnae3_handle *handle,
12688 			    enum ethtool_phys_id_state status)
12689 {
12690 	struct hclge_vport *vport = hclge_get_vport(handle);
12691 	struct hclge_dev *hdev = vport->back;
12692 
12693 	switch (status) {
12694 	case ETHTOOL_ID_ACTIVE:
12695 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12696 	case ETHTOOL_ID_INACTIVE:
12697 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12698 	default:
12699 		return -EINVAL;
12700 	}
12701 }
12702 
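/* Copy the supported and advertised link mode bitmaps cached in the MAC
 * structure to the caller's buffers.
 */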
12703 static void hclge_get_link_mode(struct hnae3_handle *handle,
12704 				unsigned long *supported,
12705 				unsigned long *advertising)
12706 {
12707 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12708 	struct hclge_vport *vport = hclge_get_vport(handle);
12709 	struct hclge_dev *hdev = vport->back;
12710 	unsigned int idx = 0;
12711 
12712 	for (; idx < size; idx++) {
12713 		supported[idx] = hdev->hw.mac.supported[idx];
12714 		advertising[idx] = hdev->hw.mac.advertising[idx];
12715 	}
12716 }
12717 
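/* Enable or disable hardware GRO, restoring the previous setting if the
 * configuration command fails.
 */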
12718 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12719 {
12720 	struct hclge_vport *vport = hclge_get_vport(handle);
12721 	struct hclge_dev *hdev = vport->back;
12722 	bool gro_en_old = hdev->gro_en;
12723 	int ret;
12724 
12725 	hdev->gro_en = enable;
12726 	ret = hclge_config_gro(hdev);
12727 	if (ret)
12728 		hdev->gro_en = gro_en_old;
12729 
12730 	return ret;
12731 }
12732 
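/* Reapply the PF promiscuous configuration when it has changed, then
 * refresh each VF vport's unicast/multicast/broadcast promiscuous state
 * according to the VF's requests and trust setting.
 */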
12733 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12734 {
12735 	struct hclge_vport *vport = &hdev->vport[0];
12736 	struct hnae3_handle *handle = &vport->nic;
12737 	u8 tmp_flags;
12738 	int ret;
12739 	u16 i;
12740 
12741 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12742 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12743 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12744 	}
12745 
12746 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12747 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12748 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12749 					     tmp_flags & HNAE3_MPE);
12750 		if (!ret) {
12751 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12752 				  &vport->state);
12753 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12754 				&vport->state);
12755 		}
12756 	}
12757 
12758 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12759 		bool uc_en = false;
12760 		bool mc_en = false;
12761 		bool bc_en;
12762 
12763 		vport = &hdev->vport[i];
12764 
12765 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12766 					&vport->state))
12767 			continue;
12768 
12769 		if (vport->vf_info.trusted) {
12770 			uc_en = vport->vf_info.request_uc_en > 0;
12771 			mc_en = vport->vf_info.request_mc_en > 0;
12772 		}
12773 		bc_en = vport->vf_info.request_bc_en > 0;
12774 
12775 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12776 						 mc_en, bc_en);
12777 		if (ret) {
12778 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12779 				&vport->state);
12780 			return;
12781 		}
12782 		hclge_set_vport_vlan_fltr_change(vport);
12783 	}
12784 }
12785 
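/* Ask firmware whether an SFP module is currently present. */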
12786 static bool hclge_module_existed(struct hclge_dev *hdev)
12787 {
12788 	struct hclge_desc desc;
12789 	u32 existed;
12790 	int ret;
12791 
12792 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12793 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12794 	if (ret) {
12795 		dev_err(&hdev->pdev->dev,
12796 			"failed to get SFP exist state, ret = %d\n", ret);
12797 		return false;
12798 	}
12799 
12800 	existed = le32_to_cpu(desc.data[0]);
12801 
12802 	return existed != 0;
12803 }
12804 
/* One read needs 6 BDs (140 bytes in total).
 * Returns the number of bytes actually read; 0 means the read failed.
 */
12808 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12809 				     u32 len, u8 *data)
12810 {
12811 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12812 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12813 	u16 read_len;
12814 	u16 copy_len;
12815 	int ret;
12816 	int i;
12817 
12818 	/* setup all 6 bds to read module eeprom info. */
12819 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12820 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12821 					   true);
12822 
12823 		/* bd0~bd4 need next flag */
12824 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12825 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12826 	}
12827 
	/* set up bd0; this BD contains the offset and read length. */
12829 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12830 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12831 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12832 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12833 
12834 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12835 	if (ret) {
12836 		dev_err(&hdev->pdev->dev,
12837 			"failed to get SFP eeprom info, ret = %d\n", ret);
12838 		return 0;
12839 	}
12840 
12841 	/* copy sfp info from bd0 to out buffer. */
12842 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12843 	memcpy(data, sfp_info_bd0->data, copy_len);
12844 	read_len = copy_len;
12845 
12846 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12847 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12848 		if (read_len >= len)
12849 			return read_len;
12850 
12851 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12852 		memcpy(data + read_len, desc[i].data, copy_len);
12853 		read_len += copy_len;
12854 	}
12855 
12856 	return read_len;
12857 }
12858 
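/* ethtool module EEPROM read: supported only on fiber ports with a
 * module present; loops until @len bytes have been read.
 */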
12859 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12860 				   u32 len, u8 *data)
12861 {
12862 	struct hclge_vport *vport = hclge_get_vport(handle);
12863 	struct hclge_dev *hdev = vport->back;
12864 	u32 read_len = 0;
12865 	u16 data_len;
12866 
12867 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12868 		return -EOPNOTSUPP;
12869 
12870 	if (!hclge_module_existed(hdev))
12871 		return -ENXIO;
12872 
12873 	while (read_len < len) {
12874 		data_len = hclge_get_sfp_eeprom_info(hdev,
12875 						     offset + read_len,
12876 						     len - read_len,
12877 						     data + read_len);
12878 		if (!data_len)
12879 			return -EIO;
12880 
12881 		read_len += data_len;
12882 	}
12883 
12884 	return 0;
12885 }
12886 
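/* Query the link diagnosis status code from firmware; only supported on
 * devices newer than version V2.
 */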
12887 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12888 					 u32 *status_code)
12889 {
12890 	struct hclge_vport *vport = hclge_get_vport(handle);
12891 	struct hclge_dev *hdev = vport->back;
12892 	struct hclge_desc desc;
12893 	int ret;
12894 
12895 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12896 		return -EOPNOTSUPP;
12897 
12898 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12899 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12900 	if (ret) {
12901 		dev_err(&hdev->pdev->dev,
12902 			"failed to query link diagnosis info, ret = %d\n", ret);
12903 		return ret;
12904 	}
12905 
12906 	*status_code = le32_to_cpu(desc.data[0]);
12907 	return 0;
12908 }
12909 
12910 static const struct hnae3_ae_ops hclge_ops = {
12911 	.init_ae_dev = hclge_init_ae_dev,
12912 	.uninit_ae_dev = hclge_uninit_ae_dev,
12913 	.reset_prepare = hclge_reset_prepare_general,
12914 	.reset_done = hclge_reset_done,
12915 	.init_client_instance = hclge_init_client_instance,
12916 	.uninit_client_instance = hclge_uninit_client_instance,
12917 	.map_ring_to_vector = hclge_map_ring_to_vector,
12918 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12919 	.get_vector = hclge_get_vector,
12920 	.put_vector = hclge_put_vector,
12921 	.set_promisc_mode = hclge_set_promisc_mode,
12922 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12923 	.set_loopback = hclge_set_loopback,
12924 	.start = hclge_ae_start,
12925 	.stop = hclge_ae_stop,
12926 	.client_start = hclge_client_start,
12927 	.client_stop = hclge_client_stop,
12928 	.get_status = hclge_get_status,
12929 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12930 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12931 	.get_media_type = hclge_get_media_type,
12932 	.check_port_speed = hclge_check_port_speed,
12933 	.get_fec = hclge_get_fec,
12934 	.set_fec = hclge_set_fec,
12935 	.get_rss_key_size = hclge_get_rss_key_size,
12936 	.get_rss = hclge_get_rss,
12937 	.set_rss = hclge_set_rss,
12938 	.set_rss_tuple = hclge_set_rss_tuple,
12939 	.get_rss_tuple = hclge_get_rss_tuple,
12940 	.get_tc_size = hclge_get_tc_size,
12941 	.get_mac_addr = hclge_get_mac_addr,
12942 	.set_mac_addr = hclge_set_mac_addr,
12943 	.do_ioctl = hclge_do_ioctl,
12944 	.add_uc_addr = hclge_add_uc_addr,
12945 	.rm_uc_addr = hclge_rm_uc_addr,
12946 	.add_mc_addr = hclge_add_mc_addr,
12947 	.rm_mc_addr = hclge_rm_mc_addr,
12948 	.set_autoneg = hclge_set_autoneg,
12949 	.get_autoneg = hclge_get_autoneg,
12950 	.restart_autoneg = hclge_restart_autoneg,
12951 	.halt_autoneg = hclge_halt_autoneg,
12952 	.get_pauseparam = hclge_get_pauseparam,
12953 	.set_pauseparam = hclge_set_pauseparam,
12954 	.set_mtu = hclge_set_mtu,
12955 	.reset_queue = hclge_reset_tqp,
12956 	.get_stats = hclge_get_stats,
12957 	.get_mac_stats = hclge_get_mac_stat,
12958 	.update_stats = hclge_update_stats,
12959 	.get_strings = hclge_get_strings,
12960 	.get_sset_count = hclge_get_sset_count,
12961 	.get_fw_version = hclge_get_fw_version,
12962 	.get_mdix_mode = hclge_get_mdix_mode,
12963 	.enable_vlan_filter = hclge_enable_vlan_filter,
12964 	.set_vlan_filter = hclge_set_vlan_filter,
12965 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12966 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12967 	.reset_event = hclge_reset_event,
12968 	.get_reset_level = hclge_get_reset_level,
12969 	.set_default_reset_request = hclge_set_def_reset_request,
12970 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12971 	.set_channels = hclge_set_channels,
12972 	.get_channels = hclge_get_channels,
12973 	.get_regs_len = hclge_get_regs_len,
12974 	.get_regs = hclge_get_regs,
12975 	.set_led_id = hclge_set_led_id,
12976 	.get_link_mode = hclge_get_link_mode,
12977 	.add_fd_entry = hclge_add_fd_entry,
12978 	.del_fd_entry = hclge_del_fd_entry,
12979 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12980 	.get_fd_rule_info = hclge_get_fd_rule_info,
12981 	.get_fd_all_rules = hclge_get_all_rules,
12982 	.enable_fd = hclge_enable_fd,
12983 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12984 	.dbg_read_cmd = hclge_dbg_read_cmd,
12985 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12986 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12987 	.ae_dev_resetting = hclge_ae_dev_resetting,
12988 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12989 	.set_gro_en = hclge_gro_en,
12990 	.get_global_queue_id = hclge_covert_handle_qid_global,
12991 	.set_timer_task = hclge_set_timer_task,
12992 	.mac_connect_phy = hclge_mac_connect_phy,
12993 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12994 	.get_vf_config = hclge_get_vf_config,
12995 	.set_vf_link_state = hclge_set_vf_link_state,
12996 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12997 	.set_vf_trust = hclge_set_vf_trust,
12998 	.set_vf_rate = hclge_set_vf_rate,
12999 	.set_vf_mac = hclge_set_vf_mac,
13000 	.get_module_eeprom = hclge_get_module_eeprom,
13001 	.get_cmdq_stat = hclge_get_cmdq_stat,
13002 	.add_cls_flower = hclge_add_cls_flower,
13003 	.del_cls_flower = hclge_del_cls_flower,
13004 	.cls_flower_active = hclge_is_cls_flower_active,
13005 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13006 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13007 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
13008 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
13009 	.get_ts_info = hclge_ptp_get_ts_info,
13010 	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13011 };
13012 
13013 static struct hnae3_ae_algo ae_algo = {
13014 	.ops = &hclge_ops,
13015 	.pdev_id_table = ae_algo_pci_tbl,
13016 };
13017 
13018 static int hclge_init(void)
13019 {
13020 	pr_info("%s is initializing\n", HCLGE_NAME);
13021 
13022 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13023 	if (!hclge_wq) {
13024 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13025 		return -ENOMEM;
13026 	}
13027 
13028 	hnae3_register_ae_algo(&ae_algo);
13029 
13030 	return 0;
13031 }
13032 
13033 static void hclge_exit(void)
13034 {
13035 	hnae3_unregister_ae_algo(&ae_algo);
13036 	destroy_workqueue(hclge_wq);
13037 }
13038 module_init(hclge_init);
13039 module_exit(hclge_exit);
13040 
13041 MODULE_LICENSE("GPL");
13042 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13043 MODULE_DESCRIPTION("HCLGE Driver");
13044 MODULE_VERSION(HCLGE_MOD_VERSION);
13045