1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
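/* HCLGE_STATS_READ() reads the u64 counter located at a byte offset inside a
 * statistics structure, and HCLGE_MAC_STATS_FIELD_OFF() produces that offset
 * for a field of struct hclge_mac_stats; together they let the stats tables
 * below be walked generically (see hclge_comm_get_stats()).
 */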
27 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
59 static int hclge_init_vlan_config(struct hclge_dev *hdev);
60 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
61 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
62 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
63 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
64 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
65 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
66 						   unsigned long *addr);
67 static int hclge_set_default_loopback(struct hclge_dev *hdev);
68 
69 static void hclge_sync_mac_table(struct hclge_dev *hdev);
70 static void hclge_restore_hw_table(struct hclge_dev *hdev);
71 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
72 
73 static struct hnae3_ae_algo ae_algo;
74 
75 static struct workqueue_struct *hclge_wq;
76 
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
86 	/* required last entry */
87 	{0, }
88 };
89 
90 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
91 
92 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
93 					 HCLGE_CMDQ_TX_ADDR_H_REG,
94 					 HCLGE_CMDQ_TX_DEPTH_REG,
95 					 HCLGE_CMDQ_TX_TAIL_REG,
96 					 HCLGE_CMDQ_TX_HEAD_REG,
97 					 HCLGE_CMDQ_RX_ADDR_L_REG,
98 					 HCLGE_CMDQ_RX_ADDR_H_REG,
99 					 HCLGE_CMDQ_RX_DEPTH_REG,
100 					 HCLGE_CMDQ_RX_TAIL_REG,
101 					 HCLGE_CMDQ_RX_HEAD_REG,
102 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
103 					 HCLGE_CMDQ_INTR_STS_REG,
104 					 HCLGE_CMDQ_INTR_EN_REG,
105 					 HCLGE_CMDQ_INTR_GEN_REG};
106 
107 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
108 					   HCLGE_VECTOR0_OTER_EN_REG,
109 					   HCLGE_MISC_RESET_STS_REG,
110 					   HCLGE_MISC_VECTOR_INT_STS,
111 					   HCLGE_GLOBAL_RESET_REG,
112 					   HCLGE_FUN_RST_ING,
113 					   HCLGE_GRO_EN_REG};
114 
115 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
116 					 HCLGE_RING_RX_ADDR_H_REG,
117 					 HCLGE_RING_RX_BD_NUM_REG,
118 					 HCLGE_RING_RX_BD_LENGTH_REG,
119 					 HCLGE_RING_RX_MERGE_EN_REG,
120 					 HCLGE_RING_RX_TAIL_REG,
121 					 HCLGE_RING_RX_HEAD_REG,
122 					 HCLGE_RING_RX_FBD_NUM_REG,
123 					 HCLGE_RING_RX_OFFSET_REG,
124 					 HCLGE_RING_RX_FBD_OFFSET_REG,
125 					 HCLGE_RING_RX_STASH_REG,
126 					 HCLGE_RING_RX_BD_ERR_REG,
127 					 HCLGE_RING_TX_ADDR_L_REG,
128 					 HCLGE_RING_TX_ADDR_H_REG,
129 					 HCLGE_RING_TX_BD_NUM_REG,
130 					 HCLGE_RING_TX_PRIORITY_REG,
131 					 HCLGE_RING_TX_TC_REG,
132 					 HCLGE_RING_TX_MERGE_EN_REG,
133 					 HCLGE_RING_TX_TAIL_REG,
134 					 HCLGE_RING_TX_HEAD_REG,
135 					 HCLGE_RING_TX_FBD_NUM_REG,
136 					 HCLGE_RING_TX_OFFSET_REG,
137 					 HCLGE_RING_TX_EBD_NUM_REG,
138 					 HCLGE_RING_TX_EBD_OFFSET_REG,
139 					 HCLGE_RING_TX_BD_ERR_REG,
140 					 HCLGE_RING_EN_REG};
141 
142 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
143 					     HCLGE_TQP_INTR_GL0_REG,
144 					     HCLGE_TQP_INTR_GL1_REG,
145 					     HCLGE_TQP_INTR_GL2_REG,
146 					     HCLGE_TQP_INTR_RL_REG};
147 
148 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
149 	"App    Loopback test",
150 	"Serdes serial Loopback test",
151 	"Serdes parallel Loopback test",
152 	"Phy    Loopback test"
153 };
154 
155 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
156 	{"mac_tx_mac_pause_num",
157 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
158 	{"mac_rx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
160 	{"mac_tx_control_pkt_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
162 	{"mac_rx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
164 	{"mac_tx_pfc_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
166 	{"mac_tx_pfc_pri0_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
168 	{"mac_tx_pfc_pri1_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
170 	{"mac_tx_pfc_pri2_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
172 	{"mac_tx_pfc_pri3_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
174 	{"mac_tx_pfc_pri4_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
176 	{"mac_tx_pfc_pri5_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
178 	{"mac_tx_pfc_pri6_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
180 	{"mac_tx_pfc_pri7_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
182 	{"mac_rx_pfc_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
184 	{"mac_rx_pfc_pri0_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
186 	{"mac_rx_pfc_pri1_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
188 	{"mac_rx_pfc_pri2_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
190 	{"mac_rx_pfc_pri3_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
192 	{"mac_rx_pfc_pri4_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
194 	{"mac_rx_pfc_pri5_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
196 	{"mac_rx_pfc_pri6_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
198 	{"mac_rx_pfc_pri7_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
200 	{"mac_tx_total_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
202 	{"mac_tx_total_oct_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
204 	{"mac_tx_good_pkt_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
206 	{"mac_tx_bad_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
208 	{"mac_tx_good_oct_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
210 	{"mac_tx_bad_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
212 	{"mac_tx_uni_pkt_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
214 	{"mac_tx_multi_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
216 	{"mac_tx_broad_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
218 	{"mac_tx_undersize_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
220 	{"mac_tx_oversize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
222 	{"mac_tx_64_oct_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
224 	{"mac_tx_65_127_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
226 	{"mac_tx_128_255_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
228 	{"mac_tx_256_511_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
230 	{"mac_tx_512_1023_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
232 	{"mac_tx_1024_1518_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
234 	{"mac_tx_1519_2047_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
236 	{"mac_tx_2048_4095_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
238 	{"mac_tx_4096_8191_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
240 	{"mac_tx_8192_9216_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
242 	{"mac_tx_9217_12287_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
244 	{"mac_tx_12288_16383_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
246 	{"mac_tx_1519_max_good_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
248 	{"mac_tx_1519_max_bad_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
250 	{"mac_rx_total_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
252 	{"mac_rx_total_oct_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
254 	{"mac_rx_good_pkt_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
256 	{"mac_rx_bad_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
258 	{"mac_rx_good_oct_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
260 	{"mac_rx_bad_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
262 	{"mac_rx_uni_pkt_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
264 	{"mac_rx_multi_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
266 	{"mac_rx_broad_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
268 	{"mac_rx_undersize_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
270 	{"mac_rx_oversize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
272 	{"mac_rx_64_oct_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
274 	{"mac_rx_65_127_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
276 	{"mac_rx_128_255_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
278 	{"mac_rx_256_511_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
280 	{"mac_rx_512_1023_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
282 	{"mac_rx_1024_1518_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
284 	{"mac_rx_1519_2047_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
286 	{"mac_rx_2048_4095_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
288 	{"mac_rx_4096_8191_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
290 	{"mac_rx_8192_9216_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
292 	{"mac_rx_9217_12287_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
294 	{"mac_rx_12288_16383_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
296 	{"mac_rx_1519_max_good_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
298 	{"mac_rx_1519_max_bad_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
300 
301 	{"mac_tx_fragment_pkt_num",
302 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
303 	{"mac_tx_undermin_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
305 	{"mac_tx_jabber_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
307 	{"mac_tx_err_all_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
309 	{"mac_tx_from_app_good_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
311 	{"mac_tx_from_app_bad_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
313 	{"mac_rx_fragment_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
315 	{"mac_rx_undermin_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
317 	{"mac_rx_jabber_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
319 	{"mac_rx_fcs_err_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
321 	{"mac_rx_send_app_good_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
323 	{"mac_rx_send_app_bad_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
325 };
326 
327 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
328 	{
329 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
330 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
331 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
332 		.i_port_bitmap = 0x1,
333 	},
334 };
335 
336 static const u8 hclge_hash_key[] = {
337 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
338 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
339 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
340 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
341 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
342 };
343 
344 static const u32 hclge_dfx_bd_offset_list[] = {
345 	HCLGE_DFX_BIOS_BD_OFFSET,
346 	HCLGE_DFX_SSU_0_BD_OFFSET,
347 	HCLGE_DFX_SSU_1_BD_OFFSET,
348 	HCLGE_DFX_IGU_BD_OFFSET,
349 	HCLGE_DFX_RPU_0_BD_OFFSET,
350 	HCLGE_DFX_RPU_1_BD_OFFSET,
351 	HCLGE_DFX_NCSI_BD_OFFSET,
352 	HCLGE_DFX_RTC_BD_OFFSET,
353 	HCLGE_DFX_PPP_BD_OFFSET,
354 	HCLGE_DFX_RCB_BD_OFFSET,
355 	HCLGE_DFX_TQP_BD_OFFSET,
356 	HCLGE_DFX_SSU_2_BD_OFFSET
357 };
358 
359 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
360 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
361 	HCLGE_OPC_DFX_SSU_REG_0,
362 	HCLGE_OPC_DFX_SSU_REG_1,
363 	HCLGE_OPC_DFX_IGU_EGU_REG,
364 	HCLGE_OPC_DFX_RPU_REG_0,
365 	HCLGE_OPC_DFX_RPU_REG_1,
366 	HCLGE_OPC_DFX_NCSI_REG,
367 	HCLGE_OPC_DFX_RTC_REG,
368 	HCLGE_OPC_DFX_PPP_REG,
369 	HCLGE_OPC_DFX_RCB_REG,
370 	HCLGE_OPC_DFX_TQP_REG,
371 	HCLGE_OPC_DFX_SSU_REG_2
372 };
373 
374 static const struct key_info meta_data_key_info[] = {
375 	{ PACKET_TYPE_ID, 6},
376 	{ IP_FRAGEMENT, 1},
377 	{ ROCE_TYPE, 1},
378 	{ NEXT_KEY, 5},
379 	{ VLAN_NUMBER, 2},
380 	{ SRC_VPORT, 12},
381 	{ DST_VPORT, 12},
382 	{ TUNNEL_PACKET, 1},
383 };
384 
385 static const struct key_info tuple_key_info[] = {
386 	{ OUTER_DST_MAC, 48},
387 	{ OUTER_SRC_MAC, 48},
388 	{ OUTER_VLAN_TAG_FST, 16},
389 	{ OUTER_VLAN_TAG_SEC, 16},
390 	{ OUTER_ETH_TYPE, 16},
391 	{ OUTER_L2_RSV, 16},
392 	{ OUTER_IP_TOS, 8},
393 	{ OUTER_IP_PROTO, 8},
394 	{ OUTER_SRC_IP, 32},
395 	{ OUTER_DST_IP, 32},
396 	{ OUTER_L3_RSV, 16},
397 	{ OUTER_SRC_PORT, 16},
398 	{ OUTER_DST_PORT, 16},
399 	{ OUTER_L4_RSV, 32},
400 	{ OUTER_TUN_VNI, 24},
401 	{ OUTER_TUN_FLOW_ID, 8},
402 	{ INNER_DST_MAC, 48},
403 	{ INNER_SRC_MAC, 48},
404 	{ INNER_VLAN_TAG_FST, 16},
405 	{ INNER_VLAN_TAG_SEC, 16},
406 	{ INNER_ETH_TYPE, 16},
407 	{ INNER_L2_RSV, 16},
408 	{ INNER_IP_TOS, 8},
409 	{ INNER_IP_PROTO, 8},
410 	{ INNER_SRC_IP, 32},
411 	{ INNER_DST_IP, 32},
412 	{ INNER_L3_RSV, 16},
413 	{ INNER_SRC_PORT, 16},
414 	{ INNER_DST_PORT, 16},
415 	{ INNER_L4_RSV, 32},
416 };
417 
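/* Fallback statistics read: issue the fixed-size HCLGE_OPC_STATS_MAC query
 * (HCLGE_MAC_CMD_NUM descriptors) and accumulate every returned 64-bit
 * counter into hdev->mac_stats. Used when the firmware does not support
 * reporting the MAC register count (see hclge_mac_update_stats()).
 */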
418 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
419 {
420 #define HCLGE_MAC_CMD_NUM 21
421 
422 	u64 *data = (u64 *)(&hdev->mac_stats);
423 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
424 	__le64 *desc_data;
425 	int i, k, n;
426 	int ret;
427 
428 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
429 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
430 	if (ret) {
431 		dev_err(&hdev->pdev->dev,
432 			"Get MAC pkt stats fail, status = %d.\n", ret);
433 
434 		return ret;
435 	}
436 
437 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
438 		/* for special opcode 0032, only the first desc has the head */
439 		if (unlikely(i == 0)) {
440 			desc_data = (__le64 *)(&desc[i].data[0]);
441 			n = HCLGE_RD_FIRST_STATS_NUM;
442 		} else {
443 			desc_data = (__le64 *)(&desc[i]);
444 			n = HCLGE_RD_OTHER_STATS_NUM;
445 		}
446 
447 		for (k = 0; k < n; k++) {
448 			*data += le64_to_cpu(*desc_data);
449 			data++;
450 			desc_data++;
451 		}
452 	}
453 
454 	return 0;
455 }
456 
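/* Full statistics read: issue HCLGE_OPC_STATS_MAC_ALL with a descriptor
 * count derived from the firmware-reported register number and accumulate
 * the counters into hdev->mac_stats, mirroring the layout handling used by
 * the fallback path above.
 */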
457 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
458 {
459 	u64 *data = (u64 *)(&hdev->mac_stats);
460 	struct hclge_desc *desc;
461 	__le64 *desc_data;
462 	u16 i, k, n;
463 	int ret;
464 
465 	/* This may be called inside atomic sections,
466 	 * so GFP_ATOMIC is more suitable here
467 	 */
468 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
469 	if (!desc)
470 		return -ENOMEM;
471 
472 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
473 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
474 	if (ret) {
475 		kfree(desc);
476 		return ret;
477 	}
478 
479 	for (i = 0; i < desc_num; i++) {
480 		/* for special opcode 0034, only the first desc has the head */
481 		if (i == 0) {
482 			desc_data = (__le64 *)(&desc[i].data[0]);
483 			n = HCLGE_RD_FIRST_STATS_NUM;
484 		} else {
485 			desc_data = (__le64 *)(&desc[i]);
486 			n = HCLGE_RD_OTHER_STATS_NUM;
487 		}
488 
489 		for (k = 0; k < n; k++) {
490 			*data += le64_to_cpu(*desc_data);
491 			data++;
492 			desc_data++;
493 		}
494 	}
495 
496 	kfree(desc);
497 
498 	return 0;
499 }
500 
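/* Ask the firmware how many MAC statistic registers it exposes and convert
 * that count into a descriptor count. Judging by the formula below, the
 * first descriptor carries three 64-bit counters (the rest of it is the
 * command header) and every following descriptor carries four, so
 * desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4). For example, reg_num = 12
 * gives 1 + 2 + 1 = 4 descriptors.
 */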
501 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
502 {
503 	struct hclge_desc desc;
504 	__le32 *desc_data;
505 	u32 reg_num;
506 	int ret;
507 
508 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
509 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
510 	if (ret)
511 		return ret;
512 
513 	desc_data = (__le32 *)(&desc.data[0]);
514 	reg_num = le32_to_cpu(*desc_data);
515 
516 	*desc_num = 1 + ((reg_num - 3) >> 2) +
517 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
518 
519 	return 0;
520 }
521 
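/* Update MAC statistics, preferring the firmware-sized query when
 * HCLGE_OPC_QUERY_MAC_REG_NUM is supported; older firmware returns
 * -EOPNOTSUPP for that query and the fixed-size fallback is used instead.
 */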
522 static int hclge_mac_update_stats(struct hclge_dev *hdev)
523 {
524 	u32 desc_num;
525 	int ret;
526 
527 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
528 
529 	/* The firmware supports the new statistics acquisition method */
530 	if (!ret)
531 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
532 	else if (ret == -EOPNOTSUPP)
533 		ret = hclge_mac_update_stats_defective(hdev);
534 	else
535 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
536 
537 	return ret;
538 }
539 
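/* Accumulate per-queue packet counters: one HCLGE_OPC_QUERY_RX_STATS and
 * one HCLGE_OPC_QUERY_TX_STATS command is sent per TQP, and the returned
 * packet counts are added into the queue's tqp_stats.
 */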
540 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
541 {
542 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
543 	struct hclge_vport *vport = hclge_get_vport(handle);
544 	struct hclge_dev *hdev = vport->back;
545 	struct hnae3_queue *queue;
546 	struct hclge_desc desc[1];
547 	struct hclge_tqp *tqp;
548 	int ret, i;
549 
550 	for (i = 0; i < kinfo->num_tqps; i++) {
551 		queue = handle->kinfo.tqp[i];
552 		tqp = container_of(queue, struct hclge_tqp, q);
553 		/* command : HCLGE_OPC_QUERY_RX_STATS */
554 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
555 					   true);
556 
557 		desc[0].data[0] = cpu_to_le32(tqp->index);
558 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
559 		if (ret) {
560 			dev_err(&hdev->pdev->dev,
561 				"Query tqp stat fail, status = %d, queue = %d\n",
562 				ret, i);
563 			return ret;
564 		}
565 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
566 			le32_to_cpu(desc[0].data[1]);
567 	}
568 
569 	for (i = 0; i < kinfo->num_tqps; i++) {
570 		queue = handle->kinfo.tqp[i];
571 		tqp = container_of(queue, struct hclge_tqp, q);
572 		/* command : HCLGE_OPC_QUERY_TX_STATS */
573 		hclge_cmd_setup_basic_desc(&desc[0],
574 					   HCLGE_OPC_QUERY_TX_STATS,
575 					   true);
576 
577 		desc[0].data[0] = cpu_to_le32(tqp->index);
578 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
579 		if (ret) {
580 			dev_err(&hdev->pdev->dev,
581 				"Query tqp stat fail, status = %d, queue = %d\n",
582 				ret, i);
583 			return ret;
584 		}
585 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
586 			le32_to_cpu(desc[0].data[1]);
587 	}
588 
589 	return 0;
590 }
591 
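/* Copy the accumulated per-queue counters into the ethtool buffer, TX
 * queues first and then RX queues; the order must match the strings
 * emitted by hclge_tqps_get_strings().
 */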
592 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
593 {
594 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
595 	struct hclge_tqp *tqp;
596 	u64 *buff = data;
597 	int i;
598 
599 	for (i = 0; i < kinfo->num_tqps; i++) {
600 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
601 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
602 	}
603 
604 	for (i = 0; i < kinfo->num_tqps; i++) {
605 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
606 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
607 	}
608 
609 	return buff;
610 }
611 
612 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
613 {
614 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
615 
616 	/* each tqp has a TX queue and an RX queue */
617 	return kinfo->num_tqps * 2;
618 }
619 
620 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
621 {
622 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 	u8 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
628 			struct hclge_tqp, q);
629 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
630 			 tqp->index);
631 		buff = buff + ETH_GSTRING_LEN;
632 	}
633 
634 	for (i = 0; i < kinfo->num_tqps; i++) {
635 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
636 			struct hclge_tqp, q);
637 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
638 			 tqp->index);
639 		buff = buff + ETH_GSTRING_LEN;
640 	}
641 
642 	return buff;
643 }
644 
645 static u64 *hclge_comm_get_stats(const void *comm_stats,
646 				 const struct hclge_comm_stats_str strs[],
647 				 int size, u64 *data)
648 {
649 	u64 *buf = data;
650 	u32 i;
651 
652 	for (i = 0; i < size; i++)
653 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
654 
655 	return buf + size;
656 }
657 
658 static u8 *hclge_comm_get_strings(u32 stringset,
659 				  const struct hclge_comm_stats_str strs[],
660 				  int size, u8 *data)
661 {
662 	char *buff = (char *)data;
663 	u32 i;
664 
665 	if (stringset != ETH_SS_STATS)
666 		return buff;
667 
668 	for (i = 0; i < size; i++) {
669 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
670 		buff = buff + ETH_GSTRING_LEN;
671 	}
672 
673 	return (u8 *)buff;
674 }
675 
676 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
677 {
678 	struct hnae3_handle *handle;
679 	int status;
680 
681 	handle = &hdev->vport[0].nic;
682 	if (handle->client) {
683 		status = hclge_tqps_update_stats(handle);
684 		if (status) {
685 			dev_err(&hdev->pdev->dev,
686 				"Update TQPS stats fail, status = %d.\n",
687 				status);
688 		}
689 	}
690 
691 	status = hclge_mac_update_stats(hdev);
692 	if (status)
693 		dev_err(&hdev->pdev->dev,
694 			"Update MAC stats fail, status = %d.\n", status);
695 }
696 
697 static void hclge_update_stats(struct hnae3_handle *handle,
698 			       struct net_device_stats *net_stats)
699 {
700 	struct hclge_vport *vport = hclge_get_vport(handle);
701 	struct hclge_dev *hdev = vport->back;
702 	int status;
703 
704 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
705 		return;
706 
707 	status = hclge_mac_update_stats(hdev);
708 	if (status)
709 		dev_err(&hdev->pdev->dev,
710 			"Update MAC stats fail, status = %d.\n",
711 			status);
712 
713 	status = hclge_tqps_update_stats(handle);
714 	if (status)
715 		dev_err(&hdev->pdev->dev,
716 			"Update TQPS stats fail, status = %d.\n",
717 			status);
718 
719 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
720 }
721 
722 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
723 {
724 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
725 		HNAE3_SUPPORT_PHY_LOOPBACK |\
726 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
727 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
728 
729 	struct hclge_vport *vport = hclge_get_vport(handle);
730 	struct hclge_dev *hdev = vport->back;
731 	int count = 0;
732 
733 	/* Loopback test support rules:
734 	 * mac: supported only in GE mode
735 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
736 	 * phy: supported only when a phy device exists on the board
737 	 */
738 	if (stringset == ETH_SS_TEST) {
739 		/* clear loopback bit flags at first */
740 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
741 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
742 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
745 			count += 1;
746 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
747 		}
748 
749 		count += 2;
750 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
751 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
752 
753 		if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
754 		    hdev->hw.mac.phydev->drv->set_loopback) {
755 			count += 1;
756 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757 		}
758 
759 	} else if (stringset == ETH_SS_STATS) {
760 		count = ARRAY_SIZE(g_mac_stats_string) +
761 			hclge_tqps_get_sset_count(handle, stringset);
762 	}
763 
764 	return count;
765 }
766 
767 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768 			      u8 *data)
769 {
770 	u8 *p = data;
771 	int size;
772 
773 	if (stringset == ETH_SS_STATS) {
774 		size = ARRAY_SIZE(g_mac_stats_string);
775 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
776 					   size, p);
777 		p = hclge_tqps_get_strings(handle, p);
778 	} else if (stringset == ETH_SS_TEST) {
779 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
780 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
781 			       ETH_GSTRING_LEN);
782 			p += ETH_GSTRING_LEN;
783 		}
784 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
785 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
786 			       ETH_GSTRING_LEN);
787 			p += ETH_GSTRING_LEN;
788 		}
789 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
790 			memcpy(p,
791 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
792 			       ETH_GSTRING_LEN);
793 			p += ETH_GSTRING_LEN;
794 		}
795 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
796 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
797 			       ETH_GSTRING_LEN);
798 			p += ETH_GSTRING_LEN;
799 		}
800 	}
801 }
802 
803 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
804 {
805 	struct hclge_vport *vport = hclge_get_vport(handle);
806 	struct hclge_dev *hdev = vport->back;
807 	u64 *p;
808 
809 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
810 				 ARRAY_SIZE(g_mac_stats_string), data);
811 	p = hclge_tqps_get_stats(handle, p);
812 }
813 
814 static void hclge_get_mac_stat(struct hnae3_handle *handle,
815 			       struct hns3_mac_stats *mac_stats)
816 {
817 	struct hclge_vport *vport = hclge_get_vport(handle);
818 	struct hclge_dev *hdev = vport->back;
819 
820 	hclge_update_stats(handle, NULL);
821 
822 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
823 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824 }
825 
826 static int hclge_parse_func_status(struct hclge_dev *hdev,
827 				   struct hclge_func_status_cmd *status)
828 {
829 #define HCLGE_MAC_ID_MASK	0xF
830 
831 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
832 		return -EINVAL;
833 
834 	/* Set the pf to main pf */
835 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
836 		hdev->flag |= HCLGE_FLAG_MAIN;
837 	else
838 		hdev->flag &= ~HCLGE_FLAG_MAIN;
839 
840 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
841 	return 0;
842 }
843 
844 static int hclge_query_function_status(struct hclge_dev *hdev)
845 {
846 #define HCLGE_QUERY_MAX_CNT	5
847 
848 	struct hclge_func_status_cmd *req;
849 	struct hclge_desc desc;
850 	int timeout = 0;
851 	int ret;
852 
853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
854 	req = (struct hclge_func_status_cmd *)desc.data;
855 
856 	do {
857 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
858 		if (ret) {
859 			dev_err(&hdev->pdev->dev,
860 				"query function status failed %d.\n", ret);
861 			return ret;
862 		}
863 
864 		/* Check whether pf reset is done */
865 		if (req->pf_state)
866 			break;
867 		usleep_range(1000, 2000);
868 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
869 
870 	return hclge_parse_func_status(hdev, req);
871 }
872 
873 static int hclge_query_pf_resource(struct hclge_dev *hdev)
874 {
875 	struct hclge_pf_res_cmd *req;
876 	struct hclge_desc desc;
877 	int ret;
878 
879 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
880 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
881 	if (ret) {
882 		dev_err(&hdev->pdev->dev,
883 			"query pf resource failed %d.\n", ret);
884 		return ret;
885 	}
886 
887 	req = (struct hclge_pf_res_cmd *)desc.data;
888 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
889 			 le16_to_cpu(req->ext_tqp_num);
890 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
891 
892 	if (req->tx_buf_size)
893 		hdev->tx_buf_size =
894 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
895 	else
896 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
897 
898 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
899 
900 	if (req->dv_buf_size)
901 		hdev->dv_buf_size =
902 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
903 	else
904 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
905 
906 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
907 
908 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
909 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
910 		dev_err(&hdev->pdev->dev,
911 			"only %u msi resources available, not enough for pf(min:2).\n",
912 			hdev->num_nic_msi);
913 		return -EINVAL;
914 	}
915 
916 	if (hnae3_dev_roce_supported(hdev)) {
917 		hdev->num_roce_msi =
918 			le16_to_cpu(req->pf_intr_vector_number_roce);
919 
920 		/* PF should have both NIC vectors and RoCE vectors;
921 		 * NIC vectors are queued before RoCE vectors.
922 		 */
923 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
924 	} else {
925 		hdev->num_msi = hdev->num_nic_msi;
926 	}
927 
928 	return 0;
929 }
930 
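/* Translate the firmware speed code (the default_speed field of the
 * configuration parameters) into an HCLGE_MAC_SPEED_* value. The codes are
 * not ordered by speed: 0 maps to 1G, 6 and 7 to 10M and 100M, and 8 to
 * 200G.
 */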
931 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
932 {
933 	switch (speed_cmd) {
934 	case 6:
935 		*speed = HCLGE_MAC_SPEED_10M;
936 		break;
937 	case 7:
938 		*speed = HCLGE_MAC_SPEED_100M;
939 		break;
940 	case 0:
941 		*speed = HCLGE_MAC_SPEED_1G;
942 		break;
943 	case 1:
944 		*speed = HCLGE_MAC_SPEED_10G;
945 		break;
946 	case 2:
947 		*speed = HCLGE_MAC_SPEED_25G;
948 		break;
949 	case 3:
950 		*speed = HCLGE_MAC_SPEED_40G;
951 		break;
952 	case 4:
953 		*speed = HCLGE_MAC_SPEED_50G;
954 		break;
955 	case 5:
956 		*speed = HCLGE_MAC_SPEED_100G;
957 		break;
958 	case 8:
959 		*speed = HCLGE_MAC_SPEED_200G;
960 		break;
961 	default:
962 		return -EINVAL;
963 	}
964 
965 	return 0;
966 }
967 
968 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
969 {
970 	struct hclge_vport *vport = hclge_get_vport(handle);
971 	struct hclge_dev *hdev = vport->back;
972 	u32 speed_ability = hdev->hw.mac.speed_ability;
973 	u32 speed_bit = 0;
974 
975 	switch (speed) {
976 	case HCLGE_MAC_SPEED_10M:
977 		speed_bit = HCLGE_SUPPORT_10M_BIT;
978 		break;
979 	case HCLGE_MAC_SPEED_100M:
980 		speed_bit = HCLGE_SUPPORT_100M_BIT;
981 		break;
982 	case HCLGE_MAC_SPEED_1G:
983 		speed_bit = HCLGE_SUPPORT_1G_BIT;
984 		break;
985 	case HCLGE_MAC_SPEED_10G:
986 		speed_bit = HCLGE_SUPPORT_10G_BIT;
987 		break;
988 	case HCLGE_MAC_SPEED_25G:
989 		speed_bit = HCLGE_SUPPORT_25G_BIT;
990 		break;
991 	case HCLGE_MAC_SPEED_40G:
992 		speed_bit = HCLGE_SUPPORT_40G_BIT;
993 		break;
994 	case HCLGE_MAC_SPEED_50G:
995 		speed_bit = HCLGE_SUPPORT_50G_BIT;
996 		break;
997 	case HCLGE_MAC_SPEED_100G:
998 		speed_bit = HCLGE_SUPPORT_100G_BIT;
999 		break;
1000 	case HCLGE_MAC_SPEED_200G:
1001 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1002 		break;
1003 	default:
1004 		return -EINVAL;
1005 	}
1006 
1007 	if (speed_bit & speed_ability)
1008 		return 0;
1009 
1010 	return -EINVAL;
1011 }
1012 
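/* The hclge_convert_setting_*() helpers below translate the firmware
 * speed_ability bitmap into ethtool link-mode bits for the SR, LR, CR and
 * KR media variants; only the speeds whose bits are set in speed_ability
 * are advertised in mac->supported.
 */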
1013 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1014 {
1015 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1016 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1017 				 mac->supported);
1018 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1019 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1020 				 mac->supported);
1021 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1022 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1023 				 mac->supported);
1024 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1025 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1026 				 mac->supported);
1027 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1028 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1029 				 mac->supported);
1030 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1031 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1032 				 mac->supported);
1033 }
1034 
1035 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1036 {
1037 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1038 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1039 				 mac->supported);
1040 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1041 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1042 				 mac->supported);
1043 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1044 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1045 				 mac->supported);
1046 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1047 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1048 				 mac->supported);
1049 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1050 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1051 				 mac->supported);
1052 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1053 		linkmode_set_bit(
1054 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1055 			mac->supported);
1056 }
1057 
1058 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1059 {
1060 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1065 				 mac->supported);
1066 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1067 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1068 				 mac->supported);
1069 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1070 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1071 				 mac->supported);
1072 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1073 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1074 				 mac->supported);
1075 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1076 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1077 				 mac->supported);
1078 }
1079 
1080 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1081 {
1082 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1083 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1084 				 mac->supported);
1085 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1086 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1087 				 mac->supported);
1088 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1089 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1090 				 mac->supported);
1091 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1092 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1093 				 mac->supported);
1094 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1095 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1096 				 mac->supported);
1097 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1098 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1099 				 mac->supported);
1100 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1101 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1102 				 mac->supported);
1103 }
1104 
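/* Derive FEC capabilities from the current MAC speed: 10G/40G advertise
 * BaseR, 25G/50G and 100G/200G advertise RS, and fec_ability additionally
 * records the AUTO mode (plus BaseR for 25G/50G). Other speeds are left
 * with an empty fec_ability.
 */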
1105 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1106 {
1107 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1108 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1109 
1110 	switch (mac->speed) {
1111 	case HCLGE_MAC_SPEED_10G:
1112 	case HCLGE_MAC_SPEED_40G:
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1114 				 mac->supported);
1115 		mac->fec_ability =
1116 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1117 		break;
1118 	case HCLGE_MAC_SPEED_25G:
1119 	case HCLGE_MAC_SPEED_50G:
1120 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1121 				 mac->supported);
1122 		mac->fec_ability =
1123 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1124 			BIT(HNAE3_FEC_AUTO);
1125 		break;
1126 	case HCLGE_MAC_SPEED_100G:
1127 	case HCLGE_MAC_SPEED_200G:
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1129 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1130 		break;
1131 	default:
1132 		mac->fec_ability = 0;
1133 		break;
1134 	}
1135 }
1136 
1137 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1138 					u16 speed_ability)
1139 {
1140 	struct hclge_mac *mac = &hdev->hw.mac;
1141 
1142 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1143 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1144 				 mac->supported);
1145 
1146 	hclge_convert_setting_sr(mac, speed_ability);
1147 	hclge_convert_setting_lr(mac, speed_ability);
1148 	hclge_convert_setting_cr(mac, speed_ability);
1149 	if (hnae3_dev_fec_supported(hdev))
1150 		hclge_convert_setting_fec(mac);
1151 
1152 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1153 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1154 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1155 }
1156 
1157 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1158 					    u16 speed_ability)
1159 {
1160 	struct hclge_mac *mac = &hdev->hw.mac;
1161 
1162 	hclge_convert_setting_kr(mac, speed_ability);
1163 	if (hnae3_dev_fec_supported(hdev))
1164 		hclge_convert_setting_fec(mac);
1165 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1166 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1167 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1168 }
1169 
1170 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1171 					 u16 speed_ability)
1172 {
1173 	unsigned long *supported = hdev->hw.mac.supported;
1174 
1175 	/* default to supporting all speeds for a GE port */
1176 	if (!speed_ability)
1177 		speed_ability = HCLGE_SUPPORT_GE;
1178 
1179 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1181 				 supported);
1182 
1183 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1184 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1185 				 supported);
1186 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1187 				 supported);
1188 	}
1189 
1190 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1191 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1192 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1193 	}
1194 
1195 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1196 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1197 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1199 }
1200 
1201 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1202 {
1203 	u8 media_type = hdev->hw.mac.media_type;
1204 
1205 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1206 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1207 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1208 		hclge_parse_copper_link_mode(hdev, speed_ability);
1209 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1210 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1211 }
1212 
1213 static u32 hclge_get_max_speed(u16 speed_ability)
1214 {
1215 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1216 		return HCLGE_MAC_SPEED_200G;
1217 
1218 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1219 		return HCLGE_MAC_SPEED_100G;
1220 
1221 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1222 		return HCLGE_MAC_SPEED_50G;
1223 
1224 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1225 		return HCLGE_MAC_SPEED_40G;
1226 
1227 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1228 		return HCLGE_MAC_SPEED_25G;
1229 
1230 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1231 		return HCLGE_MAC_SPEED_10G;
1232 
1233 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1234 		return HCLGE_MAC_SPEED_1G;
1235 
1236 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1237 		return HCLGE_MAC_SPEED_100M;
1238 
1239 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1240 		return HCLGE_MAC_SPEED_10M;
1241 
1242 	return HCLGE_MAC_SPEED_1G;
1243 }
1244 
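/* Decode the configuration parameters returned by HCLGE_OPC_GET_CFG_PARAM.
 * The response spans two descriptors: desc[0] carries the vmdq/tc/tqp
 * settings, phy address, media type, rx buffer length, MAC address, default
 * speed and VF rss size, while desc[1] carries the numa node map, the speed
 * ability bitmap (with its extension byte), the umv table space and the PF
 * rss size exponent.
 */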
1245 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1246 {
1247 #define SPEED_ABILITY_EXT_SHIFT			8
1248 
1249 	struct hclge_cfg_param_cmd *req;
1250 	u64 mac_addr_tmp_high;
1251 	u16 speed_ability_ext;
1252 	u64 mac_addr_tmp;
1253 	unsigned int i;
1254 
1255 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1256 
1257 	/* get the configuration */
1258 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1259 					      HCLGE_CFG_VMDQ_M,
1260 					      HCLGE_CFG_VMDQ_S);
1261 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1262 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1263 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1264 					    HCLGE_CFG_TQP_DESC_N_M,
1265 					    HCLGE_CFG_TQP_DESC_N_S);
1266 
1267 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1268 					HCLGE_CFG_PHY_ADDR_M,
1269 					HCLGE_CFG_PHY_ADDR_S);
1270 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1271 					  HCLGE_CFG_MEDIA_TP_M,
1272 					  HCLGE_CFG_MEDIA_TP_S);
1273 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1274 					  HCLGE_CFG_RX_BUF_LEN_M,
1275 					  HCLGE_CFG_RX_BUF_LEN_S);
1276 	/* get mac_address */
1277 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1278 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1279 					    HCLGE_CFG_MAC_ADDR_H_M,
1280 					    HCLGE_CFG_MAC_ADDR_H_S);
1281 
1282 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1283 
1284 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1285 					     HCLGE_CFG_DEFAULT_SPEED_M,
1286 					     HCLGE_CFG_DEFAULT_SPEED_S);
1287 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1288 					       HCLGE_CFG_RSS_SIZE_M,
1289 					       HCLGE_CFG_RSS_SIZE_S);
1290 
1291 	for (i = 0; i < ETH_ALEN; i++)
1292 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1293 
1294 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1295 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1296 
1297 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1298 					     HCLGE_CFG_SPEED_ABILITY_M,
1299 					     HCLGE_CFG_SPEED_ABILITY_S);
1300 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1302 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1303 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1304 
1305 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1306 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1307 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1308 	if (!cfg->umv_space)
1309 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1310 
1311 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1312 					       HCLGE_CFG_PF_RSS_SIZE_M,
1313 					       HCLGE_CFG_PF_RSS_SIZE_S);
1314 
1315 	/* HCLGE_CFG_PF_RSS_SIZE_M stores the base-2 exponent of the PF max
1316 	 * rss size (a power of 2) rather than the size itself, which is
1317 	 * more flexible for future changes and expansions.
1318 	 * A PF field of 0 means the field is unused; in that case PF and VF
1319 	 * share the same max rss size field (HCLGE_CFG_RSS_SIZE_S), so fall
1320 	 * back to the VF max rss size.
1321 	 */
1322 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1323 			       1U << cfg->pf_rss_size_max :
1324 			       cfg->vf_rss_size_max;
1325 }
1326 
1327 /* hclge_get_cfg: query the static parameters from flash
1328  * @hdev: pointer to struct hclge_dev
1329  * @hcfg: the config structure to be filled
1330  */
1331 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1332 {
1333 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1334 	struct hclge_cfg_param_cmd *req;
1335 	unsigned int i;
1336 	int ret;
1337 
1338 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1339 		u32 offset = 0;
1340 
1341 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1342 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1343 					   true);
1344 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1345 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1346 		/* Len should be in units of 4 bytes when sent to hardware */
1347 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1348 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1349 		req->offset = cpu_to_le32(offset);
1350 	}
1351 
1352 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1353 	if (ret) {
1354 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1355 		return ret;
1356 	}
1357 
1358 	hclge_parse_cfg(hcfg, desc);
1359 
1360 	return 0;
1361 }
1362 
1363 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1364 {
1365 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1366 
1367 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1368 
1369 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1370 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1371 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1372 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1373 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1374 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1375 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1376 }
1377 
1378 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1379 				  struct hclge_desc *desc)
1380 {
1381 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1382 	struct hclge_dev_specs_0_cmd *req0;
1383 	struct hclge_dev_specs_1_cmd *req1;
1384 
1385 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1386 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1387 
1388 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1389 	ae_dev->dev_specs.rss_ind_tbl_size =
1390 		le16_to_cpu(req0->rss_ind_tbl_size);
1391 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1392 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1393 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1394 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1395 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1396 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1397 }
1398 
1399 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1400 {
1401 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1402 
1403 	if (!dev_specs->max_non_tso_bd_num)
1404 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1405 	if (!dev_specs->rss_ind_tbl_size)
1406 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1407 	if (!dev_specs->rss_key_size)
1408 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1409 	if (!dev_specs->max_tm_rate)
1410 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1411 	if (!dev_specs->max_qset_num)
1412 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1413 	if (!dev_specs->max_int_gl)
1414 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1415 	if (!dev_specs->max_frm_size)
1416 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1417 }
1418 
1419 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1420 {
1421 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1422 	int ret;
1423 	int i;
1424 
1425 	/* set default specifications as devices lower than version V3 do not
1426 	 * support querying specifications from firmware.
1427 	 */
1428 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1429 		hclge_set_default_dev_specs(hdev);
1430 		return 0;
1431 	}
1432 
1433 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1434 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1435 					   true);
1436 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1437 	}
1438 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1439 
1440 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1441 	if (ret)
1442 		return ret;
1443 
1444 	hclge_parse_dev_specs(hdev, desc);
1445 	hclge_check_dev_specs(hdev);
1446 
1447 	return 0;
1448 }
1449 
1450 static int hclge_get_cap(struct hclge_dev *hdev)
1451 {
1452 	int ret;
1453 
1454 	ret = hclge_query_function_status(hdev);
1455 	if (ret) {
1456 		dev_err(&hdev->pdev->dev,
1457 			"query function status error %d.\n", ret);
1458 		return ret;
1459 	}
1460 
1461 	/* get pf resource */
1462 	return hclge_query_pf_resource(hdev);
1463 }
1464 
1465 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1466 {
1467 #define HCLGE_MIN_TX_DESC	64
1468 #define HCLGE_MIN_RX_DESC	64
1469 
1470 	if (!is_kdump_kernel())
1471 		return;
1472 
1473 	dev_info(&hdev->pdev->dev,
1474 		 "Running kdump kernel. Using minimal resources\n");
1475 
1476 	/* the minimum number of queue pairs equals the number of vports */
1477 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1478 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1479 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1480 }
1481 
1482 static int hclge_configure(struct hclge_dev *hdev)
1483 {
1484 	struct hclge_cfg cfg;
1485 	unsigned int i;
1486 	int ret;
1487 
1488 	ret = hclge_get_cfg(hdev, &cfg);
1489 	if (ret)
1490 		return ret;
1491 
1492 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1493 	hdev->base_tqp_pid = 0;
1494 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1495 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1496 	hdev->rx_buf_len = cfg.rx_buf_len;
1497 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1498 	hdev->hw.mac.media_type = cfg.media_type;
1499 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1500 	hdev->num_tx_desc = cfg.tqp_desc_num;
1501 	hdev->num_rx_desc = cfg.tqp_desc_num;
1502 	hdev->tm_info.num_pg = 1;
1503 	hdev->tc_max = cfg.tc_num;
1504 	hdev->tm_info.hw_pfc_map = 0;
1505 	hdev->wanted_umv_size = cfg.umv_space;
1506 
1507 	if (hnae3_dev_fd_supported(hdev)) {
1508 		hdev->fd_en = true;
1509 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1510 	}
1511 
1512 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1513 	if (ret) {
1514 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1515 			cfg.default_speed, ret);
1516 		return ret;
1517 	}
1518 
1519 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1520 
1521 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1522 
1523 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1524 	    (hdev->tc_max < 1)) {
1525 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1526 			 hdev->tc_max);
1527 		hdev->tc_max = 1;
1528 	}
1529 
1530 	/* Dev does not support DCB */
1531 	if (!hnae3_dev_dcb_supported(hdev)) {
1532 		hdev->tc_max = 1;
1533 		hdev->pfc_max = 0;
1534 	} else {
1535 		hdev->pfc_max = hdev->tc_max;
1536 	}
1537 
1538 	hdev->tm_info.num_tc = 1;
1539 
1540 	/* Currently non-contiguous tc is not supported */
1541 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1542 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1543 
1544 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1545 
1546 	hclge_init_kdump_kernel_config(hdev);
1547 
1548 	/* Set the init affinity based on pci func number */
1549 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1550 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1551 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1552 			&hdev->affinity_mask);
1553 
1554 	return ret;
1555 }
1556 
1557 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1558 			    u16 tso_mss_max)
1559 {
1560 	struct hclge_cfg_tso_status_cmd *req;
1561 	struct hclge_desc desc;
1562 
1563 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1564 
1565 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1566 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1567 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1568 
1569 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1570 }
1571 
1572 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1573 {
1574 	struct hclge_cfg_gro_status_cmd *req;
1575 	struct hclge_desc desc;
1576 	int ret;
1577 
1578 	if (!hnae3_dev_gro_supported(hdev))
1579 		return 0;
1580 
1581 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1582 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1583 
1584 	req->gro_en = en ? 1 : 0;
1585 
1586 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1587 	if (ret)
1588 		dev_err(&hdev->pdev->dev,
1589 			"GRO hardware config cmd failed, ret = %d\n", ret);
1590 
1591 	return ret;
1592 }
1593 
1594 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1595 {
1596 	struct hclge_tqp *tqp;
1597 	int i;
1598 
1599 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1600 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1601 	if (!hdev->htqp)
1602 		return -ENOMEM;
1603 
1604 	tqp = hdev->htqp;
1605 
1606 	for (i = 0; i < hdev->num_tqps; i++) {
1607 		tqp->dev = &hdev->pdev->dev;
1608 		tqp->index = i;
1609 
1610 		tqp->q.ae_algo = &ae_algo;
1611 		tqp->q.buf_size = hdev->rx_buf_len;
1612 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1613 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1614 
1615 		/* need an extended offset to configure queues >=
1616 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1617 		 */
1618 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1619 			tqp->q.io_base = hdev->hw.io_base +
1620 					 HCLGE_TQP_REG_OFFSET +
1621 					 i * HCLGE_TQP_REG_SIZE;
1622 		else
1623 			tqp->q.io_base = hdev->hw.io_base +
1624 					 HCLGE_TQP_REG_OFFSET +
1625 					 HCLGE_TQP_EXT_REG_OFFSET +
1626 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1627 					 HCLGE_TQP_REG_SIZE;
1628 
1629 		tqp++;
1630 	}
1631 
1632 	return 0;
1633 }
1634 
1635 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1636 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1637 {
1638 	struct hclge_tqp_map_cmd *req;
1639 	struct hclge_desc desc;
1640 	int ret;
1641 
1642 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1643 
1644 	req = (struct hclge_tqp_map_cmd *)desc.data;
1645 	req->tqp_id = cpu_to_le16(tqp_pid);
1646 	req->tqp_vf = func_id;
1647 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1648 	if (!is_pf)
1649 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1650 	req->tqp_vid = cpu_to_le16(tqp_vid);
1651 
1652 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1653 	if (ret)
1654 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1655 
1656 	return ret;
1657 }
1658 
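/* Hand out up to num_tqps unused queue pairs to the vport and derive the
 * default rss_size as the smallest of the PF max rss size, the queues
 * available per TC, and the NIC MSI vectors available per TC (one vector
 * is subtracted, presumably reserved for the misc interrupt).
 */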
1659 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1660 {
1661 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1662 	struct hclge_dev *hdev = vport->back;
1663 	int i, alloced;
1664 
1665 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1666 	     alloced < num_tqps; i++) {
1667 		if (!hdev->htqp[i].alloced) {
1668 			hdev->htqp[i].q.handle = &vport->nic;
1669 			hdev->htqp[i].q.tqp_index = alloced;
1670 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1671 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1672 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1673 			hdev->htqp[i].alloced = true;
1674 			alloced++;
1675 		}
1676 	}
1677 	vport->alloc_tqps = alloced;
1678 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1679 				vport->alloc_tqps / hdev->tm_info.num_tc);
1680 
	/* ensure a one-to-one mapping between irq and queue by default */
1682 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1683 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1684 
1685 	return 0;
1686 }
1687 
1688 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
1692 	struct hnae3_handle *nic = &vport->nic;
1693 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1694 	struct hclge_dev *hdev = vport->back;
1695 	int ret;
1696 
1697 	kinfo->num_tx_desc = num_tx_desc;
1698 	kinfo->num_rx_desc = num_rx_desc;
1699 
1700 	kinfo->rx_buf_len = hdev->rx_buf_len;
1701 
1702 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1703 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1704 	if (!kinfo->tqp)
1705 		return -ENOMEM;
1706 
1707 	ret = hclge_assign_tqp(vport, num_tqps);
1708 	if (ret)
1709 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1710 
1711 	return ret;
1712 }
1713 
1714 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1715 				  struct hclge_vport *vport)
1716 {
1717 	struct hnae3_handle *nic = &vport->nic;
1718 	struct hnae3_knic_private_info *kinfo;
1719 	u16 i;
1720 
1721 	kinfo = &nic->kinfo;
1722 	for (i = 0; i < vport->alloc_tqps; i++) {
1723 		struct hclge_tqp *q =
1724 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1725 		bool is_pf;
1726 		int ret;
1727 
1728 		is_pf = !(vport->vport_id);
1729 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1730 					     i, is_pf);
1731 		if (ret)
1732 			return ret;
1733 	}
1734 
1735 	return 0;
1736 }
1737 
1738 static int hclge_map_tqp(struct hclge_dev *hdev)
1739 {
1740 	struct hclge_vport *vport = hdev->vport;
1741 	u16 i, num_vport;
1742 
1743 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1744 	for (i = 0; i < num_vport; i++)	{
1745 		int ret;
1746 
1747 		ret = hclge_map_tqp_to_vport(hdev, vport);
1748 		if (ret)
1749 			return ret;
1750 
1751 		vport++;
1752 	}
1753 
1754 	return 0;
1755 }
1756 
1757 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1758 {
1759 	struct hnae3_handle *nic = &vport->nic;
1760 	struct hclge_dev *hdev = vport->back;
1761 	int ret;
1762 
1763 	nic->pdev = hdev->pdev;
1764 	nic->ae_algo = &ae_algo;
1765 	nic->numa_node_mask = hdev->numa_node_mask;
1766 
1767 	ret = hclge_knic_setup(vport, num_tqps,
1768 			       hdev->num_tx_desc, hdev->num_rx_desc);
1769 	if (ret)
1770 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1771 
1772 	return ret;
1773 }
1774 
1775 static int hclge_alloc_vport(struct hclge_dev *hdev)
1776 {
1777 	struct pci_dev *pdev = hdev->pdev;
1778 	struct hclge_vport *vport;
1779 	u32 tqp_main_vport;
1780 	u32 tqp_per_vport;
1781 	int num_vport, i;
1782 	int ret;
1783 
	/* We need to alloc a vport for the main NIC of the PF */
1785 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1786 
1787 	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)\n",
			hdev->num_tqps, num_vport);
1790 		return -EINVAL;
1791 	}
1792 
1793 	/* Alloc the same number of TQPs for every vport */
1794 	tqp_per_vport = hdev->num_tqps / num_vport;
1795 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1796 
1797 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1798 			     GFP_KERNEL);
1799 	if (!vport)
1800 		return -ENOMEM;
1801 
1802 	hdev->vport = vport;
1803 	hdev->num_alloc_vport = num_vport;
1804 
1805 	if (IS_ENABLED(CONFIG_PCI_IOV))
1806 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1807 
1808 	for (i = 0; i < num_vport; i++) {
1809 		vport->back = hdev;
1810 		vport->vport_id = i;
1811 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1812 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1813 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1814 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1815 		INIT_LIST_HEAD(&vport->vlan_list);
1816 		INIT_LIST_HEAD(&vport->uc_mac_list);
1817 		INIT_LIST_HEAD(&vport->mc_mac_list);
1818 		spin_lock_init(&vport->mac_list_lock);
1819 
1820 		if (i == 0)
1821 			ret = hclge_vport_setup(vport, tqp_main_vport);
1822 		else
1823 			ret = hclge_vport_setup(vport, tqp_per_vport);
1824 		if (ret) {
1825 			dev_err(&pdev->dev,
1826 				"vport setup failed for vport %d, %d\n",
1827 				i, ret);
1828 			return ret;
1829 		}
1830 
1831 		vport++;
1832 	}
1833 
1834 	return 0;
1835 }
1836 
1837 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1838 				    struct hclge_pkt_buf_alloc *buf_alloc)
1839 {
/* TX buffer size is in units of 128 bytes */
1841 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1842 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1843 	struct hclge_tx_buff_alloc_cmd *req;
1844 	struct hclge_desc desc;
1845 	int ret;
1846 	u8 i;
1847 
1848 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1849 
1850 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1851 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1852 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1853 
1854 		req->tx_pkt_buff[i] =
1855 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1856 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1857 	}
1858 
1859 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1860 	if (ret)
1861 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1862 			ret);
1863 
1864 	return ret;
1865 }
1866 
1867 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1868 				 struct hclge_pkt_buf_alloc *buf_alloc)
1869 {
1870 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1871 
1872 	if (ret)
1873 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1874 
1875 	return ret;
1876 }
1877 
1878 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1879 {
1880 	unsigned int i;
1881 	u32 cnt = 0;
1882 
1883 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1884 		if (hdev->hw_tc_map & BIT(i))
1885 			cnt++;
1886 	return cnt;
1887 }
1888 
/* Get the number of PFC-enabled TCs that have a private buffer */
1890 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1891 				  struct hclge_pkt_buf_alloc *buf_alloc)
1892 {
1893 	struct hclge_priv_buf *priv;
1894 	unsigned int i;
1895 	int cnt = 0;
1896 
1897 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1898 		priv = &buf_alloc->priv_buf[i];
1899 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1900 		    priv->enable)
1901 			cnt++;
1902 	}
1903 
1904 	return cnt;
1905 }
1906 
/* Get the number of PFC-disabled TCs that have a private buffer */
1908 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1909 				     struct hclge_pkt_buf_alloc *buf_alloc)
1910 {
1911 	struct hclge_priv_buf *priv;
1912 	unsigned int i;
1913 	int cnt = 0;
1914 
1915 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1916 		priv = &buf_alloc->priv_buf[i];
1917 		if (hdev->hw_tc_map & BIT(i) &&
1918 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1919 		    priv->enable)
1920 			cnt++;
1921 	}
1922 
1923 	return cnt;
1924 }
1925 
1926 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1927 {
1928 	struct hclge_priv_buf *priv;
1929 	u32 rx_priv = 0;
1930 	int i;
1931 
1932 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1933 		priv = &buf_alloc->priv_buf[i];
1934 		if (priv->enable)
1935 			rx_priv += priv->buf_size;
1936 	}
1937 	return rx_priv;
1938 }
1939 
1940 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1941 {
1942 	u32 i, total_tx_size = 0;
1943 
1944 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1945 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1946 
1947 	return total_tx_size;
1948 }
1949 
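/* hclge_is_rx_buf_ok: check whether the rx buffer left after the private
 * buffers is large enough for the shared buffer, and if so fill in the
 * shared buffer size and its thresholds.
 *
 * The required shared buffer is
 *	shared_std = roundup(max(shared_buf_min, shared_buf_tc),
 *			     HCLGE_BUF_SIZE_UNIT)
 * and the layout only fits when
 *	rx_all >= rx_priv + shared_std
 * where rx_priv is the total private buffer of the enabled TCs.
 */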
1950 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1951 				struct hclge_pkt_buf_alloc *buf_alloc,
1952 				u32 rx_all)
1953 {
1954 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1955 	u32 tc_num = hclge_get_tc_num(hdev);
1956 	u32 shared_buf, aligned_mps;
1957 	u32 rx_priv;
1958 	int i;
1959 
1960 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1961 
1962 	if (hnae3_dev_dcb_supported(hdev))
1963 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1964 					hdev->dv_buf_size;
1965 	else
1966 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1967 					+ hdev->dv_buf_size;
1968 
1969 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1970 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1971 			     HCLGE_BUF_SIZE_UNIT);
1972 
1973 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1974 	if (rx_all < rx_priv + shared_std)
1975 		return false;
1976 
1977 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1978 	buf_alloc->s_buf.buf_size = shared_buf;
1979 	if (hnae3_dev_dcb_supported(hdev)) {
1980 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1981 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1982 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1983 				  HCLGE_BUF_SIZE_UNIT);
1984 	} else {
1985 		buf_alloc->s_buf.self.high = aligned_mps +
1986 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1987 		buf_alloc->s_buf.self.low = aligned_mps;
1988 	}
1989 
1990 	if (hnae3_dev_dcb_supported(hdev)) {
1991 		hi_thrd = shared_buf - hdev->dv_buf_size;
1992 
1993 		if (tc_num <= NEED_RESERVE_TC_NUM)
1994 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1995 					/ BUF_MAX_PERCENT;
1996 
1997 		if (tc_num)
1998 			hi_thrd = hi_thrd / tc_num;
1999 
2000 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2001 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2002 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2003 	} else {
2004 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2005 		lo_thrd = aligned_mps;
2006 	}
2007 
2008 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2009 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2010 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2011 	}
2012 
2013 	return true;
2014 }
2015 
2016 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2017 				struct hclge_pkt_buf_alloc *buf_alloc)
2018 {
2019 	u32 i, total_size;
2020 
2021 	total_size = hdev->pkt_buf_size;
2022 
	/* alloc tx buffer for all enabled TCs */
2024 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2025 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2026 
2027 		if (hdev->hw_tc_map & BIT(i)) {
2028 			if (total_size < hdev->tx_buf_size)
2029 				return -ENOMEM;
2030 
2031 			priv->tx_buf_size = hdev->tx_buf_size;
2032 		} else {
2033 			priv->tx_buf_size = 0;
2034 		}
2035 
2036 		total_size -= priv->tx_buf_size;
2037 	}
2038 
2039 	return 0;
2040 }
2041 
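/* hclge_rx_buf_calc_all: compute the rx private buffer and watermarks for
 * every enabled TC, using either the "max" or the reduced watermark profile,
 * then validate the result with hclge_is_rx_buf_ok().
 *
 * For a PFC-enabled TC: wl.low is aligned_mps (or one buffer unit) and
 * wl.high is roundup(wl.low + aligned_mps). For a non-PFC TC: wl.low is 0
 * and wl.high is 2 * aligned_mps (or aligned_mps). In both cases
 * priv->buf_size = wl.high + dv_buf_size.
 */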
2042 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2043 				  struct hclge_pkt_buf_alloc *buf_alloc)
2044 {
2045 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2046 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2047 	unsigned int i;
2048 
2049 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2050 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2051 
2052 		priv->enable = 0;
2053 		priv->wl.low = 0;
2054 		priv->wl.high = 0;
2055 		priv->buf_size = 0;
2056 
2057 		if (!(hdev->hw_tc_map & BIT(i)))
2058 			continue;
2059 
2060 		priv->enable = 1;
2061 
2062 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2063 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2064 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2065 						HCLGE_BUF_SIZE_UNIT);
2066 		} else {
2067 			priv->wl.low = 0;
2068 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2069 					aligned_mps;
2070 		}
2071 
2072 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2073 	}
2074 
2075 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2076 }
2077 
2078 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2079 					  struct hclge_pkt_buf_alloc *buf_alloc)
2080 {
2081 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2082 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2083 	int i;
2084 
	/* clear the private buffers starting from the last TC */
2086 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2087 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2088 		unsigned int mask = BIT((unsigned int)i);
2089 
2090 		if (hdev->hw_tc_map & mask &&
2091 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2092 			/* Clear the no pfc TC private buffer */
2093 			priv->wl.low = 0;
2094 			priv->wl.high = 0;
2095 			priv->buf_size = 0;
2096 			priv->enable = 0;
2097 			no_pfc_priv_num--;
2098 		}
2099 
2100 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2101 		    no_pfc_priv_num == 0)
2102 			break;
2103 	}
2104 
2105 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2106 }
2107 
2108 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2109 					struct hclge_pkt_buf_alloc *buf_alloc)
2110 {
2111 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2112 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2113 	int i;
2114 
	/* clear the private buffers starting from the last TC */
2116 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2117 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2118 		unsigned int mask = BIT((unsigned int)i);
2119 
2120 		if (hdev->hw_tc_map & mask &&
2121 		    hdev->tm_info.hw_pfc_map & mask) {
2122 			/* Reduce the number of pfc TC with private buffer */
2123 			priv->wl.low = 0;
2124 			priv->enable = 0;
2125 			priv->wl.high = 0;
2126 			priv->buf_size = 0;
2127 			pfc_priv_num--;
2128 		}
2129 
2130 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2131 		    pfc_priv_num == 0)
2132 			break;
2133 	}
2134 
2135 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2136 }
2137 
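/* hclge_only_alloc_priv_buff: try to split the remaining rx buffer evenly
 * into per-TC private buffers and leave no shared buffer.
 *
 * Each enabled TC gets (remaining rx buffer) / tc_num, scaled by
 * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT when no more than
 * NEED_RESERVE_TC_NUM TCs are in use. This only succeeds when that amount
 * is at least dv_buf_size + COMPENSATE_BUFFER +
 * COMPENSATE_HALF_MPS_NUM * (mps / 2), rounded to HCLGE_BUF_SIZE_UNIT.
 */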
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2140 {
2141 #define COMPENSATE_BUFFER	0x3C00
2142 #define COMPENSATE_HALF_MPS_NUM	5
2143 #define PRIV_WL_GAP		0x1800
2144 
2145 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2146 	u32 tc_num = hclge_get_tc_num(hdev);
2147 	u32 half_mps = hdev->mps >> 1;
2148 	u32 min_rx_priv;
2149 	unsigned int i;
2150 
2151 	if (tc_num)
2152 		rx_priv = rx_priv / tc_num;
2153 
2154 	if (tc_num <= NEED_RESERVE_TC_NUM)
2155 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2156 
2157 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2158 			COMPENSATE_HALF_MPS_NUM * half_mps;
2159 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2160 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2161 
2162 	if (rx_priv < min_rx_priv)
2163 		return false;
2164 
2165 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2166 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2167 
2168 		priv->enable = 0;
2169 		priv->wl.low = 0;
2170 		priv->wl.high = 0;
2171 		priv->buf_size = 0;
2172 
2173 		if (!(hdev->hw_tc_map & BIT(i)))
2174 			continue;
2175 
2176 		priv->enable = 1;
2177 		priv->buf_size = rx_priv;
2178 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2179 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2180 	}
2181 
2182 	buf_alloc->s_buf.buf_size = 0;
2183 
2184 	return true;
2185 }
2186 
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
2192 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2193 				struct hclge_pkt_buf_alloc *buf_alloc)
2194 {
2195 	/* When DCB is not supported, rx private buffer is not allocated. */
2196 	if (!hnae3_dev_dcb_supported(hdev)) {
2197 		u32 rx_all = hdev->pkt_buf_size;
2198 
2199 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2200 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2201 			return -ENOMEM;
2202 
2203 		return 0;
2204 	}
2205 
2206 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2207 		return 0;
2208 
2209 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2210 		return 0;
2211 
2212 	/* try to decrease the buffer size */
2213 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2214 		return 0;
2215 
2216 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2217 		return 0;
2218 
2219 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2220 		return 0;
2221 
2222 	return -ENOMEM;
2223 }
2224 
2225 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2226 				   struct hclge_pkt_buf_alloc *buf_alloc)
2227 {
2228 	struct hclge_rx_priv_buff_cmd *req;
2229 	struct hclge_desc desc;
2230 	int ret;
2231 	int i;
2232 
2233 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2234 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2235 
	/* Alloc private buffer for each TC */
2237 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2238 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2239 
2240 		req->buf_num[i] =
2241 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2242 		req->buf_num[i] |=
2243 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2244 	}
2245 
2246 	req->shared_buf =
2247 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2248 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2249 
2250 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2251 	if (ret)
2252 		dev_err(&hdev->pdev->dev,
2253 			"rx private buffer alloc cmd failed %d\n", ret);
2254 
2255 	return ret;
2256 }
2257 
2258 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2259 				   struct hclge_pkt_buf_alloc *buf_alloc)
2260 {
2261 	struct hclge_rx_priv_wl_buf *req;
2262 	struct hclge_priv_buf *priv;
2263 	struct hclge_desc desc[2];
2264 	int i, j;
2265 	int ret;
2266 
2267 	for (i = 0; i < 2; i++) {
2268 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2269 					   false);
2270 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2271 
		/* The first descriptor sets the NEXT bit to 1 */
2273 		if (i == 0)
2274 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2275 		else
2276 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2277 
2278 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2279 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2280 
2281 			priv = &buf_alloc->priv_buf[idx];
2282 			req->tc_wl[j].high =
2283 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2284 			req->tc_wl[j].high |=
2285 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2286 			req->tc_wl[j].low =
2287 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2288 			req->tc_wl[j].low |=
2289 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2290 		}
2291 	}
2292 
	/* Send 2 descriptors at one time */
2294 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2295 	if (ret)
2296 		dev_err(&hdev->pdev->dev,
2297 			"rx private waterline config cmd failed %d\n",
2298 			ret);
2299 	return ret;
2300 }
2301 
2302 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2303 				    struct hclge_pkt_buf_alloc *buf_alloc)
2304 {
2305 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2306 	struct hclge_rx_com_thrd *req;
2307 	struct hclge_desc desc[2];
2308 	struct hclge_tc_thrd *tc;
2309 	int i, j;
2310 	int ret;
2311 
2312 	for (i = 0; i < 2; i++) {
2313 		hclge_cmd_setup_basic_desc(&desc[i],
2314 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2315 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2316 
		/* The first descriptor sets the NEXT bit to 1 */
2318 		if (i == 0)
2319 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2320 		else
2321 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2322 
2323 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2324 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2325 
2326 			req->com_thrd[j].high =
2327 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2328 			req->com_thrd[j].high |=
2329 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2330 			req->com_thrd[j].low =
2331 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2332 			req->com_thrd[j].low |=
2333 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2334 		}
2335 	}
2336 
2337 	/* Send 2 descriptors at one time */
2338 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2339 	if (ret)
2340 		dev_err(&hdev->pdev->dev,
2341 			"common threshold config cmd failed %d\n", ret);
2342 	return ret;
2343 }
2344 
2345 static int hclge_common_wl_config(struct hclge_dev *hdev,
2346 				  struct hclge_pkt_buf_alloc *buf_alloc)
2347 {
2348 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2349 	struct hclge_rx_com_wl *req;
2350 	struct hclge_desc desc;
2351 	int ret;
2352 
2353 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2354 
2355 	req = (struct hclge_rx_com_wl *)desc.data;
2356 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2358 
2359 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2361 
2362 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2363 	if (ret)
2364 		dev_err(&hdev->pdev->dev,
2365 			"common waterline config cmd failed %d\n", ret);
2366 
2367 	return ret;
2368 }
2369 
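/* hclge_buffer_alloc: calculate and program the whole packet buffer layout.
 *
 * The sequence is: calculate and allocate the per-TC tx buffers, calculate
 * the rx private/shared buffers, program the rx private buffer sizes and,
 * when DCB is supported, the per-TC rx waterlines and common thresholds,
 * before finally programming the common (shared) waterline.
 */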
2370 int hclge_buffer_alloc(struct hclge_dev *hdev)
2371 {
2372 	struct hclge_pkt_buf_alloc *pkt_buf;
2373 	int ret;
2374 
2375 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2376 	if (!pkt_buf)
2377 		return -ENOMEM;
2378 
2379 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2380 	if (ret) {
2381 		dev_err(&hdev->pdev->dev,
2382 			"could not calc tx buffer size for all TCs %d\n", ret);
2383 		goto out;
2384 	}
2385 
2386 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2387 	if (ret) {
2388 		dev_err(&hdev->pdev->dev,
2389 			"could not alloc tx buffers %d\n", ret);
2390 		goto out;
2391 	}
2392 
2393 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2394 	if (ret) {
2395 		dev_err(&hdev->pdev->dev,
2396 			"could not calc rx priv buffer size for all TCs %d\n",
2397 			ret);
2398 		goto out;
2399 	}
2400 
2401 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2402 	if (ret) {
2403 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2404 			ret);
2405 		goto out;
2406 	}
2407 
2408 	if (hnae3_dev_dcb_supported(hdev)) {
2409 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2410 		if (ret) {
2411 			dev_err(&hdev->pdev->dev,
2412 				"could not configure rx private waterline %d\n",
2413 				ret);
2414 			goto out;
2415 		}
2416 
2417 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2418 		if (ret) {
2419 			dev_err(&hdev->pdev->dev,
2420 				"could not configure common threshold %d\n",
2421 				ret);
2422 			goto out;
2423 		}
2424 	}
2425 
2426 	ret = hclge_common_wl_config(hdev, pkt_buf);
2427 	if (ret)
2428 		dev_err(&hdev->pdev->dev,
2429 			"could not configure common waterline %d\n", ret);
2430 
2431 out:
2432 	kfree(pkt_buf);
2433 	return ret;
2434 }
2435 
2436 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2437 {
2438 	struct hnae3_handle *roce = &vport->roce;
2439 	struct hnae3_handle *nic = &vport->nic;
2440 	struct hclge_dev *hdev = vport->back;
2441 
2442 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2443 
2444 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2445 		return -EINVAL;
2446 
2447 	roce->rinfo.base_vector = hdev->roce_base_vector;
2448 
2449 	roce->rinfo.netdev = nic->kinfo.netdev;
2450 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2451 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2452 
2453 	roce->pdev = nic->pdev;
2454 	roce->ae_algo = nic->ae_algo;
2455 	roce->numa_node_mask = nic->numa_node_mask;
2456 
2457 	return 0;
2458 }
2459 
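/* hclge_init_msi: allocate the MSI/MSI-X vectors and bookkeeping arrays.
 *
 * Between HNAE3_MIN_VECTOR_NUM and hdev->num_msi vectors are requested and
 * the granted count becomes the new num_msi. The base vector starts at
 * pdev->irq and the RoCE vectors start num_nic_msi entries after it.
 * vector_status[] records which vport owns each vector (HCLGE_INVALID_VPORT
 * when free) and vector_irq[] is used to record the Linux irq numbers.
 */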
2460 static int hclge_init_msi(struct hclge_dev *hdev)
2461 {
2462 	struct pci_dev *pdev = hdev->pdev;
2463 	int vectors;
2464 	int i;
2465 
2466 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2467 					hdev->num_msi,
2468 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2469 	if (vectors < 0) {
2470 		dev_err(&pdev->dev,
2471 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2472 			vectors);
2473 		return vectors;
2474 	}
2475 	if (vectors < hdev->num_msi)
2476 		dev_warn(&hdev->pdev->dev,
2477 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2478 			 hdev->num_msi, vectors);
2479 
2480 	hdev->num_msi = vectors;
2481 	hdev->num_msi_left = vectors;
2482 
2483 	hdev->base_msi_vector = pdev->irq;
2484 	hdev->roce_base_vector = hdev->base_msi_vector +
2485 				hdev->num_nic_msi;
2486 
2487 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2488 					   sizeof(u16), GFP_KERNEL);
2489 	if (!hdev->vector_status) {
2490 		pci_free_irq_vectors(pdev);
2491 		return -ENOMEM;
2492 	}
2493 
2494 	for (i = 0; i < hdev->num_msi; i++)
2495 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2496 
2497 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2498 					sizeof(int), GFP_KERNEL);
2499 	if (!hdev->vector_irq) {
2500 		pci_free_irq_vectors(pdev);
2501 		return -ENOMEM;
2502 	}
2503 
2504 	return 0;
2505 }
2506 
2507 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2508 {
2509 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2510 		duplex = HCLGE_MAC_FULL;
2511 
2512 	return duplex;
2513 }
2514 
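/* hclge_cfg_mac_speed_dup_hw: program the MAC speed/duplex via firmware.
 *
 * The speed field encoding used below is:
 *	1G -> 0, 10G -> 1, 25G -> 2, 40G -> 3, 50G -> 4, 100G -> 5,
 *	10M -> 6, 100M -> 7, 200G -> 8
 * Note that hclge_check_speed_dup() forces full duplex for any speed other
 * than 10M/100M.
 */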
2515 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2516 				      u8 duplex)
2517 {
2518 	struct hclge_config_mac_speed_dup_cmd *req;
2519 	struct hclge_desc desc;
2520 	int ret;
2521 
2522 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2523 
2524 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2525 
2526 	if (duplex)
2527 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2528 
2529 	switch (speed) {
2530 	case HCLGE_MAC_SPEED_10M:
2531 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2532 				HCLGE_CFG_SPEED_S, 6);
2533 		break;
2534 	case HCLGE_MAC_SPEED_100M:
2535 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2536 				HCLGE_CFG_SPEED_S, 7);
2537 		break;
2538 	case HCLGE_MAC_SPEED_1G:
2539 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2540 				HCLGE_CFG_SPEED_S, 0);
2541 		break;
2542 	case HCLGE_MAC_SPEED_10G:
2543 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2544 				HCLGE_CFG_SPEED_S, 1);
2545 		break;
2546 	case HCLGE_MAC_SPEED_25G:
2547 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2548 				HCLGE_CFG_SPEED_S, 2);
2549 		break;
2550 	case HCLGE_MAC_SPEED_40G:
2551 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2552 				HCLGE_CFG_SPEED_S, 3);
2553 		break;
2554 	case HCLGE_MAC_SPEED_50G:
2555 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2556 				HCLGE_CFG_SPEED_S, 4);
2557 		break;
2558 	case HCLGE_MAC_SPEED_100G:
2559 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2560 				HCLGE_CFG_SPEED_S, 5);
2561 		break;
2562 	case HCLGE_MAC_SPEED_200G:
2563 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2564 				HCLGE_CFG_SPEED_S, 8);
2565 		break;
2566 	default:
2567 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2568 		return -EINVAL;
2569 	}
2570 
2571 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2572 		      1);
2573 
2574 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2575 	if (ret) {
2576 		dev_err(&hdev->pdev->dev,
2577 			"mac speed/duplex config cmd failed %d.\n", ret);
2578 		return ret;
2579 	}
2580 
2581 	return 0;
2582 }
2583 
2584 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2585 {
2586 	struct hclge_mac *mac = &hdev->hw.mac;
2587 	int ret;
2588 
2589 	duplex = hclge_check_speed_dup(duplex, speed);
2590 	if (!mac->support_autoneg && mac->speed == speed &&
2591 	    mac->duplex == duplex)
2592 		return 0;
2593 
2594 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2595 	if (ret)
2596 		return ret;
2597 
2598 	hdev->hw.mac.speed = speed;
2599 	hdev->hw.mac.duplex = duplex;
2600 
2601 	return 0;
2602 }
2603 
2604 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2605 				     u8 duplex)
2606 {
2607 	struct hclge_vport *vport = hclge_get_vport(handle);
2608 	struct hclge_dev *hdev = vport->back;
2609 
2610 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2611 }
2612 
2613 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2614 {
2615 	struct hclge_config_auto_neg_cmd *req;
2616 	struct hclge_desc desc;
2617 	u32 flag = 0;
2618 	int ret;
2619 
2620 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2621 
2622 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2623 	if (enable)
2624 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2625 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2626 
2627 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2628 	if (ret)
2629 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2630 			ret);
2631 
2632 	return ret;
2633 }
2634 
2635 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2636 {
2637 	struct hclge_vport *vport = hclge_get_vport(handle);
2638 	struct hclge_dev *hdev = vport->back;
2639 
2640 	if (!hdev->hw.mac.support_autoneg) {
2641 		if (enable) {
2642 			dev_err(&hdev->pdev->dev,
2643 				"autoneg is not supported by current port\n");
2644 			return -EOPNOTSUPP;
2645 		} else {
2646 			return 0;
2647 		}
2648 	}
2649 
2650 	return hclge_set_autoneg_en(hdev, enable);
2651 }
2652 
2653 static int hclge_get_autoneg(struct hnae3_handle *handle)
2654 {
2655 	struct hclge_vport *vport = hclge_get_vport(handle);
2656 	struct hclge_dev *hdev = vport->back;
2657 	struct phy_device *phydev = hdev->hw.mac.phydev;
2658 
2659 	if (phydev)
2660 		return phydev->autoneg;
2661 
2662 	return hdev->hw.mac.autoneg;
2663 }
2664 
2665 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2666 {
2667 	struct hclge_vport *vport = hclge_get_vport(handle);
2668 	struct hclge_dev *hdev = vport->back;
2669 	int ret;
2670 
2671 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2672 
2673 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2674 	if (ret)
2675 		return ret;
2676 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2677 }
2678 
2679 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2680 {
2681 	struct hclge_vport *vport = hclge_get_vport(handle);
2682 	struct hclge_dev *hdev = vport->back;
2683 
2684 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2685 		return hclge_set_autoneg_en(hdev, !halt);
2686 
2687 	return 0;
2688 }
2689 
2690 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2691 {
2692 	struct hclge_config_fec_cmd *req;
2693 	struct hclge_desc desc;
2694 	int ret;
2695 
2696 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2697 
2698 	req = (struct hclge_config_fec_cmd *)desc.data;
2699 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2700 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2701 	if (fec_mode & BIT(HNAE3_FEC_RS))
2702 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2703 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2704 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2705 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2706 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2707 
2708 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2709 	if (ret)
2710 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2711 
2712 	return ret;
2713 }
2714 
2715 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2716 {
2717 	struct hclge_vport *vport = hclge_get_vport(handle);
2718 	struct hclge_dev *hdev = vport->back;
2719 	struct hclge_mac *mac = &hdev->hw.mac;
2720 	int ret;
2721 
2722 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2723 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2724 		return -EINVAL;
2725 	}
2726 
2727 	ret = hclge_set_fec_hw(hdev, fec_mode);
2728 	if (ret)
2729 		return ret;
2730 
2731 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2732 	return 0;
2733 }
2734 
2735 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2736 			  u8 *fec_mode)
2737 {
2738 	struct hclge_vport *vport = hclge_get_vport(handle);
2739 	struct hclge_dev *hdev = vport->back;
2740 	struct hclge_mac *mac = &hdev->hw.mac;
2741 
2742 	if (fec_ability)
2743 		*fec_ability = mac->fec_ability;
2744 	if (fec_mode)
2745 		*fec_mode = mac->fec_mode;
2746 }
2747 
2748 static int hclge_mac_init(struct hclge_dev *hdev)
2749 {
2750 	struct hclge_mac *mac = &hdev->hw.mac;
2751 	int ret;
2752 
2753 	hdev->support_sfp_query = true;
2754 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2755 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2756 					 hdev->hw.mac.duplex);
2757 	if (ret)
2758 		return ret;
2759 
2760 	if (hdev->hw.mac.support_autoneg) {
2761 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2762 		if (ret)
2763 			return ret;
2764 	}
2765 
2766 	mac->link = 0;
2767 
2768 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2769 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2770 		if (ret)
2771 			return ret;
2772 	}
2773 
2774 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2775 	if (ret) {
2776 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2777 		return ret;
2778 	}
2779 
2780 	ret = hclge_set_default_loopback(hdev);
2781 	if (ret)
2782 		return ret;
2783 
2784 	ret = hclge_buffer_alloc(hdev);
2785 	if (ret)
2786 		dev_err(&hdev->pdev->dev,
2787 			"allocate buffer fail, ret=%d\n", ret);
2788 
2789 	return ret;
2790 }
2791 
2792 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2793 {
2794 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2795 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2796 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2797 				    hclge_wq, &hdev->service_task, 0);
2798 }
2799 
2800 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2801 {
2802 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2803 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2804 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2805 				    hclge_wq, &hdev->service_task, 0);
2806 }
2807 
2808 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2809 {
2810 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2811 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2812 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2813 				    hclge_wq, &hdev->service_task,
2814 				    delay_time);
2815 }
2816 
2817 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2818 {
2819 	struct hclge_link_status_cmd *req;
2820 	struct hclge_desc desc;
2821 	int ret;
2822 
2823 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2824 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2825 	if (ret) {
2826 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2827 			ret);
2828 		return ret;
2829 	}
2830 
2831 	req = (struct hclge_link_status_cmd *)desc.data;
2832 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2833 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2834 
2835 	return 0;
2836 }
2837 
2838 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2839 {
2840 	struct phy_device *phydev = hdev->hw.mac.phydev;
2841 
2842 	*link_status = HCLGE_LINK_STATUS_DOWN;
2843 
2844 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2845 		return 0;
2846 
2847 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2848 		return 0;
2849 
2850 	return hclge_get_mac_link_status(hdev, link_status);
2851 }
2852 
2853 static void hclge_update_link_status(struct hclge_dev *hdev)
2854 {
2855 	struct hnae3_client *rclient = hdev->roce_client;
2856 	struct hnae3_client *client = hdev->nic_client;
2857 	struct hnae3_handle *rhandle;
2858 	struct hnae3_handle *handle;
2859 	int state;
2860 	int ret;
2861 	int i;
2862 
2863 	if (!client)
2864 		return;
2865 
2866 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2867 		return;
2868 
2869 	ret = hclge_get_mac_phy_link(hdev, &state);
2870 	if (ret) {
2871 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2872 		return;
2873 	}
2874 
2875 	if (state != hdev->hw.mac.link) {
2876 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2877 			handle = &hdev->vport[i].nic;
2878 			client->ops->link_status_change(handle, state);
2879 			hclge_config_mac_tnl_int(hdev, state);
2880 			rhandle = &hdev->vport[i].roce;
2881 			if (rclient && rclient->ops->link_status_change)
2882 				rclient->ops->link_status_change(rhandle,
2883 								 state);
2884 		}
2885 		hdev->hw.mac.link = state;
2886 	}
2887 
2888 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2889 }
2890 
2891 static void hclge_update_port_capability(struct hclge_mac *mac)
2892 {
2893 	/* update fec ability by speed */
2894 	hclge_convert_setting_fec(mac);
2895 
	/* the firmware cannot identify the backplane type; the media type
	 * read from the configuration can help to determine it
	 */
2899 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2900 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2901 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2902 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2903 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2904 
2905 	if (mac->support_autoneg) {
2906 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2907 		linkmode_copy(mac->advertising, mac->supported);
2908 	} else {
2909 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2910 				   mac->supported);
2911 		linkmode_zero(mac->advertising);
2912 	}
2913 }
2914 
2915 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2916 {
2917 	struct hclge_sfp_info_cmd *resp;
2918 	struct hclge_desc desc;
2919 	int ret;
2920 
2921 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2922 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2923 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2924 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
2927 		return ret;
2928 	} else if (ret) {
2929 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2930 		return ret;
2931 	}
2932 
2933 	*speed = le32_to_cpu(resp->speed);
2934 
2935 	return 0;
2936 }
2937 
2938 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2939 {
2940 	struct hclge_sfp_info_cmd *resp;
2941 	struct hclge_desc desc;
2942 	int ret;
2943 
2944 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2945 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2946 
2947 	resp->query_type = QUERY_ACTIVE_SPEED;
2948 
2949 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2950 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
2953 		return ret;
2954 	} else if (ret) {
2955 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2956 		return ret;
2957 	}
2958 
	/* In some cases the MAC speed obtained from the IMP may be 0; it
	 * should not be assigned to mac->speed.
	 */
2962 	if (!le32_to_cpu(resp->speed))
2963 		return 0;
2964 
2965 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, the firmware is an old version,
	 * so do not update these parameters
	 */
2969 	if (resp->speed_ability) {
2970 		mac->module_type = le32_to_cpu(resp->module_type);
2971 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2972 		mac->autoneg = resp->autoneg;
2973 		mac->support_autoneg = resp->autoneg_ability;
2974 		mac->speed_type = QUERY_ACTIVE_SPEED;
2975 		if (!resp->active_fec)
2976 			mac->fec_mode = 0;
2977 		else
2978 			mac->fec_mode = BIT(resp->active_fec);
2979 	} else {
2980 		mac->speed_type = QUERY_SFP_SPEED;
2981 	}
2982 
2983 	return 0;
2984 }
2985 
2986 static int hclge_update_port_info(struct hclge_dev *hdev)
2987 {
2988 	struct hclge_mac *mac = &hdev->hw.mac;
2989 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2990 	int ret;
2991 
2992 	/* get the port info from SFP cmd if not copper port */
2993 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2994 		return 0;
2995 
	/* if the IMP does not support SFP/qSFP info query, return directly */
2997 	if (!hdev->support_sfp_query)
2998 		return 0;
2999 
3000 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3001 		ret = hclge_get_sfp_info(hdev, mac);
3002 	else
3003 		ret = hclge_get_sfp_speed(hdev, &speed);
3004 
3005 	if (ret == -EOPNOTSUPP) {
3006 		hdev->support_sfp_query = false;
3007 		return ret;
3008 	} else if (ret) {
3009 		return ret;
3010 	}
3011 
3012 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3013 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3014 			hclge_update_port_capability(mac);
3015 			return 0;
3016 		}
3017 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3018 					       HCLGE_MAC_FULL);
3019 	} else {
3020 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3021 			return 0; /* do nothing if no SFP */
3022 
3023 		/* must config full duplex for SFP */
3024 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3025 	}
3026 }
3027 
3028 static int hclge_get_status(struct hnae3_handle *handle)
3029 {
3030 	struct hclge_vport *vport = hclge_get_vport(handle);
3031 	struct hclge_dev *hdev = vport->back;
3032 
3033 	hclge_update_link_status(hdev);
3034 
3035 	return hdev->hw.mac.link;
3036 }
3037 
3038 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3039 {
3040 	if (!pci_num_vf(hdev->pdev)) {
3041 		dev_err(&hdev->pdev->dev,
3042 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3043 		return NULL;
3044 	}
3045 
3046 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3047 		dev_err(&hdev->pdev->dev,
3048 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3049 			vf, pci_num_vf(hdev->pdev));
3050 		return NULL;
3051 	}
3052 
	/* VFs start from 1 in the vport array */
3054 	vf += HCLGE_VF_VPORT_START_NUM;
3055 	return &hdev->vport[vf];
3056 }
3057 
3058 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3059 			       struct ifla_vf_info *ivf)
3060 {
3061 	struct hclge_vport *vport = hclge_get_vport(handle);
3062 	struct hclge_dev *hdev = vport->back;
3063 
3064 	vport = hclge_get_vf_vport(hdev, vf);
3065 	if (!vport)
3066 		return -EINVAL;
3067 
3068 	ivf->vf = vf;
3069 	ivf->linkstate = vport->vf_info.link_state;
3070 	ivf->spoofchk = vport->vf_info.spoofchk;
3071 	ivf->trusted = vport->vf_info.trusted;
3072 	ivf->min_tx_rate = 0;
3073 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3074 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3075 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3076 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3077 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3078 
3079 	return 0;
3080 }
3081 
3082 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3083 				   int link_state)
3084 {
3085 	struct hclge_vport *vport = hclge_get_vport(handle);
3086 	struct hclge_dev *hdev = vport->back;
3087 
3088 	vport = hclge_get_vf_vport(hdev, vf);
3089 	if (!vport)
3090 		return -EINVAL;
3091 
3092 	vport->vf_info.link_state = link_state;
3093 
3094 	return 0;
3095 }
3096 
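/* hclge_check_event_cause: decode the vector0 interrupt source.
 *
 * Events are checked in priority order: IMP reset, global reset, MSI-X
 * hardware error, mailbox (CMDQ RX) and finally "other". The register value
 * needed to acknowledge the event is returned through clearval.
 */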
3097 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3098 {
3099 	u32 cmdq_src_reg, msix_src_reg;
3100 
3101 	/* fetch the events from their corresponding regs */
3102 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3103 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3104 
	/* Assumption: if reset and mailbox events happen to be reported
	 * together, we only process the reset event in this pass and defer
	 * handling of the mailbox events. Since we have not cleared the RX
	 * CMDQ event this time, the hardware will raise another interrupt
	 * just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
3113 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3114 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3115 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3116 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3117 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3118 		hdev->rst_stats.imp_rst_cnt++;
3119 		return HCLGE_VECTOR0_EVENT_RST;
3120 	}
3121 
3122 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3123 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3124 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3125 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3126 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3127 		hdev->rst_stats.global_rst_cnt++;
3128 		return HCLGE_VECTOR0_EVENT_RST;
3129 	}
3130 
3131 	/* check for vector0 msix event source */
3132 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3133 		*clearval = msix_src_reg;
3134 		return HCLGE_VECTOR0_EVENT_ERR;
3135 	}
3136 
3137 	/* check for vector0 mailbox(=CMDQ RX) event source */
3138 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3139 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3140 		*clearval = cmdq_src_reg;
3141 		return HCLGE_VECTOR0_EVENT_MBX;
3142 	}
3143 
3144 	/* print other vector0 event source */
3145 	dev_info(&hdev->pdev->dev,
3146 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3147 		 cmdq_src_reg, msix_src_reg);
3148 	*clearval = msix_src_reg;
3149 
3150 	return HCLGE_VECTOR0_EVENT_OTHER;
3151 }
3152 
3153 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3154 				    u32 regclr)
3155 {
3156 	switch (event_type) {
3157 	case HCLGE_VECTOR0_EVENT_RST:
3158 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3159 		break;
3160 	case HCLGE_VECTOR0_EVENT_MBX:
3161 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3162 		break;
3163 	default:
3164 		break;
3165 	}
3166 }
3167 
3168 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3169 {
3170 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3171 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3172 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3173 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3174 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3175 }
3176 
3177 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3178 {
3179 	writel(enable ? 1 : 0, vector->addr);
3180 }
3181 
3182 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3183 {
3184 	struct hclge_dev *hdev = data;
3185 	u32 clearval = 0;
3186 	u32 event_cause;
3187 
3188 	hclge_enable_vector(&hdev->misc_vector, false);
3189 	event_cause = hclge_check_event_cause(hdev, &clearval);
3190 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3192 	switch (event_cause) {
3193 	case HCLGE_VECTOR0_EVENT_ERR:
		/* We do not know what type of reset is required yet. This can
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we do the following for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET. This means the type of reset
		 *    to be used is deferred.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET, it
		 *    will fetch the correct type of reset by first decoding
		 *    the types of errors.
		 */
3204 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3205 		fallthrough;
3206 	case HCLGE_VECTOR0_EVENT_RST:
3207 		hclge_reset_task_schedule(hdev);
3208 		break;
3209 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here, then either:
		 * 1. we are not handling any mbx task and none is scheduled,
		 *    or
		 * 2. we are handling an mbx task but nothing more is
		 *    scheduled.
		 * In both cases we should schedule the mbx task, as there are
		 * more mbx messages reported by this interrupt.
		 */
3219 		hclge_mbx_task_schedule(hdev);
3220 		break;
3221 	default:
3222 		dev_warn(&hdev->pdev->dev,
3223 			 "received unknown or unhandled event of vector0\n");
3224 		break;
3225 	}
3226 
3227 	hclge_clear_event_cause(hdev, event_cause, clearval);
3228 
	/* Enable the interrupt if the event was not caused by a reset. When
	 * clearval is 0, the interrupt status may have been cleared by
	 * hardware before the driver read the status register; in that case
	 * the vector0 interrupt should also be re-enabled.
	 */
	if (!clearval ||
	    event_cause == HCLGE_VECTOR0_EVENT_MBX)
		hclge_enable_vector(&hdev->misc_vector, true);
3238 
3239 	return IRQ_HANDLED;
3240 }
3241 
3242 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3243 {
3244 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3245 		dev_warn(&hdev->pdev->dev,
3246 			 "vector(vector_id %d) has been freed.\n", vector_id);
3247 		return;
3248 	}
3249 
3250 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3251 	hdev->num_msi_left += 1;
3252 	hdev->num_msi_used -= 1;
3253 }
3254 
3255 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3256 {
3257 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3258 
3259 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3260 
3261 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3262 	hdev->vector_status[0] = 0;
3263 
3264 	hdev->num_msi_left -= 1;
3265 	hdev->num_msi_used += 1;
3266 }
3267 
3268 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3269 				      const cpumask_t *mask)
3270 {
3271 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3272 					      affinity_notify);
3273 
3274 	cpumask_copy(&hdev->affinity_mask, mask);
3275 }
3276 
3277 static void hclge_irq_affinity_release(struct kref *ref)
3278 {
3279 }
3280 
3281 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3282 {
3283 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3284 			      &hdev->affinity_mask);
3285 
3286 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3287 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3288 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3289 				  &hdev->affinity_notify);
3290 }
3291 
3292 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3293 {
3294 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3295 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3296 }
3297 
3298 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3299 {
3300 	int ret;
3301 
3302 	hclge_get_misc_vector(hdev);
3303 
3304 	/* this would be explicitly freed in the end */
3305 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3306 		 HCLGE_NAME, pci_name(hdev->pdev));
3307 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3308 			  0, hdev->misc_vector.name, hdev);
3309 	if (ret) {
3310 		hclge_free_vector(hdev, 0);
3311 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3312 			hdev->misc_vector.vector_irq);
3313 	}
3314 
3315 	return ret;
3316 }
3317 
3318 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3319 {
3320 	free_irq(hdev->misc_vector.vector_irq, hdev);
3321 	hclge_free_vector(hdev, 0);
3322 }
3323 
3324 int hclge_notify_client(struct hclge_dev *hdev,
3325 			enum hnae3_reset_notify_type type)
3326 {
3327 	struct hnae3_client *client = hdev->nic_client;
3328 	u16 i;
3329 
3330 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3331 		return 0;
3332 
3333 	if (!client->ops->reset_notify)
3334 		return -EOPNOTSUPP;
3335 
3336 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3337 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3338 		int ret;
3339 
3340 		ret = client->ops->reset_notify(handle, type);
3341 		if (ret) {
3342 			dev_err(&hdev->pdev->dev,
3343 				"notify nic client failed %d(%d)\n", type, ret);
3344 			return ret;
3345 		}
3346 	}
3347 
3348 	return 0;
3349 }
3350 
3351 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3352 				    enum hnae3_reset_notify_type type)
3353 {
3354 	struct hnae3_client *client = hdev->roce_client;
3355 	int ret;
3356 	u16 i;
3357 
3358 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3359 		return 0;
3360 
3361 	if (!client->ops->reset_notify)
3362 		return -EOPNOTSUPP;
3363 
3364 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3365 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3366 
3367 		ret = client->ops->reset_notify(handle, type);
3368 		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)\n",
				type, ret);
3372 			return ret;
3373 		}
3374 	}
3375 
3376 	return ret;
3377 }
3378 
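/* hclge_reset_wait: poll the hardware until the requested reset completes.
 *
 * The relevant reset status bit is polled every 100 ms for up to
 * HCLGE_RESET_WAIT_CNT (350) iterations, i.e. roughly 35 seconds, before
 * giving up with -EBUSY.
 */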
3379 static int hclge_reset_wait(struct hclge_dev *hdev)
3380 {
#define HCLGE_RESET_WAIT_MS	100
3382 #define HCLGE_RESET_WAIT_CNT	350
3383 
3384 	u32 val, reg, reg_bit;
3385 	u32 cnt = 0;
3386 
3387 	switch (hdev->reset_type) {
3388 	case HNAE3_IMP_RESET:
3389 		reg = HCLGE_GLOBAL_RESET_REG;
3390 		reg_bit = HCLGE_IMP_RESET_BIT;
3391 		break;
3392 	case HNAE3_GLOBAL_RESET:
3393 		reg = HCLGE_GLOBAL_RESET_REG;
3394 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3395 		break;
3396 	case HNAE3_FUNC_RESET:
3397 		reg = HCLGE_FUN_RST_ING;
3398 		reg_bit = HCLGE_FUN_RST_ING_B;
3399 		break;
3400 	default:
3401 		dev_err(&hdev->pdev->dev,
3402 			"Wait for unsupported reset type: %d\n",
3403 			hdev->reset_type);
3404 		return -EINVAL;
3405 	}
3406 
3407 	val = hclge_read_dev(&hdev->hw, reg);
3408 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3410 		val = hclge_read_dev(&hdev->hw, reg);
3411 		cnt++;
3412 	}
3413 
3414 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3415 		dev_warn(&hdev->pdev->dev,
3416 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3417 		return -EBUSY;
3418 	}
3419 
3420 	return 0;
3421 }
3422 
3423 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3424 {
3425 	struct hclge_vf_rst_cmd *req;
3426 	struct hclge_desc desc;
3427 
3428 	req = (struct hclge_vf_rst_cmd *)desc.data;
3429 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3430 	req->dest_vfid = func_id;
3431 
3432 	if (reset)
3433 		req->vf_rst = 0x1;
3434 
3435 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3436 }
3437 
3438 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3439 {
3440 	int i;
3441 
3442 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3443 		struct hclge_vport *vport = &hdev->vport[i];
3444 		int ret;
3445 
3446 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3447 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3448 		if (ret) {
3449 			dev_err(&hdev->pdev->dev,
3450 				"set vf(%u) rst failed %d!\n",
3451 				vport->vport_id, ret);
3452 			return ret;
3453 		}
3454 
3455 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3456 			continue;
3457 
3458 		/* Inform VF to process the reset.
3459 		 * hclge_inform_reset_assert_to_vf may fail if VF
3460 		 * driver is not loaded.
3461 		 */
3462 		ret = hclge_inform_reset_assert_to_vf(vport);
3463 		if (ret)
3464 			dev_warn(&hdev->pdev->dev,
3465 				 "inform reset to vf(%u) failed %d!\n",
3466 				 vport->vport_id, ret);
3467 	}
3468 
3469 	return 0;
3470 }
3471 
3472 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3473 {
3474 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3475 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3476 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3477 		return;
3478 
3479 	hclge_mbx_handler(hdev);
3480 
3481 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3482 }
3483 
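/* hclge_func_reset_sync_vf: wait for all VFs to report that they have
 * stopped IO before a PF or FLR reset proceeds.
 *
 * The PF repeatedly queries HCLGE_OPC_QUERY_VF_RST_RDY, servicing pending
 * mailbox messages between polls so that VFs can bring their netdevs down,
 * and sleeps HCLGE_PF_RESET_SYNC_TIME ms between attempts for at most
 * HCLGE_PF_RESET_SYNC_CNT attempts. Old firmware that does not support the
 * query gets a plain HCLGE_RESET_SYNC_TIME ms wait instead.
 */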
3484 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3485 {
3486 	struct hclge_pf_rst_sync_cmd *req;
3487 	struct hclge_desc desc;
3488 	int cnt = 0;
3489 	int ret;
3490 
3491 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3492 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3493 
3494 	do {
		/* the VF needs to down its netdev by mbx during PF/FLR reset */
3496 		hclge_mailbox_service_task(hdev);
3497 
3498 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
3502 		if (ret == -EOPNOTSUPP) {
3503 			msleep(HCLGE_RESET_SYNC_TIME);
3504 			return;
3505 		} else if (ret) {
3506 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3507 				 ret);
3508 			return;
3509 		} else if (req->all_vf_ready) {
3510 			return;
3511 		}
3512 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3513 		hclge_cmd_reuse_desc(&desc, true);
3514 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3515 
3516 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3517 }
3518 
3519 void hclge_report_hw_error(struct hclge_dev *hdev,
3520 			   enum hnae3_hw_error_type type)
3521 {
3522 	struct hnae3_client *client = hdev->nic_client;
3523 	u16 i;
3524 
3525 	if (!client || !client->ops->process_hw_error ||
3526 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3527 		return;
3528 
3529 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3530 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3531 }
3532 
3533 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3534 {
3535 	u32 reg_val;
3536 
3537 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3538 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3539 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3540 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3541 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3542 	}
3543 
3544 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3545 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3546 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3547 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3548 	}
3549 }
3550 
3551 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3552 {
3553 	struct hclge_desc desc;
3554 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3555 	int ret;
3556 
3557 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3558 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3559 	req->fun_reset_vfid = func_id;
3560 
3561 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3562 	if (ret)
3563 		dev_err(&hdev->pdev->dev,
3564 			"send function reset cmd fail, status =%d\n", ret);
3565 
3566 	return ret;
3567 }
3568 
3569 static void hclge_do_reset(struct hclge_dev *hdev)
3570 {
3571 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3572 	struct pci_dev *pdev = hdev->pdev;
3573 	u32 val;
3574 
3575 	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finished\n");
3577 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3578 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3579 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3580 		return;
3581 	}
3582 
3583 	switch (hdev->reset_type) {
3584 	case HNAE3_GLOBAL_RESET:
3585 		dev_info(&pdev->dev, "global reset requested\n");
3586 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3587 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3588 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3589 		break;
3590 	case HNAE3_FUNC_RESET:
3591 		dev_info(&pdev->dev, "PF reset requested\n");
3592 		/* schedule again to check later */
3593 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3594 		hclge_reset_task_schedule(hdev);
3595 		break;
3596 	default:
3597 		dev_warn(&pdev->dev,
3598 			 "unsupported reset type: %d\n", hdev->reset_type);
3599 		break;
3600 	}
3601 }
3602 
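/* Resolve any HNAE3_UNKNOWN_RESET bit first, then return the highest
 * priority reset level requested in @addr (IMP > GLOBAL > FUNC > FLR).
 * The chosen level, plus any lower levels it covers, is cleared from the
 * bitmap. If a higher level reset is already in progress, the lower level
 * request found here is dropped and HNAE3_NONE_RESET is returned.
 */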
3603 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3604 						   unsigned long *addr)
3605 {
3606 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3607 	struct hclge_dev *hdev = ae_dev->priv;
3608 
3609 	/* first, resolve any unknown reset type to the known type(s) */
3610 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3611 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3612 					HCLGE_MISC_VECTOR_INT_STS);
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
3616 		if (hclge_handle_hw_msix_error(hdev, addr))
3617 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3618 				 msix_sts_reg);
3619 
3620 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt, since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced the
		 * new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable the
		 * interrupt again. This is an exception to the norm.
		 */
3628 		hclge_enable_vector(&hdev->misc_vector, true);
3629 	}
3630 
3631 	/* return the highest priority reset level amongst all */
3632 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3633 		rst_level = HNAE3_IMP_RESET;
3634 		clear_bit(HNAE3_IMP_RESET, addr);
3635 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3636 		clear_bit(HNAE3_FUNC_RESET, addr);
3637 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3638 		rst_level = HNAE3_GLOBAL_RESET;
3639 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3640 		clear_bit(HNAE3_FUNC_RESET, addr);
3641 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3642 		rst_level = HNAE3_FUNC_RESET;
3643 		clear_bit(HNAE3_FUNC_RESET, addr);
3644 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3645 		rst_level = HNAE3_FLR_RESET;
3646 		clear_bit(HNAE3_FLR_RESET, addr);
3647 	}
3648 
3649 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3650 	    rst_level < hdev->reset_type)
3651 		return HNAE3_NONE_RESET;
3652 
3653 	return rst_level;
3654 }
3655 
3656 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3657 {
3658 	u32 clearval = 0;
3659 
3660 	switch (hdev->reset_type) {
3661 	case HNAE3_IMP_RESET:
3662 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3663 		break;
3664 	case HNAE3_GLOBAL_RESET:
3665 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3666 		break;
3667 	default:
3668 		break;
3669 	}
3670 
3671 	if (!clearval)
3672 		return;
3673 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3677 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3678 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3679 				clearval);
3680 
3681 	hclge_enable_vector(&hdev->misc_vector, true);
3682 }
3683 
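/* Tell the firmware whether the driver has finished its reset preparation
 * by setting or clearing the software reset ready flag in the NIC CSQ
 * depth register.
 */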
3684 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3685 {
3686 	u32 reg_val;
3687 
3688 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3689 	if (enable)
3690 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3691 	else
3692 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3693 
3694 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3695 }
3696 
3697 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3698 {
3699 	int ret;
3700 
3701 	ret = hclge_set_all_vf_rst(hdev, true);
3702 	if (ret)
3703 		return ret;
3704 
3705 	hclge_func_reset_sync_vf(hdev);
3706 
3707 	return 0;
3708 }
3709 
3710 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3711 {
3712 	u32 reg_val;
3713 	int ret = 0;
3714 
3715 	switch (hdev->reset_type) {
3716 	case HNAE3_FUNC_RESET:
3717 		ret = hclge_func_reset_notify_vf(hdev);
3718 		if (ret)
3719 			return ret;
3720 
3721 		ret = hclge_func_reset_cmd(hdev, 0);
3722 		if (ret) {
3723 			dev_err(&hdev->pdev->dev,
3724 				"asserting function reset fail %d!\n", ret);
3725 			return ret;
3726 		}
3727 
		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_cmd_init is called.
		 */
3733 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3734 		hdev->rst_stats.pf_rst_cnt++;
3735 		break;
3736 	case HNAE3_FLR_RESET:
3737 		ret = hclge_func_reset_notify_vf(hdev);
3738 		if (ret)
3739 			return ret;
3740 		break;
3741 	case HNAE3_IMP_RESET:
3742 		hclge_handle_imp_error(hdev);
3743 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3744 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3745 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3746 		break;
3747 	default:
3748 		break;
3749 	}
3750 
3751 	/* inform hardware that preparatory work is done */
3752 	msleep(HCLGE_RESET_SYNC_TIME);
3753 	hclge_reset_handshake(hdev, true);
3754 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3755 
3756 	return ret;
3757 }
3758 
3759 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3760 {
3761 #define MAX_RESET_FAIL_CNT 5
3762 
3763 	if (hdev->reset_pending) {
3764 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3765 			 hdev->reset_pending);
3766 		return true;
3767 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3768 		   HCLGE_RESET_INT_M) {
3769 		dev_info(&hdev->pdev->dev,
3770 			 "reset failed because new reset interrupt\n");
3771 		hclge_clear_reset_cause(hdev);
3772 		return false;
3773 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3774 		hdev->rst_stats.reset_fail_cnt++;
3775 		set_bit(hdev->reset_type, &hdev->reset_pending);
3776 		dev_info(&hdev->pdev->dev,
3777 			 "re-schedule reset task(%u)\n",
3778 			 hdev->rst_stats.reset_fail_cnt);
3779 		return true;
3780 	}
3781 
3782 	hclge_clear_reset_cause(hdev);
3783 
	/* recover the handshake status when the reset fails */
3785 	hclge_reset_handshake(hdev, true);
3786 
3787 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3788 
3789 	hclge_dbg_dump_rst_info(hdev);
3790 
3791 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3792 
3793 	return false;
3794 }
3795 
3796 static int hclge_set_rst_done(struct hclge_dev *hdev)
3797 {
3798 	struct hclge_pf_rst_done_cmd *req;
3799 	struct hclge_desc desc;
3800 	int ret;
3801 
3802 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3803 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3804 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3805 
3806 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3807 	/* To be compatible with the old firmware, which does not support
3808 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3809 	 * return success
3810 	 */
3811 	if (ret == -EOPNOTSUPP) {
3812 		dev_warn(&hdev->pdev->dev,
3813 			 "current firmware does not support command(0x%x)!\n",
3814 			 HCLGE_OPC_PF_RST_DONE);
3815 		return 0;
3816 	} else if (ret) {
3817 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3818 			ret);
3819 	}
3820 
3821 	return ret;
3822 }
3823 
3824 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3825 {
3826 	int ret = 0;
3827 
3828 	switch (hdev->reset_type) {
3829 	case HNAE3_FUNC_RESET:
3830 	case HNAE3_FLR_RESET:
3831 		ret = hclge_set_all_vf_rst(hdev, false);
3832 		break;
3833 	case HNAE3_GLOBAL_RESET:
3834 	case HNAE3_IMP_RESET:
3835 		ret = hclge_set_rst_done(hdev);
3836 		break;
3837 	default:
3838 		break;
3839 	}
3840 
	/* clear the handshake status after re-initialization is done */
3842 	hclge_reset_handshake(hdev, false);
3843 
3844 	return ret;
3845 }
3846 
3847 static int hclge_reset_stack(struct hclge_dev *hdev)
3848 {
3849 	int ret;
3850 
3851 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3852 	if (ret)
3853 		return ret;
3854 
3855 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3856 	if (ret)
3857 		return ret;
3858 
3859 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3860 }
3861 
3862 static int hclge_reset_prepare(struct hclge_dev *hdev)
3863 {
3864 	int ret;
3865 
3866 	hdev->rst_stats.reset_cnt++;
3867 	/* perform reset of the stack & ae device for a client */
3868 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3869 	if (ret)
3870 		return ret;
3871 
3872 	rtnl_lock();
3873 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3874 	rtnl_unlock();
3875 	if (ret)
3876 		return ret;
3877 
3878 	return hclge_reset_prepare_wait(hdev);
3879 }
3880 
3881 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3882 {
3883 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3884 	enum hnae3_reset_type reset_level;
3885 	int ret;
3886 
3887 	hdev->rst_stats.hw_reset_done_cnt++;
3888 
3889 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3890 	if (ret)
3891 		return ret;
3892 
3893 	rtnl_lock();
3894 	ret = hclge_reset_stack(hdev);
3895 	rtnl_unlock();
3896 	if (ret)
3897 		return ret;
3898 
3899 	hclge_clear_reset_cause(hdev);
3900 
3901 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore the RoCE notify error once the reset has already failed
	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
	 */
3905 	if (ret &&
3906 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3907 		return ret;
3908 
3909 	ret = hclge_reset_prepare_up(hdev);
3910 	if (ret)
3911 		return ret;
3912 
3913 	rtnl_lock();
3914 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3915 	rtnl_unlock();
3916 	if (ret)
3917 		return ret;
3918 
3919 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3920 	if (ret)
3921 		return ret;
3922 
3923 	hdev->last_reset_time = jiffies;
3924 	hdev->rst_stats.reset_fail_cnt = 0;
3925 	hdev->rst_stats.reset_done_cnt++;
3926 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3927 
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to fix.
	 */
3932 	reset_level = hclge_get_reset_level(ae_dev,
3933 					    &hdev->default_reset_request);
3934 	if (reset_level != HNAE3_NONE_RESET)
3935 		set_bit(reset_level, &hdev->reset_request);
3936 
3937 	return 0;
3938 }
3939 
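/* The reset flow has three phases:
 *   1. prepare: notify the clients to go down and assert the reset
 *   2. wait:    poll the hardware until the reset has completed
 *   3. rebuild: re-initialise the ae device and bring the clients back up
 * Any failure falls through to the error handler, which may re-schedule
 * the reset task.
 */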
3940 static void hclge_reset(struct hclge_dev *hdev)
3941 {
3942 	if (hclge_reset_prepare(hdev))
3943 		goto err_reset;
3944 
3945 	if (hclge_reset_wait(hdev))
3946 		goto err_reset;
3947 
3948 	if (hclge_reset_rebuild(hdev))
3949 		goto err_reset;
3950 
3951 	return;
3952 
3953 err_reset:
3954 	if (hclge_reset_err_handle(hdev))
3955 		hclge_reset_task_schedule(hdev);
3956 }
3957 
3958 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3959 {
3960 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3961 	struct hclge_dev *hdev = ae_dev->priv;
3962 
	/* We might end up getting called broadly because of 2 cases below:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check whether this is a new reset request and we are not
	 * here just because the last reset attempt did not succeed and the
	 * watchdog hit us again. We know this if the last reset request did
	 * not occur very recently (watchdog timer = 5*HZ, so check after a
	 * sufficiently large time, say 4*5*HZ). For a new request we reset
	 * the "reset level" to PF reset. If it is a repeat of the most recent
	 * request, we throttle it and do not allow another reset before
	 * HCLGE_RESET_INTERVAL has elapsed.
	 */
3978 	if (!handle)
3979 		handle = &hdev->vport[0].nic;
3980 
3981 	if (time_before(jiffies, (hdev->last_reset_time +
3982 				  HCLGE_RESET_INTERVAL))) {
3983 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3984 		return;
3985 	} else if (hdev->default_reset_request) {
3986 		hdev->reset_level =
3987 			hclge_get_reset_level(ae_dev,
3988 					      &hdev->default_reset_request);
3989 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3990 		hdev->reset_level = HNAE3_FUNC_RESET;
3991 	}
3992 
3993 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3994 		 hdev->reset_level);
3995 
3996 	/* request reset & schedule reset task */
3997 	set_bit(hdev->reset_level, &hdev->reset_request);
3998 	hclge_reset_task_schedule(hdev);
3999 
4000 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4001 		hdev->reset_level++;
4002 }
4003 
4004 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4005 					enum hnae3_reset_type rst_type)
4006 {
4007 	struct hclge_dev *hdev = ae_dev->priv;
4008 
4009 	set_bit(rst_type, &hdev->default_reset_request);
4010 }
4011 
4012 static void hclge_reset_timer(struct timer_list *t)
4013 {
4014 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4015 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4019 	if (!hdev->default_reset_request)
4020 		return;
4021 
4022 	dev_info(&hdev->pdev->dev,
4023 		 "triggering reset in reset timer\n");
4024 	hclge_reset_event(hdev->pdev, NULL);
4025 }
4026 
4027 static void hclge_reset_subtask(struct hclge_dev *hdev)
4028 {
4029 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4030 
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, we need to wait for the
	 * hardware to complete the reset.
	 *    a. If we are able to figure out in a reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. else, we can come back later to check this status, so
	 *       re-schedule now.
	 */
4040 	hdev->last_reset_time = jiffies;
4041 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4042 	if (hdev->reset_type != HNAE3_NONE_RESET)
4043 		hclge_reset(hdev);
4044 
4045 	/* check if we got any *new* reset requests to be honored */
4046 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4047 	if (hdev->reset_type != HNAE3_NONE_RESET)
4048 		hclge_do_reset(hdev);
4049 
4050 	hdev->reset_type = HNAE3_NONE_RESET;
4051 }
4052 
4053 static void hclge_reset_service_task(struct hclge_dev *hdev)
4054 {
4055 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4056 		return;
4057 
4058 	down(&hdev->reset_sem);
4059 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4060 
4061 	hclge_reset_subtask(hdev);
4062 
4063 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4064 	up(&hdev->reset_sem);
4065 }
4066 
4067 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4068 {
4069 	int i;
4070 
	/* start from vport 1, since the PF (vport 0) is always alive */
4072 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4073 		struct hclge_vport *vport = &hdev->vport[i];
4074 
4075 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4076 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4077 
		/* If the VF is not alive, restore its MPS to the default value */
4079 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4080 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4081 	}
4082 }
4083 
4084 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4085 {
4086 	unsigned long delta = round_jiffies_relative(HZ);
4087 
4088 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4089 		return;
4090 
	/* Always handle link updating to make sure the link state is
	 * updated when it is triggered by mbx.
	 */
4094 	hclge_update_link_status(hdev);
4095 	hclge_sync_mac_table(hdev);
4096 	hclge_sync_promisc_mode(hdev);
4097 
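	/* if the previous service round ran less than a second ago, skip the
	 * heavier periodic work below and re-schedule for the remaining time
	 */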
4098 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4099 		delta = jiffies - hdev->last_serv_processed;
4100 
4101 		if (delta < round_jiffies_relative(HZ)) {
4102 			delta = round_jiffies_relative(HZ) - delta;
4103 			goto out;
4104 		}
4105 	}
4106 
4107 	hdev->serv_processed_cnt++;
4108 	hclge_update_vport_alive(hdev);
4109 
4110 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4111 		hdev->last_serv_processed = jiffies;
4112 		goto out;
4113 	}
4114 
4115 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4116 		hclge_update_stats_for_all(hdev);
4117 
4118 	hclge_update_port_info(hdev);
4119 	hclge_sync_vlan_filter(hdev);
4120 
4121 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4122 		hclge_rfs_filter_expire(hdev);
4123 
4124 	hdev->last_serv_processed = jiffies;
4125 
4126 out:
4127 	hclge_task_schedule(hdev, delta);
4128 }
4129 
4130 static void hclge_service_task(struct work_struct *work)
4131 {
4132 	struct hclge_dev *hdev =
4133 		container_of(work, struct hclge_dev, service_task.work);
4134 
4135 	hclge_reset_service_task(hdev);
4136 	hclge_mailbox_service_task(hdev);
4137 	hclge_periodic_service_task(hdev);
4138 
4139 	/* Handle reset and mbx again in case periodical task delays the
4140 	 * handling by calling hclge_task_schedule() in
4141 	 * hclge_periodic_service_task().
4142 	 */
4143 	hclge_reset_service_task(hdev);
4144 	hclge_mailbox_service_task(hdev);
4145 }
4146 
4147 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4148 {
4149 	/* VF handle has no client */
4150 	if (!handle->client)
4151 		return container_of(handle, struct hclge_vport, nic);
4152 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4153 		return container_of(handle, struct hclge_vport, roce);
4154 	else
4155 		return container_of(handle, struct hclge_vport, nic);
4156 }
4157 
4158 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4159 				  struct hnae3_vector_info *vector_info)
4160 {
4161 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4162 
4163 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4164 
	/* need an extended offset to configure vectors >= 64 */
4166 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4167 		vector_info->io_addr = hdev->hw.io_base +
4168 				HCLGE_VECTOR_REG_BASE +
4169 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4170 	else
4171 		vector_info->io_addr = hdev->hw.io_base +
4172 				HCLGE_VECTOR_EXT_REG_BASE +
4173 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4174 				HCLGE_VECTOR_REG_OFFSET_H +
4175 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4176 				HCLGE_VECTOR_REG_OFFSET;
4177 
4178 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4179 	hdev->vector_irq[idx] = vector_info->vector;
4180 }
4181 
4182 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4183 			    struct hnae3_vector_info *vector_info)
4184 {
4185 	struct hclge_vport *vport = hclge_get_vport(handle);
4186 	struct hnae3_vector_info *vector = vector_info;
4187 	struct hclge_dev *hdev = vport->back;
4188 	int alloc = 0;
4189 	u16 i = 0;
4190 	u16 j;
4191 
4192 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4193 	vector_num = min(hdev->num_msi_left, vector_num);
4194 
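	/* scan for unused MSI-X vectors (index 0 is skipped, it is used for
	 * the misc vector) and hand out up to vector_num of them
	 */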
4195 	for (j = 0; j < vector_num; j++) {
4196 		while (++i < hdev->num_nic_msi) {
4197 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4198 				hclge_get_vector_info(hdev, i, vector);
4199 				vector++;
4200 				alloc++;
4201 
4202 				break;
4203 			}
4204 		}
4205 	}
4206 	hdev->num_msi_left -= alloc;
4207 	hdev->num_msi_used += alloc;
4208 
4209 	return alloc;
4210 }
4211 
4212 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4213 {
4214 	int i;
4215 
4216 	for (i = 0; i < hdev->num_msi; i++)
4217 		if (vector == hdev->vector_irq[i])
4218 			return i;
4219 
4220 	return -EINVAL;
4221 }
4222 
4223 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4224 {
4225 	struct hclge_vport *vport = hclge_get_vport(handle);
4226 	struct hclge_dev *hdev = vport->back;
4227 	int vector_id;
4228 
4229 	vector_id = hclge_get_vector_index(hdev, vector);
4230 	if (vector_id < 0) {
4231 		dev_err(&hdev->pdev->dev,
4232 			"Get vector index fail. vector = %d\n", vector);
4233 		return vector_id;
4234 	}
4235 
4236 	hclge_free_vector(hdev, vector_id);
4237 
4238 	return 0;
4239 }
4240 
4241 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4242 {
4243 	return HCLGE_RSS_KEY_SIZE;
4244 }
4245 
4246 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4247 				  const u8 hfunc, const u8 *key)
4248 {
4249 	struct hclge_rss_config_cmd *req;
4250 	unsigned int key_offset = 0;
4251 	struct hclge_desc desc;
4252 	int key_counts;
4253 	int key_size;
4254 	int ret;
4255 
4256 	key_counts = HCLGE_RSS_KEY_SIZE;
4257 	req = (struct hclge_rss_config_cmd *)desc.data;
4258 
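	/* the RSS key is longer than the hash_key field of one descriptor,
	 * so write it in HCLGE_RSS_HASH_KEY_NUM byte chunks, with key_offset
	 * selecting which chunk the firmware should program
	 */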
4259 	while (key_counts) {
4260 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4261 					   false);
4262 
4263 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4264 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4265 
4266 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4267 		memcpy(req->hash_key,
4268 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4269 
4270 		key_counts -= key_size;
4271 		key_offset++;
4272 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4273 		if (ret) {
4274 			dev_err(&hdev->pdev->dev,
4275 				"Configure RSS config fail, status = %d\n",
4276 				ret);
4277 			return ret;
4278 		}
4279 	}
4280 	return 0;
4281 }
4282 
4283 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4284 {
4285 	struct hclge_rss_indirection_table_cmd *req;
4286 	struct hclge_desc desc;
4287 	int rss_cfg_tbl_num;
4288 	u8 rss_msb_oft;
4289 	u8 rss_msb_val;
4290 	int ret;
4291 	u16 qid;
4292 	int i;
4293 	u32 j;
4294 
4295 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4296 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4297 			  HCLGE_RSS_CFG_TBL_SIZE;
4298 
4299 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4300 		hclge_cmd_setup_basic_desc
4301 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4302 
4303 		req->start_table_index =
4304 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4305 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
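		/* each queue id is split: the low byte goes into rss_qid_l[]
		 * and bit HCLGE_RSS_CFG_TBL_BW_L of the qid is packed into
		 * the rss_qid_h[] bitmap
		 */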
4306 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4307 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4308 			req->rss_qid_l[j] = qid & 0xff;
4309 			rss_msb_oft =
4310 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4311 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4312 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4313 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4314 		}
4315 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4316 		if (ret) {
4317 			dev_err(&hdev->pdev->dev,
				"Configure rss indir table fail, status = %d\n",
4319 				ret);
4320 			return ret;
4321 		}
4322 	}
4323 	return 0;
4324 }
4325 
4326 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4327 				 u16 *tc_size, u16 *tc_offset)
4328 {
4329 	struct hclge_rss_tc_mode_cmd *req;
4330 	struct hclge_desc desc;
4331 	int ret;
4332 	int i;
4333 
4334 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4335 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4336 
4337 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4338 		u16 mode = 0;
4339 
4340 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4341 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4342 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4343 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4344 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4345 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4346 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4347 
4348 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4349 	}
4350 
4351 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4352 	if (ret)
4353 		dev_err(&hdev->pdev->dev,
4354 			"Configure rss tc mode fail, status = %d\n", ret);
4355 
4356 	return ret;
4357 }
4358 
4359 static void hclge_get_rss_type(struct hclge_vport *vport)
4360 {
4361 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4362 	    vport->rss_tuple_sets.ipv4_udp_en ||
4363 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4364 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4365 	    vport->rss_tuple_sets.ipv6_udp_en ||
4366 	    vport->rss_tuple_sets.ipv6_sctp_en)
4367 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4368 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4369 		 vport->rss_tuple_sets.ipv6_fragment_en)
4370 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4371 	else
4372 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4373 }
4374 
4375 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4376 {
4377 	struct hclge_rss_input_tuple_cmd *req;
4378 	struct hclge_desc desc;
4379 	int ret;
4380 
4381 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4382 
4383 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4384 
	/* Get the tuple cfg from the PF */
4386 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4387 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4388 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4389 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4390 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4391 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4392 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4393 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4394 	hclge_get_rss_type(&hdev->vport[0]);
4395 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4396 	if (ret)
4397 		dev_err(&hdev->pdev->dev,
4398 			"Configure rss input fail, status = %d\n", ret);
4399 	return ret;
4400 }
4401 
4402 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4403 			 u8 *key, u8 *hfunc)
4404 {
4405 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4406 	struct hclge_vport *vport = hclge_get_vport(handle);
4407 	int i;
4408 
4409 	/* Get hash algorithm */
4410 	if (hfunc) {
4411 		switch (vport->rss_algo) {
4412 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4413 			*hfunc = ETH_RSS_HASH_TOP;
4414 			break;
4415 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4416 			*hfunc = ETH_RSS_HASH_XOR;
4417 			break;
4418 		default:
4419 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4420 			break;
4421 		}
4422 	}
4423 
4424 	/* Get the RSS Key required by the user */
4425 	if (key)
4426 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4427 
4428 	/* Get indirect table */
4429 	if (indir)
4430 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4431 			indir[i] =  vport->rss_indirection_tbl[i];
4432 
4433 	return 0;
4434 }
4435 
4436 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4437 			 const  u8 *key, const  u8 hfunc)
4438 {
4439 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4440 	struct hclge_vport *vport = hclge_get_vport(handle);
4441 	struct hclge_dev *hdev = vport->back;
4442 	u8 hash_algo;
4443 	int ret, i;
4444 
	/* Set the RSS Hash Key if specified by the user */
4446 	if (key) {
4447 		switch (hfunc) {
4448 		case ETH_RSS_HASH_TOP:
4449 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4450 			break;
4451 		case ETH_RSS_HASH_XOR:
4452 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4453 			break;
4454 		case ETH_RSS_HASH_NO_CHANGE:
4455 			hash_algo = vport->rss_algo;
4456 			break;
4457 		default:
4458 			return -EINVAL;
4459 		}
4460 
4461 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4462 		if (ret)
4463 			return ret;
4464 
		/* Update the shadow RSS key with the user specified key */
4466 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4467 		vport->rss_algo = hash_algo;
4468 	}
4469 
4470 	/* Update the shadow RSS table with user specified qids */
4471 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4472 		vport->rss_indirection_tbl[i] = indir[i];
4473 
4474 	/* Update the hardware */
4475 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4476 }
4477 
4478 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4479 {
4480 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4481 
4482 	if (nfc->data & RXH_L4_B_2_3)
4483 		hash_sets |= HCLGE_D_PORT_BIT;
4484 	else
4485 		hash_sets &= ~HCLGE_D_PORT_BIT;
4486 
4487 	if (nfc->data & RXH_IP_SRC)
4488 		hash_sets |= HCLGE_S_IP_BIT;
4489 	else
4490 		hash_sets &= ~HCLGE_S_IP_BIT;
4491 
4492 	if (nfc->data & RXH_IP_DST)
4493 		hash_sets |= HCLGE_D_IP_BIT;
4494 	else
4495 		hash_sets &= ~HCLGE_D_IP_BIT;
4496 
4497 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4498 		hash_sets |= HCLGE_V_TAG_BIT;
4499 
4500 	return hash_sets;
4501 }
4502 
4503 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4504 			       struct ethtool_rxnfc *nfc)
4505 {
4506 	struct hclge_vport *vport = hclge_get_vport(handle);
4507 	struct hclge_dev *hdev = vport->back;
4508 	struct hclge_rss_input_tuple_cmd *req;
4509 	struct hclge_desc desc;
4510 	u8 tuple_sets;
4511 	int ret;
4512 
4513 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4514 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4515 		return -EINVAL;
4516 
4517 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4518 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4519 
4520 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4521 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4522 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4523 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4524 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4525 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4526 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4527 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4528 
4529 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4530 	switch (nfc->flow_type) {
4531 	case TCP_V4_FLOW:
4532 		req->ipv4_tcp_en = tuple_sets;
4533 		break;
4534 	case TCP_V6_FLOW:
4535 		req->ipv6_tcp_en = tuple_sets;
4536 		break;
4537 	case UDP_V4_FLOW:
4538 		req->ipv4_udp_en = tuple_sets;
4539 		break;
4540 	case UDP_V6_FLOW:
4541 		req->ipv6_udp_en = tuple_sets;
4542 		break;
4543 	case SCTP_V4_FLOW:
4544 		req->ipv4_sctp_en = tuple_sets;
4545 		break;
4546 	case SCTP_V6_FLOW:
4547 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4548 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4549 			return -EINVAL;
4550 
4551 		req->ipv6_sctp_en = tuple_sets;
4552 		break;
4553 	case IPV4_FLOW:
4554 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4555 		break;
4556 	case IPV6_FLOW:
4557 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4558 		break;
4559 	default:
4560 		return -EINVAL;
4561 	}
4562 
4563 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4564 	if (ret) {
4565 		dev_err(&hdev->pdev->dev,
4566 			"Set rss tuple fail, status = %d\n", ret);
4567 		return ret;
4568 	}
4569 
4570 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4571 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4572 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4573 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4574 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4575 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4576 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4577 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4578 	hclge_get_rss_type(vport);
4579 	return 0;
4580 }
4581 
4582 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4583 			       struct ethtool_rxnfc *nfc)
4584 {
4585 	struct hclge_vport *vport = hclge_get_vport(handle);
4586 	u8 tuple_sets;
4587 
4588 	nfc->data = 0;
4589 
4590 	switch (nfc->flow_type) {
4591 	case TCP_V4_FLOW:
4592 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4593 		break;
4594 	case UDP_V4_FLOW:
4595 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4596 		break;
4597 	case TCP_V6_FLOW:
4598 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4599 		break;
4600 	case UDP_V6_FLOW:
4601 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4602 		break;
4603 	case SCTP_V4_FLOW:
4604 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4605 		break;
4606 	case SCTP_V6_FLOW:
4607 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4608 		break;
4609 	case IPV4_FLOW:
4610 	case IPV6_FLOW:
4611 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4612 		break;
4613 	default:
4614 		return -EINVAL;
4615 	}
4616 
4617 	if (!tuple_sets)
4618 		return 0;
4619 
4620 	if (tuple_sets & HCLGE_D_PORT_BIT)
4621 		nfc->data |= RXH_L4_B_2_3;
4622 	if (tuple_sets & HCLGE_S_PORT_BIT)
4623 		nfc->data |= RXH_L4_B_0_1;
4624 	if (tuple_sets & HCLGE_D_IP_BIT)
4625 		nfc->data |= RXH_IP_DST;
4626 	if (tuple_sets & HCLGE_S_IP_BIT)
4627 		nfc->data |= RXH_IP_SRC;
4628 
4629 	return 0;
4630 }
4631 
4632 static int hclge_get_tc_size(struct hnae3_handle *handle)
4633 {
4634 	struct hclge_vport *vport = hclge_get_vport(handle);
4635 	struct hclge_dev *hdev = vport->back;
4636 
4637 	return hdev->pf_rss_size_max;
4638 }
4639 
4640 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4641 {
4642 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4643 	struct hclge_vport *vport = hdev->vport;
4644 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4645 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4646 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4647 	struct hnae3_tc_info *tc_info;
4648 	u16 roundup_size;
4649 	u16 rss_size;
4650 	int i;
4651 
4652 	tc_info = &vport->nic.kinfo.tc_info;
4653 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4654 		rss_size = tc_info->tqp_count[i];
4655 		tc_valid[i] = 0;
4656 
4657 		if (!(hdev->hw_tc_map & BIT(i)))
4658 			continue;
4659 
		/* tc_size set to hardware is the log2 of the roundup power of
		 * two of rss_size; the actual queue size is limited by the
		 * indirection table.
		 */
4664 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4665 		    rss_size == 0) {
4666 			dev_err(&hdev->pdev->dev,
4667 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4668 				rss_size);
4669 			return -EINVAL;
4670 		}
4671 
4672 		roundup_size = roundup_pow_of_two(rss_size);
4673 		roundup_size = ilog2(roundup_size);
4674 
4675 		tc_valid[i] = 1;
4676 		tc_size[i] = roundup_size;
4677 		tc_offset[i] = tc_info->tqp_offset[i];
4678 	}
4679 
4680 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4681 }
4682 
4683 int hclge_rss_init_hw(struct hclge_dev *hdev)
4684 {
4685 	struct hclge_vport *vport = hdev->vport;
4686 	u16 *rss_indir = vport[0].rss_indirection_tbl;
4687 	u8 *key = vport[0].rss_hash_key;
4688 	u8 hfunc = vport[0].rss_algo;
4689 	int ret;
4690 
4691 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4692 	if (ret)
4693 		return ret;
4694 
4695 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4696 	if (ret)
4697 		return ret;
4698 
4699 	ret = hclge_set_rss_input_tuple(hdev);
4700 	if (ret)
4701 		return ret;
4702 
4703 	return hclge_init_rss_tc_mode(hdev);
4704 }
4705 
4706 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4707 {
4708 	struct hclge_vport *vport = hdev->vport;
4709 	int i, j;
4710 
4711 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4712 		for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
4713 			vport[j].rss_indirection_tbl[i] =
4714 				i % vport[j].alloc_rss_size;
4715 	}
4716 }
4717 
4718 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
4719 {
4720 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
4721 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4722 	struct hclge_vport *vport = hdev->vport;
4723 
4724 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
4725 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4726 
4727 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4728 		u16 *rss_ind_tbl;
4729 
4730 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4731 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4732 		vport[i].rss_tuple_sets.ipv4_udp_en =
4733 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4734 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4735 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4736 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4737 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4738 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4739 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4740 		vport[i].rss_tuple_sets.ipv6_udp_en =
4741 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4742 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4743 			hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
4744 			HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
4745 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4746 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4747 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4748 
4749 		vport[i].rss_algo = rss_algo;
4750 
4751 		rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
4752 					   sizeof(*rss_ind_tbl), GFP_KERNEL);
4753 		if (!rss_ind_tbl)
4754 			return -ENOMEM;
4755 
4756 		vport[i].rss_indirection_tbl = rss_ind_tbl;
4757 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4758 		       HCLGE_RSS_KEY_SIZE);
4759 	}
4760 
4761 	hclge_rss_indir_init_cfg(hdev);
4762 
4763 	return 0;
4764 }
4765 
4766 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4767 				int vector_id, bool en,
4768 				struct hnae3_ring_chain_node *ring_chain)
4769 {
4770 	struct hclge_dev *hdev = vport->back;
4771 	struct hnae3_ring_chain_node *node;
4772 	struct hclge_desc desc;
4773 	struct hclge_ctrl_vector_chain_cmd *req =
4774 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4775 	enum hclge_cmd_status status;
4776 	enum hclge_opcode_type op;
4777 	u16 tqp_type_and_id;
4778 	int i;
4779 
4780 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4781 	hclge_cmd_setup_basic_desc(&desc, op, false);
4782 	req->int_vector_id_l = hnae3_get_field(vector_id,
4783 					       HCLGE_VECTOR_ID_L_M,
4784 					       HCLGE_VECTOR_ID_L_S);
4785 	req->int_vector_id_h = hnae3_get_field(vector_id,
4786 					       HCLGE_VECTOR_ID_H_M,
4787 					       HCLGE_VECTOR_ID_H_S);
4788 
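	/* walk the ring chain and map each ring to the vector; one descriptor
	 * holds at most HCLGE_VECTOR_ELEMENTS_PER_CMD entries, so the command
	 * is sent and the descriptor re-initialised whenever it fills up
	 */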
4789 	i = 0;
4790 	for (node = ring_chain; node; node = node->next) {
4791 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4792 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4793 				HCLGE_INT_TYPE_S,
4794 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4795 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4796 				HCLGE_TQP_ID_S, node->tqp_index);
4797 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4798 				HCLGE_INT_GL_IDX_S,
4799 				hnae3_get_field(node->int_gl_idx,
4800 						HNAE3_RING_GL_IDX_M,
4801 						HNAE3_RING_GL_IDX_S));
4802 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4803 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4804 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4805 			req->vfid = vport->vport_id;
4806 
4807 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4808 			if (status) {
4809 				dev_err(&hdev->pdev->dev,
4810 					"Map TQP fail, status is %d.\n",
4811 					status);
4812 				return -EIO;
4813 			}
4814 			i = 0;
4815 
4816 			hclge_cmd_setup_basic_desc(&desc,
4817 						   op,
4818 						   false);
4819 			req->int_vector_id_l =
4820 				hnae3_get_field(vector_id,
4821 						HCLGE_VECTOR_ID_L_M,
4822 						HCLGE_VECTOR_ID_L_S);
4823 			req->int_vector_id_h =
4824 				hnae3_get_field(vector_id,
4825 						HCLGE_VECTOR_ID_H_M,
4826 						HCLGE_VECTOR_ID_H_S);
4827 		}
4828 	}
4829 
4830 	if (i > 0) {
4831 		req->int_cause_num = i;
4832 		req->vfid = vport->vport_id;
4833 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4834 		if (status) {
4835 			dev_err(&hdev->pdev->dev,
4836 				"Map TQP fail, status is %d.\n", status);
4837 			return -EIO;
4838 		}
4839 	}
4840 
4841 	return 0;
4842 }
4843 
4844 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4845 				    struct hnae3_ring_chain_node *ring_chain)
4846 {
4847 	struct hclge_vport *vport = hclge_get_vport(handle);
4848 	struct hclge_dev *hdev = vport->back;
4849 	int vector_id;
4850 
4851 	vector_id = hclge_get_vector_index(hdev, vector);
4852 	if (vector_id < 0) {
4853 		dev_err(&hdev->pdev->dev,
4854 			"failed to get vector index. vector=%d\n", vector);
4855 		return vector_id;
4856 	}
4857 
4858 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4859 }
4860 
4861 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4862 				       struct hnae3_ring_chain_node *ring_chain)
4863 {
4864 	struct hclge_vport *vport = hclge_get_vport(handle);
4865 	struct hclge_dev *hdev = vport->back;
4866 	int vector_id, ret;
4867 
4868 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4869 		return 0;
4870 
4871 	vector_id = hclge_get_vector_index(hdev, vector);
4872 	if (vector_id < 0) {
4873 		dev_err(&handle->pdev->dev,
4874 			"Get vector index fail. ret =%d\n", vector_id);
4875 		return vector_id;
4876 	}
4877 
4878 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4879 	if (ret)
4880 		dev_err(&handle->pdev->dev,
4881 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4882 			vector_id, ret);
4883 
4884 	return ret;
4885 }
4886 
4887 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4888 				      bool en_uc, bool en_mc, bool en_bc)
4889 {
4890 	struct hclge_vport *vport = &hdev->vport[vf_id];
4891 	struct hnae3_handle *handle = &vport->nic;
4892 	struct hclge_promisc_cfg_cmd *req;
4893 	struct hclge_desc desc;
4894 	bool uc_tx_en = en_uc;
4895 	u8 promisc_cfg = 0;
4896 	int ret;
4897 
4898 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4899 
4900 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4901 	req->vf_id = vf_id;
4902 
4903 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4904 		uc_tx_en = false;
4905 
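	/* extend_promisc carries separate RX/TX promisc enables; the legacy
	 * promisc field below is kept for compatibility with
	 * DEVICE_VERSION_V1/2
	 */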
4906 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4907 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4908 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4909 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4910 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4911 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4912 	req->extend_promisc = promisc_cfg;
4913 
4914 	/* to be compatible with DEVICE_VERSION_V1/2 */
4915 	promisc_cfg = 0;
4916 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4917 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4918 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4919 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4920 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4921 	req->promisc = promisc_cfg;
4922 
4923 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4924 	if (ret)
4925 		dev_err(&hdev->pdev->dev,
4926 			"failed to set vport %u promisc mode, ret = %d.\n",
4927 			vf_id, ret);
4928 
4929 	return ret;
4930 }
4931 
4932 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4933 				 bool en_mc_pmc, bool en_bc_pmc)
4934 {
4935 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4936 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
4937 }
4938 
4939 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4940 				  bool en_mc_pmc)
4941 {
4942 	struct hclge_vport *vport = hclge_get_vport(handle);
4943 	struct hclge_dev *hdev = vport->back;
4944 	bool en_bc_pmc = true;
4945 
	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should be disabled until the user enables promisc mode.
	 */
4950 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4951 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4952 
4953 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4954 					    en_bc_pmc);
4955 }
4956 
4957 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4958 {
4959 	struct hclge_vport *vport = hclge_get_vport(handle);
4960 	struct hclge_dev *hdev = vport->back;
4961 
4962 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4963 }
4964 
4965 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4966 {
4967 	struct hclge_get_fd_mode_cmd *req;
4968 	struct hclge_desc desc;
4969 	int ret;
4970 
4971 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4972 
4973 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4974 
4975 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4976 	if (ret) {
4977 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4978 		return ret;
4979 	}
4980 
4981 	*fd_mode = req->mode;
4982 
4983 	return ret;
4984 }
4985 
4986 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4987 				   u32 *stage1_entry_num,
4988 				   u32 *stage2_entry_num,
4989 				   u16 *stage1_counter_num,
4990 				   u16 *stage2_counter_num)
4991 {
4992 	struct hclge_get_fd_allocation_cmd *req;
4993 	struct hclge_desc desc;
4994 	int ret;
4995 
4996 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4997 
4998 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4999 
5000 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5001 	if (ret) {
5002 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5003 			ret);
5004 		return ret;
5005 	}
5006 
5007 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5008 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5009 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5010 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5011 
5012 	return ret;
5013 }
5014 
5015 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5016 				   enum HCLGE_FD_STAGE stage_num)
5017 {
5018 	struct hclge_set_fd_key_config_cmd *req;
5019 	struct hclge_fd_key_cfg *stage;
5020 	struct hclge_desc desc;
5021 	int ret;
5022 
5023 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5024 
5025 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5026 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5027 	req->stage = stage_num;
5028 	req->key_select = stage->key_sel;
5029 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5030 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5031 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5032 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5033 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5034 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5035 
5036 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5037 	if (ret)
5038 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5039 
5040 	return ret;
5041 }
5042 
5043 static int hclge_init_fd_config(struct hclge_dev *hdev)
5044 {
5045 #define LOW_2_WORDS		0x03
5046 	struct hclge_fd_key_cfg *key_cfg;
5047 	int ret;
5048 
5049 	if (!hnae3_dev_fd_supported(hdev))
5050 		return 0;
5051 
5052 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5053 	if (ret)
5054 		return ret;
5055 
5056 	switch (hdev->fd_cfg.fd_mode) {
5057 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5058 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5059 		break;
5060 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5061 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5062 		break;
5063 	default:
5064 		dev_err(&hdev->pdev->dev,
5065 			"Unsupported flow director mode %u\n",
5066 			hdev->fd_cfg.fd_mode);
5067 		return -EOPNOTSUPP;
5068 	}
5069 
5070 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5071 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5072 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5073 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5074 	key_cfg->outer_sipv6_word_en = 0;
5075 	key_cfg->outer_dipv6_word_en = 0;
5076 
5077 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5078 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5079 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5080 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5081 
	/* with the max 400-bit key, src/dst MAC tuples are also supported */
5083 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
5084 		key_cfg->tuple_active |=
5085 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5086 
	/* roce_type is used to filter RoCE frames
	 * dst_vport is used to specify the rule
	 */
5090 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5091 
5092 	ret = hclge_get_fd_allocation(hdev,
5093 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5094 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5095 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5096 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5097 	if (ret)
5098 		return ret;
5099 
5100 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5101 }
5102 
5103 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5104 				int loc, u8 *key, bool is_add)
5105 {
5106 	struct hclge_fd_tcam_config_1_cmd *req1;
5107 	struct hclge_fd_tcam_config_2_cmd *req2;
5108 	struct hclge_fd_tcam_config_3_cmd *req3;
5109 	struct hclge_desc desc[3];
5110 	int ret;
5111 
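	/* one TCAM entry spans three descriptors chained with
	 * HCLGE_CMD_FLAG_NEXT; the key bytes are split across their
	 * tcam_data areas below
	 */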
5112 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5113 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5114 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5115 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5116 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5117 
5118 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5119 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5120 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5121 
5122 	req1->stage = stage;
5123 	req1->xy_sel = sel_x ? 1 : 0;
5124 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5125 	req1->index = cpu_to_le32(loc);
5126 	req1->entry_vld = sel_x ? is_add : 0;
5127 
5128 	if (key) {
5129 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5130 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5131 		       sizeof(req2->tcam_data));
5132 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5133 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5134 	}
5135 
5136 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5137 	if (ret)
5138 		dev_err(&hdev->pdev->dev,
5139 			"config tcam key fail, ret=%d\n",
5140 			ret);
5141 
5142 	return ret;
5143 }
5144 
5145 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5146 			      struct hclge_fd_ad_data *action)
5147 {
5148 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5149 	struct hclge_fd_ad_config_cmd *req;
5150 	struct hclge_desc desc;
5151 	u64 ad_data = 0;
5152 	int ret;
5153 
5154 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5155 
5156 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5157 	req->index = cpu_to_le32(loc);
5158 	req->stage = stage;
5159 
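	/* the rule id write-back fields are packed first and then shifted
	 * into the high 32 bits of ad_data; the action fields (drop, queue,
	 * counter, next stage) occupy the low 32 bits
	 */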
5160 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5161 		      action->write_rule_id_to_bd);
5162 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5163 			action->rule_id);
5164 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5165 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5166 			      action->override_tc);
5167 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5168 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5169 	}
5170 	ad_data <<= 32;
5171 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5172 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5173 		      action->forward_to_direct_queue);
5174 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5175 			action->queue_id);
5176 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5177 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5178 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5179 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5180 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5181 			action->counter_id);
5182 
5183 	req->ad_data = cpu_to_le64(ad_data);
5184 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5185 	if (ret)
5186 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5187 
5188 	return ret;
5189 }
5190 
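/* Convert one tuple of a flow director rule into the TCAM X/Y key format.
 * For each tuple the value/mask pair is encoded with calc_x()/calc_y() into
 * the key_x/key_y buffers; tuples marked as unused are skipped and reported
 * as handled. Returns false for tuple bits this function does not handle.
 */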
5191 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5192 				   struct hclge_fd_rule *rule)
5193 {
5194 	u16 tmp_x_s, tmp_y_s;
5195 	u32 tmp_x_l, tmp_y_l;
5196 	int i;
5197 
5198 	if (rule->unused_tuple & tuple_bit)
5199 		return true;
5200 
5201 	switch (tuple_bit) {
5202 	case BIT(INNER_DST_MAC):
5203 		for (i = 0; i < ETH_ALEN; i++) {
5204 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5205 			       rule->tuples_mask.dst_mac[i]);
5206 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5207 			       rule->tuples_mask.dst_mac[i]);
5208 		}
5209 
5210 		return true;
5211 	case BIT(INNER_SRC_MAC):
5212 		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
			       rule->tuples_mask.src_mac[i]);
5217 		}
5218 
5219 		return true;
5220 	case BIT(INNER_VLAN_TAG_FST):
5221 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5222 		       rule->tuples_mask.vlan_tag1);
5223 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5224 		       rule->tuples_mask.vlan_tag1);
5225 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5226 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5227 
5228 		return true;
5229 	case BIT(INNER_ETH_TYPE):
5230 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5231 		       rule->tuples_mask.ether_proto);
5232 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5233 		       rule->tuples_mask.ether_proto);
5234 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5235 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5236 
5237 		return true;
5238 	case BIT(INNER_IP_TOS):
5239 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5240 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5241 
5242 		return true;
5243 	case BIT(INNER_IP_PROTO):
5244 		calc_x(*key_x, rule->tuples.ip_proto,
5245 		       rule->tuples_mask.ip_proto);
5246 		calc_y(*key_y, rule->tuples.ip_proto,
5247 		       rule->tuples_mask.ip_proto);
5248 
5249 		return true;
5250 	case BIT(INNER_SRC_IP):
5251 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5252 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5253 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5254 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5255 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5256 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5257 
5258 		return true;
5259 	case BIT(INNER_DST_IP):
5260 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5261 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5262 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5263 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5264 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5265 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5266 
5267 		return true;
5268 	case BIT(INNER_SRC_PORT):
5269 		calc_x(tmp_x_s, rule->tuples.src_port,
5270 		       rule->tuples_mask.src_port);
5271 		calc_y(tmp_y_s, rule->tuples.src_port,
5272 		       rule->tuples_mask.src_port);
5273 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5274 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5275 
5276 		return true;
5277 	case BIT(INNER_DST_PORT):
5278 		calc_x(tmp_x_s, rule->tuples.dst_port,
5279 		       rule->tuples_mask.dst_port);
5280 		calc_y(tmp_y_s, rule->tuples.dst_port,
5281 		       rule->tuples_mask.dst_port);
5282 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5283 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5284 
5285 		return true;
5286 	default:
5287 		return false;
5288 	}
5289 }
5290 
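/* Encode a port number: for a host port, pack the pf_id and vf_id; for a
 * network port, pack the physical network port id.
 */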
5291 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5292 				 u8 vf_id, u8 network_port_id)
5293 {
5294 	u32 port_number = 0;
5295 
5296 	if (port_type == HOST_PORT) {
5297 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5298 				pf_id);
5299 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5300 				vf_id);
5301 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5302 	} else {
5303 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5304 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5305 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5306 	}
5307 
5308 	return port_number;
5309 }
5310 
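/* Pack the active meta data fields (packet type, destination vport) into a
 * 32-bit word, convert it to a TCAM key pair and shift the result so that
 * the meta data sits at the MSB end of its key region.
 */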
5311 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5312 				       __le32 *key_x, __le32 *key_y,
5313 				       struct hclge_fd_rule *rule)
5314 {
5315 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5316 	u8 cur_pos = 0, tuple_size, shift_bits;
5317 	unsigned int i;
5318 
5319 	for (i = 0; i < MAX_META_DATA; i++) {
5320 		tuple_size = meta_data_key_info[i].key_length;
5321 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5322 
5323 		switch (tuple_bit) {
5324 		case BIT(ROCE_TYPE):
5325 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5326 			cur_pos += tuple_size;
5327 			break;
5328 		case BIT(DST_VPORT):
5329 			port_number = hclge_get_port_number(HOST_PORT, 0,
5330 							    rule->vf_id, 0);
5331 			hnae3_set_field(meta_data,
5332 					GENMASK(cur_pos + tuple_size, cur_pos),
5333 					cur_pos, port_number);
5334 			cur_pos += tuple_size;
5335 			break;
5336 		default:
5337 			break;
5338 		}
5339 	}
5340 
5341 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5342 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5343 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5344 
5345 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5346 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5347 }
5348 
/* A complete key is a combination of meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits are filled with 0.
 */
5353 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5354 			    struct hclge_fd_rule *rule)
5355 {
5356 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5357 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5358 	u8 *cur_key_x, *cur_key_y;
5359 	u8 meta_data_region;
5360 	u8 tuple_size;
5361 	int ret;
5362 	u32 i;
5363 
5364 	memset(key_x, 0, sizeof(key_x));
5365 	memset(key_y, 0, sizeof(key_y));
5366 	cur_key_x = key_x;
5367 	cur_key_y = key_y;
5368 
	for (i = 0; i < MAX_TUPLE; i++) {
5370 		bool tuple_valid;
5371 		u32 check_tuple;
5372 
5373 		tuple_size = tuple_key_info[i].key_length / 8;
5374 		check_tuple = key_cfg->tuple_active & BIT(i);
5375 
5376 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5377 						     cur_key_y, rule);
5378 		if (tuple_valid) {
5379 			cur_key_x += tuple_size;
5380 			cur_key_y += tuple_size;
5381 		}
5382 	}
5383 
5384 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5385 			MAX_META_DATA_LENGTH / 8;
5386 
5387 	hclge_fd_convert_meta_data(key_cfg,
5388 				   (__le32 *)(key_x + meta_data_region),
5389 				   (__le32 *)(key_y + meta_data_region),
5390 				   rule);
5391 
5392 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5393 				   true);
5394 	if (ret) {
5395 		dev_err(&hdev->pdev->dev,
5396 			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5398 		return ret;
5399 	}
5400 
5401 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5402 				   true);
5403 	if (ret)
5404 		dev_err(&hdev->pdev->dev,
5405 			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5407 	return ret;
5408 }
5409 
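/* Build the action data for the rule (drop, forward to a queue, or
 * override the TC) and write it to hardware via hclge_fd_ad_config().
 */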
5410 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5411 			       struct hclge_fd_rule *rule)
5412 {
5413 	struct hclge_vport *vport = hdev->vport;
5414 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5415 	struct hclge_fd_ad_data ad_data;
5416 
5417 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5418 	ad_data.ad_id = rule->location;
5419 
5420 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5421 		ad_data.drop_packet = true;
5422 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5423 		ad_data.override_tc = true;
5424 		ad_data.queue_id =
5425 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5426 		ad_data.tc_size =
5427 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5428 	} else {
5429 		ad_data.forward_to_direct_queue = true;
5430 		ad_data.queue_id = rule->queue_id;
5431 	}
5432 
5433 	ad_data.use_counter = false;
5434 	ad_data.counter_id = 0;
5435 
5436 	ad_data.use_next_stage = false;
5437 	ad_data.next_input_key = 0;
5438 
5439 	ad_data.write_rule_id_to_bd = true;
5440 	ad_data.rule_id = rule->location;
5441 
5442 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5443 }
5444 
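/* The hclge_fd_check_*_tuple() helpers below validate one ethtool flow
 * spec variant each: they mark the fields not used for matching in
 * *unused_tuple and reject fields the hardware cannot match.
 */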
5445 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5446 				       u32 *unused_tuple)
5447 {
5448 	if (!spec || !unused_tuple)
5449 		return -EINVAL;
5450 
5451 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5452 
5453 	if (!spec->ip4src)
5454 		*unused_tuple |= BIT(INNER_SRC_IP);
5455 
5456 	if (!spec->ip4dst)
5457 		*unused_tuple |= BIT(INNER_DST_IP);
5458 
5459 	if (!spec->psrc)
5460 		*unused_tuple |= BIT(INNER_SRC_PORT);
5461 
5462 	if (!spec->pdst)
5463 		*unused_tuple |= BIT(INNER_DST_PORT);
5464 
5465 	if (!spec->tos)
5466 		*unused_tuple |= BIT(INNER_IP_TOS);
5467 
5468 	return 0;
5469 }
5470 
5471 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5472 				    u32 *unused_tuple)
5473 {
5474 	if (!spec || !unused_tuple)
5475 		return -EINVAL;
5476 
5477 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5478 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5479 
5480 	if (!spec->ip4src)
5481 		*unused_tuple |= BIT(INNER_SRC_IP);
5482 
5483 	if (!spec->ip4dst)
5484 		*unused_tuple |= BIT(INNER_DST_IP);
5485 
5486 	if (!spec->tos)
5487 		*unused_tuple |= BIT(INNER_IP_TOS);
5488 
5489 	if (!spec->proto)
5490 		*unused_tuple |= BIT(INNER_IP_PROTO);
5491 
5492 	if (spec->l4_4_bytes)
5493 		return -EOPNOTSUPP;
5494 
5495 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5496 		return -EOPNOTSUPP;
5497 
5498 	return 0;
5499 }
5500 
5501 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5502 				       u32 *unused_tuple)
5503 {
5504 	if (!spec || !unused_tuple)
5505 		return -EINVAL;
5506 
5507 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5508 		BIT(INNER_IP_TOS);
5509 
	/* check whether src/dst ip address is used */
5511 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5512 	    !spec->ip6src[2] && !spec->ip6src[3])
5513 		*unused_tuple |= BIT(INNER_SRC_IP);
5514 
5515 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5516 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5517 		*unused_tuple |= BIT(INNER_DST_IP);
5518 
5519 	if (!spec->psrc)
5520 		*unused_tuple |= BIT(INNER_SRC_PORT);
5521 
5522 	if (!spec->pdst)
5523 		*unused_tuple |= BIT(INNER_DST_PORT);
5524 
5525 	if (spec->tclass)
5526 		return -EOPNOTSUPP;
5527 
5528 	return 0;
5529 }
5530 
5531 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5532 				    u32 *unused_tuple)
5533 {
5534 	if (!spec || !unused_tuple)
5535 		return -EINVAL;
5536 
5537 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5538 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5539 
	/* check whether src/dst ip address is used */
5541 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5542 	    !spec->ip6src[2] && !spec->ip6src[3])
5543 		*unused_tuple |= BIT(INNER_SRC_IP);
5544 
5545 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5546 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5547 		*unused_tuple |= BIT(INNER_DST_IP);
5548 
5549 	if (!spec->l4_proto)
5550 		*unused_tuple |= BIT(INNER_IP_PROTO);
5551 
5552 	if (spec->tclass)
5553 		return -EOPNOTSUPP;
5554 
5555 	if (spec->l4_4_bytes)
5556 		return -EOPNOTSUPP;
5557 
5558 	return 0;
5559 }
5560 
5561 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5562 {
5563 	if (!spec || !unused_tuple)
5564 		return -EINVAL;
5565 
5566 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5567 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5568 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5569 
5570 	if (is_zero_ether_addr(spec->h_source))
5571 		*unused_tuple |= BIT(INNER_SRC_MAC);
5572 
5573 	if (is_zero_ether_addr(spec->h_dest))
5574 		*unused_tuple |= BIT(INNER_DST_MAC);
5575 
5576 	if (!spec->h_proto)
5577 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5578 
5579 	return 0;
5580 }
5581 
5582 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5583 				    struct ethtool_rx_flow_spec *fs,
5584 				    u32 *unused_tuple)
5585 {
5586 	if (fs->flow_type & FLOW_EXT) {
5587 		if (fs->h_ext.vlan_etype) {
5588 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5589 			return -EOPNOTSUPP;
5590 		}
5591 
5592 		if (!fs->h_ext.vlan_tci)
5593 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5594 
5595 		if (fs->m_ext.vlan_tci &&
5596 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5597 			dev_err(&hdev->pdev->dev,
5598 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5599 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5600 			return -EINVAL;
5601 		}
5602 	} else {
5603 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5604 	}
5605 
5606 	if (fs->flow_type & FLOW_MAC_EXT) {
5607 		if (hdev->fd_cfg.fd_mode !=
5608 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5609 			dev_err(&hdev->pdev->dev,
5610 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5611 			return -EOPNOTSUPP;
5612 		}
5613 
5614 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5615 			*unused_tuple |= BIT(INNER_DST_MAC);
5616 		else
5617 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5618 	}
5619 
5620 	return 0;
5621 }
5622 
5623 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5624 			       struct ethtool_rx_flow_spec *fs,
5625 			       u32 *unused_tuple)
5626 {
5627 	u32 flow_type;
5628 	int ret;
5629 
5630 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5631 		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5633 			fs->location,
5634 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5635 		return -EINVAL;
5636 	}
5637 
5638 	if ((fs->flow_type & FLOW_EXT) &&
5639 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5640 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5641 		return -EOPNOTSUPP;
5642 	}
5643 
5644 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5645 	switch (flow_type) {
5646 	case SCTP_V4_FLOW:
5647 	case TCP_V4_FLOW:
5648 	case UDP_V4_FLOW:
5649 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5650 						  unused_tuple);
5651 		break;
5652 	case IP_USER_FLOW:
5653 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5654 					       unused_tuple);
5655 		break;
5656 	case SCTP_V6_FLOW:
5657 	case TCP_V6_FLOW:
5658 	case UDP_V6_FLOW:
5659 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5660 						  unused_tuple);
5661 		break;
5662 	case IPV6_USER_FLOW:
5663 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5664 					       unused_tuple);
5665 		break;
5666 	case ETHER_FLOW:
5667 		if (hdev->fd_cfg.fd_mode !=
5668 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5669 			dev_err(&hdev->pdev->dev,
5670 				"ETHER_FLOW is not supported in current fd mode!\n");
5671 			return -EOPNOTSUPP;
5672 		}
5673 
5674 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5675 						 unused_tuple);
5676 		break;
5677 	default:
5678 		dev_err(&hdev->pdev->dev,
5679 			"unsupported protocol type, protocol type = %#x\n",
5680 			flow_type);
5681 		return -EOPNOTSUPP;
5682 	}
5683 
5684 	if (ret) {
5685 		dev_err(&hdev->pdev->dev,
5686 			"failed to check flow union tuple, ret = %d\n",
5687 			ret);
5688 		return ret;
5689 	}
5690 
5691 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5692 }
5693 
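/* Check whether a rule has already been configured at the given location. */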
5694 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5695 {
5696 	struct hclge_fd_rule *rule = NULL;
5697 	struct hlist_node *node2;
5698 
5699 	spin_lock_bh(&hdev->fd_rule_lock);
5700 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5701 		if (rule->location >= location)
5702 			break;
5703 	}
5704 
5705 	spin_unlock_bh(&hdev->fd_rule_lock);
5706 
	return rule && rule->location == location;
5708 }
5709 
/* make sure this is called with fd_rule_lock held */
5711 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5712 				     struct hclge_fd_rule *new_rule,
5713 				     u16 location,
5714 				     bool is_add)
5715 {
5716 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5717 	struct hlist_node *node2;
5718 
5719 	if (is_add && !new_rule)
5720 		return -EINVAL;
5721 
5722 	hlist_for_each_entry_safe(rule, node2,
5723 				  &hdev->fd_rule_list, rule_node) {
5724 		if (rule->location >= location)
5725 			break;
5726 		parent = rule;
5727 	}
5728 
5729 	if (rule && rule->location == location) {
5730 		hlist_del(&rule->rule_node);
5731 		kfree(rule);
5732 		hdev->hclge_fd_rule_num--;
5733 
5734 		if (!is_add) {
5735 			if (!hdev->hclge_fd_rule_num)
5736 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5737 			clear_bit(location, hdev->fd_bmap);
5738 
5739 			return 0;
5740 		}
5741 	} else if (!is_add) {
5742 		dev_err(&hdev->pdev->dev,
			"delete fail, rule %u does not exist\n",
5744 			location);
5745 		return -EINVAL;
5746 	}
5747 
5748 	INIT_HLIST_NODE(&new_rule->rule_node);
5749 
5750 	if (parent)
5751 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5752 	else
5753 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5754 
5755 	set_bit(location, hdev->fd_bmap);
5756 	hdev->hclge_fd_rule_num++;
5757 	hdev->fd_active_type = new_rule->rule_type;
5758 
5759 	return 0;
5760 }
5761 
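/* Translate an ethtool rx flow spec into the driver's rule tuples and
 * tuple masks, including the implicit L3/L4 protocol of the flow type.
 */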
5762 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5763 			      struct ethtool_rx_flow_spec *fs,
5764 			      struct hclge_fd_rule *rule)
5765 {
5766 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5767 
5768 	switch (flow_type) {
5769 	case SCTP_V4_FLOW:
5770 	case TCP_V4_FLOW:
5771 	case UDP_V4_FLOW:
5772 		rule->tuples.src_ip[IPV4_INDEX] =
5773 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5774 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5775 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5776 
5777 		rule->tuples.dst_ip[IPV4_INDEX] =
5778 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5779 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5780 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5781 
5782 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5783 		rule->tuples_mask.src_port =
5784 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5785 
5786 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5787 		rule->tuples_mask.dst_port =
5788 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5789 
5790 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5791 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5792 
5793 		rule->tuples.ether_proto = ETH_P_IP;
5794 		rule->tuples_mask.ether_proto = 0xFFFF;
5795 
5796 		break;
5797 	case IP_USER_FLOW:
5798 		rule->tuples.src_ip[IPV4_INDEX] =
5799 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5800 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5801 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5802 
5803 		rule->tuples.dst_ip[IPV4_INDEX] =
5804 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5805 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5806 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5807 
5808 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5809 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5810 
5811 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5812 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5813 
5814 		rule->tuples.ether_proto = ETH_P_IP;
5815 		rule->tuples_mask.ether_proto = 0xFFFF;
5816 
5817 		break;
5818 	case SCTP_V6_FLOW:
5819 	case TCP_V6_FLOW:
5820 	case UDP_V6_FLOW:
5821 		be32_to_cpu_array(rule->tuples.src_ip,
5822 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5823 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5824 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5825 
5826 		be32_to_cpu_array(rule->tuples.dst_ip,
5827 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5828 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5829 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5830 
5831 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5832 		rule->tuples_mask.src_port =
5833 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5834 
5835 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5836 		rule->tuples_mask.dst_port =
5837 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5838 
5839 		rule->tuples.ether_proto = ETH_P_IPV6;
5840 		rule->tuples_mask.ether_proto = 0xFFFF;
5841 
5842 		break;
5843 	case IPV6_USER_FLOW:
5844 		be32_to_cpu_array(rule->tuples.src_ip,
5845 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5846 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5847 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5848 
5849 		be32_to_cpu_array(rule->tuples.dst_ip,
5850 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5851 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5852 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5853 
5854 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5855 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5856 
5857 		rule->tuples.ether_proto = ETH_P_IPV6;
5858 		rule->tuples_mask.ether_proto = 0xFFFF;
5859 
5860 		break;
5861 	case ETHER_FLOW:
5862 		ether_addr_copy(rule->tuples.src_mac,
5863 				fs->h_u.ether_spec.h_source);
5864 		ether_addr_copy(rule->tuples_mask.src_mac,
5865 				fs->m_u.ether_spec.h_source);
5866 
5867 		ether_addr_copy(rule->tuples.dst_mac,
5868 				fs->h_u.ether_spec.h_dest);
5869 		ether_addr_copy(rule->tuples_mask.dst_mac,
5870 				fs->m_u.ether_spec.h_dest);
5871 
5872 		rule->tuples.ether_proto =
5873 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5874 		rule->tuples_mask.ether_proto =
5875 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5876 
5877 		break;
5878 	default:
5879 		return -EOPNOTSUPP;
5880 	}
5881 
5882 	switch (flow_type) {
5883 	case SCTP_V4_FLOW:
5884 	case SCTP_V6_FLOW:
5885 		rule->tuples.ip_proto = IPPROTO_SCTP;
5886 		rule->tuples_mask.ip_proto = 0xFF;
5887 		break;
5888 	case TCP_V4_FLOW:
5889 	case TCP_V6_FLOW:
5890 		rule->tuples.ip_proto = IPPROTO_TCP;
5891 		rule->tuples_mask.ip_proto = 0xFF;
5892 		break;
5893 	case UDP_V4_FLOW:
5894 	case UDP_V6_FLOW:
5895 		rule->tuples.ip_proto = IPPROTO_UDP;
5896 		rule->tuples_mask.ip_proto = 0xFF;
5897 		break;
5898 	default:
5899 		break;
5900 	}
5901 
5902 	if (fs->flow_type & FLOW_EXT) {
5903 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5904 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5905 	}
5906 
5907 	if (fs->flow_type & FLOW_MAC_EXT) {
5908 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5909 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5910 	}
5911 
5912 	return 0;
5913 }
5914 
/* make sure this is called with fd_rule_lock held */
5916 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5917 				struct hclge_fd_rule *rule)
5918 {
5919 	int ret;
5920 
5921 	if (!rule) {
5922 		dev_err(&hdev->pdev->dev,
5923 			"The flow director rule is NULL\n");
5924 		return -EINVAL;
5925 	}
5926 
	/* it will never fail here, so no need to check the return value */
5928 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5929 
5930 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5931 	if (ret)
5932 		goto clear_rule;
5933 
5934 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5935 	if (ret)
5936 		goto clear_rule;
5937 
5938 	return 0;
5939 
5940 clear_rule:
5941 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5942 	return ret;
5943 }
5944 
5945 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
5946 {
5947 	struct hclge_vport *vport = hclge_get_vport(handle);
5948 	struct hclge_dev *hdev = vport->back;
5949 
5950 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
5951 }
5952 
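/* ethtool entry for adding a flow director rule: validate the spec,
 * resolve the destination vport and queue, then build the rule and
 * program it, clearing any aRFS rules first to avoid conflicts.
 */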
5953 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5954 			      struct ethtool_rxnfc *cmd)
5955 {
5956 	struct hclge_vport *vport = hclge_get_vport(handle);
5957 	struct hclge_dev *hdev = vport->back;
5958 	u16 dst_vport_id = 0, q_index = 0;
5959 	struct ethtool_rx_flow_spec *fs;
5960 	struct hclge_fd_rule *rule;
5961 	u32 unused = 0;
5962 	u8 action;
5963 	int ret;
5964 
5965 	if (!hnae3_dev_fd_supported(hdev)) {
5966 		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
5968 		return -EOPNOTSUPP;
5969 	}
5970 
5971 	if (!hdev->fd_en) {
5972 		dev_err(&hdev->pdev->dev,
5973 			"please enable flow director first\n");
5974 		return -EOPNOTSUPP;
5975 	}
5976 
5977 	if (hclge_is_cls_flower_active(handle)) {
5978 		dev_err(&hdev->pdev->dev,
			"please delete all existing cls flower rules first\n");
5980 		return -EINVAL;
5981 	}
5982 
5983 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5984 
5985 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5986 	if (ret)
5987 		return ret;
5988 
5989 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5990 		action = HCLGE_FD_ACTION_DROP_PACKET;
5991 	} else {
5992 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5993 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5994 		u16 tqps;
5995 
5996 		if (vf > hdev->num_req_vfs) {
5997 			dev_err(&hdev->pdev->dev,
5998 				"Error: vf id (%u) > max vf num (%u)\n",
5999 				vf, hdev->num_req_vfs);
6000 			return -EINVAL;
6001 		}
6002 
6003 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6004 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
6005 
6006 		if (ring >= tqps) {
6007 			dev_err(&hdev->pdev->dev,
6008 				"Error: queue id (%u) > max tqp num (%u)\n",
6009 				ring, tqps - 1);
6010 			return -EINVAL;
6011 		}
6012 
6013 		action = HCLGE_FD_ACTION_SELECT_QUEUE;
6014 		q_index = ring;
6015 	}
6016 
6017 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6018 	if (!rule)
6019 		return -ENOMEM;
6020 
6021 	ret = hclge_fd_get_tuple(hdev, fs, rule);
6022 	if (ret) {
6023 		kfree(rule);
6024 		return ret;
6025 	}
6026 
6027 	rule->flow_type = fs->flow_type;
6028 	rule->location = fs->location;
6029 	rule->unused_tuple = unused;
6030 	rule->vf_id = dst_vport_id;
6031 	rule->queue_id = q_index;
6032 	rule->action = action;
6033 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6034 
	/* to avoid rule conflict, when the user configures a rule by ethtool,
6036 	 * we need to clear all arfs rules
6037 	 */
6038 	spin_lock_bh(&hdev->fd_rule_lock);
6039 	hclge_clear_arfs_rules(handle);
6040 
6041 	ret = hclge_fd_config_rule(hdev, rule);
6042 
6043 	spin_unlock_bh(&hdev->fd_rule_lock);
6044 
6045 	return ret;
6046 }
6047 
6048 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6049 			      struct ethtool_rxnfc *cmd)
6050 {
6051 	struct hclge_vport *vport = hclge_get_vport(handle);
6052 	struct hclge_dev *hdev = vport->back;
6053 	struct ethtool_rx_flow_spec *fs;
6054 	int ret;
6055 
6056 	if (!hnae3_dev_fd_supported(hdev))
6057 		return -EOPNOTSUPP;
6058 
6059 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6060 
6061 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6062 		return -EINVAL;
6063 
6064 	if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num ||
6065 	    !hclge_fd_rule_exist(hdev, fs->location)) {
6066 		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u does not exist\n", fs->location);
6068 		return -ENOENT;
6069 	}
6070 
6071 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6072 				   NULL, false);
6073 	if (ret)
6074 		return ret;
6075 
6076 	spin_lock_bh(&hdev->fd_rule_lock);
6077 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
6078 
6079 	spin_unlock_bh(&hdev->fd_rule_lock);
6080 
6081 	return ret;
6082 }
6083 
/* make sure this is called with fd_rule_lock held */
6085 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
6086 				     bool clear_list)
6087 {
6088 	struct hclge_vport *vport = hclge_get_vport(handle);
6089 	struct hclge_dev *hdev = vport->back;
6090 	struct hclge_fd_rule *rule;
6091 	struct hlist_node *node;
6092 	u16 location;
6093 
6094 	if (!hnae3_dev_fd_supported(hdev))
6095 		return;
6096 
6097 	for_each_set_bit(location, hdev->fd_bmap,
6098 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6099 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6100 				     NULL, false);
6101 
6102 	if (clear_list) {
6103 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6104 					  rule_node) {
6105 			hlist_del(&rule->rule_node);
6106 			kfree(rule);
6107 		}
6108 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6109 		hdev->hclge_fd_rule_num = 0;
6110 		bitmap_zero(hdev->fd_bmap,
6111 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6112 	}
6113 }
6114 
6115 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6116 {
6117 	struct hclge_vport *vport = hclge_get_vport(handle);
6118 	struct hclge_dev *hdev = vport->back;
6119 	struct hclge_fd_rule *rule;
6120 	struct hlist_node *node;
6121 	int ret;
6122 
6123 	/* Return ok here, because reset error handling will check this
6124 	 * return value. If error is returned here, the reset process will
6125 	 * fail.
6126 	 */
6127 	if (!hnae3_dev_fd_supported(hdev))
6128 		return 0;
6129 
	/* if fd is disabled, the rules should not be restored during reset */
6131 	if (!hdev->fd_en)
6132 		return 0;
6133 
6134 	spin_lock_bh(&hdev->fd_rule_lock);
6135 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6136 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6137 		if (!ret)
6138 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6139 
6140 		if (ret) {
6141 			dev_warn(&hdev->pdev->dev,
6142 				 "Restore rule %u failed, remove it\n",
6143 				 rule->location);
6144 			clear_bit(rule->location, hdev->fd_bmap);
6145 			hlist_del(&rule->rule_node);
6146 			kfree(rule);
6147 			hdev->hclge_fd_rule_num--;
6148 		}
6149 	}
6150 
6151 	if (hdev->hclge_fd_rule_num)
6152 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
6153 
6154 	spin_unlock_bh(&hdev->fd_rule_lock);
6155 
6156 	return 0;
6157 }
6158 
6159 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6160 				 struct ethtool_rxnfc *cmd)
6161 {
6162 	struct hclge_vport *vport = hclge_get_vport(handle);
6163 	struct hclge_dev *hdev = vport->back;
6164 
6165 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6166 		return -EOPNOTSUPP;
6167 
6168 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6169 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6170 
6171 	return 0;
6172 }
6173 
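/* The hclge_fd_get_*_info() helpers below convert a stored rule back into
 * an ethtool flow spec, reporting a zero mask for tuples the rule does
 * not use.
 */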
6174 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6175 				     struct ethtool_tcpip4_spec *spec,
6176 				     struct ethtool_tcpip4_spec *spec_mask)
6177 {
6178 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6179 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6180 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6181 
6182 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6183 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6184 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6185 
6186 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6187 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6188 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6189 
6190 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6191 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6192 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6193 
6194 	spec->tos = rule->tuples.ip_tos;
6195 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6196 			0 : rule->tuples_mask.ip_tos;
6197 }
6198 
6199 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6200 				  struct ethtool_usrip4_spec *spec,
6201 				  struct ethtool_usrip4_spec *spec_mask)
6202 {
6203 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6204 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6205 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6206 
6207 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6208 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6209 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6210 
6211 	spec->tos = rule->tuples.ip_tos;
6212 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6213 			0 : rule->tuples_mask.ip_tos;
6214 
6215 	spec->proto = rule->tuples.ip_proto;
6216 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6217 			0 : rule->tuples_mask.ip_proto;
6218 
6219 	spec->ip_ver = ETH_RX_NFC_IP4;
6220 }
6221 
6222 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6223 				     struct ethtool_tcpip6_spec *spec,
6224 				     struct ethtool_tcpip6_spec *spec_mask)
6225 {
6226 	cpu_to_be32_array(spec->ip6src,
6227 			  rule->tuples.src_ip, IPV6_SIZE);
6228 	cpu_to_be32_array(spec->ip6dst,
6229 			  rule->tuples.dst_ip, IPV6_SIZE);
6230 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6231 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6232 	else
6233 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6234 				  IPV6_SIZE);
6235 
6236 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6237 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6238 	else
6239 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6240 				  IPV6_SIZE);
6241 
6242 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6243 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6244 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6245 
6246 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6247 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6248 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6249 }
6250 
6251 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6252 				  struct ethtool_usrip6_spec *spec,
6253 				  struct ethtool_usrip6_spec *spec_mask)
6254 {
6255 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6256 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6257 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6258 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6259 	else
6260 		cpu_to_be32_array(spec_mask->ip6src,
6261 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6262 
6263 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6264 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6265 	else
6266 		cpu_to_be32_array(spec_mask->ip6dst,
6267 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6268 
6269 	spec->l4_proto = rule->tuples.ip_proto;
6270 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6271 			0 : rule->tuples_mask.ip_proto;
6272 }
6273 
6274 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6275 				    struct ethhdr *spec,
6276 				    struct ethhdr *spec_mask)
6277 {
6278 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6279 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6280 
6281 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6282 		eth_zero_addr(spec_mask->h_source);
6283 	else
6284 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6285 
6286 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6287 		eth_zero_addr(spec_mask->h_dest);
6288 	else
6289 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6290 
6291 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6292 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6293 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6294 }
6295 
6296 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6297 				  struct hclge_fd_rule *rule)
6298 {
6299 	if (fs->flow_type & FLOW_EXT) {
6300 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6301 		fs->m_ext.vlan_tci =
6302 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6305 	}
6306 
6307 	if (fs->flow_type & FLOW_MAC_EXT) {
6308 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6309 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6310 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6311 		else
6312 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6313 					rule->tuples_mask.dst_mac);
6314 	}
6315 }
6316 
6317 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6318 				  struct ethtool_rxnfc *cmd)
6319 {
6320 	struct hclge_vport *vport = hclge_get_vport(handle);
6321 	struct hclge_fd_rule *rule = NULL;
6322 	struct hclge_dev *hdev = vport->back;
6323 	struct ethtool_rx_flow_spec *fs;
6324 	struct hlist_node *node2;
6325 
6326 	if (!hnae3_dev_fd_supported(hdev))
6327 		return -EOPNOTSUPP;
6328 
6329 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6330 
6331 	spin_lock_bh(&hdev->fd_rule_lock);
6332 
6333 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6334 		if (rule->location >= fs->location)
6335 			break;
6336 	}
6337 
6338 	if (!rule || fs->location != rule->location) {
6339 		spin_unlock_bh(&hdev->fd_rule_lock);
6340 
6341 		return -ENOENT;
6342 	}
6343 
6344 	fs->flow_type = rule->flow_type;
6345 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6346 	case SCTP_V4_FLOW:
6347 	case TCP_V4_FLOW:
6348 	case UDP_V4_FLOW:
6349 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6350 					 &fs->m_u.tcp_ip4_spec);
6351 		break;
6352 	case IP_USER_FLOW:
6353 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6354 				      &fs->m_u.usr_ip4_spec);
6355 		break;
6356 	case SCTP_V6_FLOW:
6357 	case TCP_V6_FLOW:
6358 	case UDP_V6_FLOW:
6359 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6360 					 &fs->m_u.tcp_ip6_spec);
6361 		break;
6362 	case IPV6_USER_FLOW:
6363 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6364 				      &fs->m_u.usr_ip6_spec);
6365 		break;
	/* The flow type of the fd rule has been checked before adding it to
	 * the rule list. As other flow types have been handled, it must be
	 * ETHER_FLOW for the default case
	 */
6370 	default:
6371 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6372 					&fs->m_u.ether_spec);
6373 		break;
6374 	}
6375 
6376 	hclge_fd_get_ext_info(fs, rule);
6377 
6378 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6379 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6380 	} else {
6381 		u64 vf_id;
6382 
6383 		fs->ring_cookie = rule->queue_id;
6384 		vf_id = rule->vf_id;
6385 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6386 		fs->ring_cookie |= vf_id;
6387 	}
6388 
6389 	spin_unlock_bh(&hdev->fd_rule_lock);
6390 
6391 	return 0;
6392 }
6393 
6394 static int hclge_get_all_rules(struct hnae3_handle *handle,
6395 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6396 {
6397 	struct hclge_vport *vport = hclge_get_vport(handle);
6398 	struct hclge_dev *hdev = vport->back;
6399 	struct hclge_fd_rule *rule;
6400 	struct hlist_node *node2;
6401 	int cnt = 0;
6402 
6403 	if (!hnae3_dev_fd_supported(hdev))
6404 		return -EOPNOTSUPP;
6405 
6406 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6407 
6408 	spin_lock_bh(&hdev->fd_rule_lock);
6409 	hlist_for_each_entry_safe(rule, node2,
6410 				  &hdev->fd_rule_list, rule_node) {
6411 		if (cnt == cmd->rule_cnt) {
6412 			spin_unlock_bh(&hdev->fd_rule_lock);
6413 			return -EMSGSIZE;
6414 		}
6415 
6416 		rule_locs[cnt] = rule->location;
6417 		cnt++;
6418 	}
6419 
6420 	spin_unlock_bh(&hdev->fd_rule_lock);
6421 
6422 	cmd->rule_cnt = cnt;
6423 
6424 	return 0;
6425 }
6426 
6427 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6428 				     struct hclge_fd_rule_tuples *tuples)
6429 {
6430 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6431 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6432 
6433 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6434 	tuples->ip_proto = fkeys->basic.ip_proto;
6435 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6436 
6437 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6438 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6439 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6440 	} else {
6441 		int i;
6442 
6443 		for (i = 0; i < IPV6_SIZE; i++) {
6444 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6445 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6446 		}
6447 	}
6448 }
6449 
/* traverse all rules, check whether an existing rule has the same tuples */
6451 static struct hclge_fd_rule *
6452 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6453 			  const struct hclge_fd_rule_tuples *tuples)
6454 {
6455 	struct hclge_fd_rule *rule = NULL;
6456 	struct hlist_node *node;
6457 
6458 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6459 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6460 			return rule;
6461 	}
6462 
6463 	return NULL;
6464 }
6465 
6466 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6467 				     struct hclge_fd_rule *rule)
6468 {
6469 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6470 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6471 			     BIT(INNER_SRC_PORT);
6472 	rule->action = 0;
6473 	rule->vf_id = 0;
6474 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6475 	if (tuples->ether_proto == ETH_P_IP) {
6476 		if (tuples->ip_proto == IPPROTO_TCP)
6477 			rule->flow_type = TCP_V4_FLOW;
6478 		else
6479 			rule->flow_type = UDP_V4_FLOW;
6480 	} else {
6481 		if (tuples->ip_proto == IPPROTO_TCP)
6482 			rule->flow_type = TCP_V6_FLOW;
6483 		else
6484 			rule->flow_type = UDP_V6_FLOW;
6485 	}
6486 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6487 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6488 }
6489 
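/* aRFS callback: look up a rule with the same flow tuples. If none exists,
 * allocate a free location and program a new rule; if one exists with a
 * different queue, update its action. Returns the rule location on success.
 */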
6490 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6491 				      u16 flow_id, struct flow_keys *fkeys)
6492 {
6493 	struct hclge_vport *vport = hclge_get_vport(handle);
6494 	struct hclge_fd_rule_tuples new_tuples = {};
6495 	struct hclge_dev *hdev = vport->back;
6496 	struct hclge_fd_rule *rule;
6497 	u16 tmp_queue_id;
6498 	u16 bit_id;
6499 	int ret;
6500 
6501 	if (!hnae3_dev_fd_supported(hdev))
6502 		return -EOPNOTSUPP;
6503 
	/* when fd rules added by the user already exist,
6505 	 * arfs should not work
6506 	 */
6507 	spin_lock_bh(&hdev->fd_rule_lock);
6508 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6509 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6510 		spin_unlock_bh(&hdev->fd_rule_lock);
6511 		return -EOPNOTSUPP;
6512 	}
6513 
6514 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6515 
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
6520 	 */
6521 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6522 	if (!rule) {
6523 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6524 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6525 			spin_unlock_bh(&hdev->fd_rule_lock);
6526 			return -ENOSPC;
6527 		}
6528 
6529 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6530 		if (!rule) {
6531 			spin_unlock_bh(&hdev->fd_rule_lock);
6532 			return -ENOMEM;
6533 		}
6534 
6535 		set_bit(bit_id, hdev->fd_bmap);
6536 		rule->location = bit_id;
6537 		rule->arfs.flow_id = flow_id;
6538 		rule->queue_id = queue_id;
6539 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6540 		ret = hclge_fd_config_rule(hdev, rule);
6541 
6542 		spin_unlock_bh(&hdev->fd_rule_lock);
6543 
6544 		if (ret)
6545 			return ret;
6546 
6547 		return rule->location;
6548 	}
6549 
6550 	spin_unlock_bh(&hdev->fd_rule_lock);
6551 
6552 	if (rule->queue_id == queue_id)
6553 		return rule->location;
6554 
6555 	tmp_queue_id = rule->queue_id;
6556 	rule->queue_id = queue_id;
6557 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6558 	if (ret) {
6559 		rule->queue_id = tmp_queue_id;
6560 		return ret;
6561 	}
6562 
6563 	return rule->location;
6564 }
6565 
6566 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6567 {
6568 #ifdef CONFIG_RFS_ACCEL
6569 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6570 	struct hclge_fd_rule *rule;
6571 	struct hlist_node *node;
6572 	HLIST_HEAD(del_list);
6573 
6574 	spin_lock_bh(&hdev->fd_rule_lock);
6575 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6576 		spin_unlock_bh(&hdev->fd_rule_lock);
6577 		return;
6578 	}
6579 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6580 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6581 					rule->arfs.flow_id, rule->location)) {
6582 			hlist_del_init(&rule->rule_node);
6583 			hlist_add_head(&rule->rule_node, &del_list);
6584 			hdev->hclge_fd_rule_num--;
6585 			clear_bit(rule->location, hdev->fd_bmap);
6586 		}
6587 	}
6588 	spin_unlock_bh(&hdev->fd_rule_lock);
6589 
6590 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6591 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6592 				     rule->location, NULL, false);
6593 		kfree(rule);
6594 	}
6595 #endif
6596 }
6597 
/* make sure this is called with fd_rule_lock held */
6599 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6600 {
6601 #ifdef CONFIG_RFS_ACCEL
6602 	struct hclge_vport *vport = hclge_get_vport(handle);
6603 	struct hclge_dev *hdev = vport->back;
6604 
6605 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6606 		hclge_del_all_fd_entries(handle, true);
6607 #endif
6608 }
6609 
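/* The hclge_get_cls_key_*() helpers copy the matched keys of a tc flower
 * rule into the driver rule, or mark the corresponding tuples as unused
 * when a key is not present in the match.
 */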
6610 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6611 				    struct hclge_fd_rule *rule)
6612 {
6613 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6614 		struct flow_match_basic match;
6615 		u16 ethtype_key, ethtype_mask;
6616 
6617 		flow_rule_match_basic(flow, &match);
6618 		ethtype_key = ntohs(match.key->n_proto);
6619 		ethtype_mask = ntohs(match.mask->n_proto);
6620 
6621 		if (ethtype_key == ETH_P_ALL) {
6622 			ethtype_key = 0;
6623 			ethtype_mask = 0;
6624 		}
6625 		rule->tuples.ether_proto = ethtype_key;
6626 		rule->tuples_mask.ether_proto = ethtype_mask;
6627 		rule->tuples.ip_proto = match.key->ip_proto;
6628 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
6629 	} else {
6630 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
6631 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
6632 	}
6633 }
6634 
6635 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
6636 				  struct hclge_fd_rule *rule)
6637 {
6638 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
6639 		struct flow_match_eth_addrs match;
6640 
6641 		flow_rule_match_eth_addrs(flow, &match);
6642 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
6643 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
6644 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
6645 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
6646 	} else {
6647 		rule->unused_tuple |= BIT(INNER_DST_MAC);
6648 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
6649 	}
6650 }
6651 
6652 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
6653 				   struct hclge_fd_rule *rule)
6654 {
6655 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
6656 		struct flow_match_vlan match;
6657 
6658 		flow_rule_match_vlan(flow, &match);
6659 		rule->tuples.vlan_tag1 = match.key->vlan_id |
6660 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
6661 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
6662 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
6663 	} else {
6664 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6665 	}
6666 }
6667 
6668 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
6669 				 struct hclge_fd_rule *rule)
6670 {
6671 	u16 addr_type = 0;
6672 
6673 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
6674 		struct flow_match_control match;
6675 
6676 		flow_rule_match_control(flow, &match);
6677 		addr_type = match.key->addr_type;
6678 	}
6679 
6680 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
6681 		struct flow_match_ipv4_addrs match;
6682 
6683 		flow_rule_match_ipv4_addrs(flow, &match);
6684 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
6685 		rule->tuples_mask.src_ip[IPV4_INDEX] =
6686 						be32_to_cpu(match.mask->src);
6687 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
6688 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
6689 						be32_to_cpu(match.mask->dst);
6690 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
6691 		struct flow_match_ipv6_addrs match;
6692 
6693 		flow_rule_match_ipv6_addrs(flow, &match);
6694 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
6695 				  IPV6_SIZE);
6696 		be32_to_cpu_array(rule->tuples_mask.src_ip,
6697 				  match.mask->src.s6_addr32, IPV6_SIZE);
6698 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
6699 				  IPV6_SIZE);
6700 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
6701 				  match.mask->dst.s6_addr32, IPV6_SIZE);
6702 	} else {
6703 		rule->unused_tuple |= BIT(INNER_SRC_IP);
6704 		rule->unused_tuple |= BIT(INNER_DST_IP);
6705 	}
6706 }
6707 
6708 static void hclge_get_cls_key_port(const struct flow_rule *flow,
6709 				   struct hclge_fd_rule *rule)
6710 {
6711 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
6712 		struct flow_match_ports match;
6713 
6714 		flow_rule_match_ports(flow, &match);
6715 
6716 		rule->tuples.src_port = be16_to_cpu(match.key->src);
6717 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
6718 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
6719 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
6720 	} else {
6721 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
6722 		rule->unused_tuple |= BIT(INNER_DST_PORT);
6723 	}
6724 }
6725 
6726 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
6727 				  struct flow_cls_offload *cls_flower,
6728 				  struct hclge_fd_rule *rule)
6729 {
6730 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
6731 	struct flow_dissector *dissector = flow->match.dissector;
6732 
6733 	if (dissector->used_keys &
6734 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
6735 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
6736 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
6737 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
6738 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
6739 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
6740 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
6741 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
6742 			dissector->used_keys);
6743 		return -EOPNOTSUPP;
6744 	}
6745 
6746 	hclge_get_cls_key_basic(flow, rule);
6747 	hclge_get_cls_key_mac(flow, rule);
6748 	hclge_get_cls_key_vlan(flow, rule);
6749 	hclge_get_cls_key_ip(flow, rule);
6750 	hclge_get_cls_key_port(flow, rule);
6751 
6752 	return 0;
6753 }
6754 
6755 static int hclge_check_cls_flower(struct hclge_dev *hdev,
6756 				  struct flow_cls_offload *cls_flower, int tc)
6757 {
6758 	u32 prio = cls_flower->common.prio;
6759 
6760 	if (tc < 0 || tc > hdev->tc_max) {
6761 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
6762 		return -EINVAL;
6763 	}
6764 
6765 	if (prio == 0 ||
6766 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6767 		dev_err(&hdev->pdev->dev,
6768 			"prio %u should be in range[1, %u]\n",
6769 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6770 		return -EINVAL;
6771 	}
6772 
6773 	if (test_bit(prio - 1, hdev->fd_bmap)) {
6774 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
6775 		return -EINVAL;
6776 	}
6777 	return 0;
6778 }
6779 
6780 static int hclge_add_cls_flower(struct hnae3_handle *handle,
6781 				struct flow_cls_offload *cls_flower,
6782 				int tc)
6783 {
6784 	struct hclge_vport *vport = hclge_get_vport(handle);
6785 	struct hclge_dev *hdev = vport->back;
6786 	struct hclge_fd_rule *rule;
6787 	int ret;
6788 
6789 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6790 		dev_err(&hdev->pdev->dev,
			"please remove all existing fd rules via ethtool first\n");
6792 		return -EINVAL;
6793 	}
6794 
6795 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
6796 	if (ret) {
6797 		dev_err(&hdev->pdev->dev,
6798 			"failed to check cls flower params, ret = %d\n", ret);
6799 		return ret;
6800 	}
6801 
6802 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6803 	if (!rule)
6804 		return -ENOMEM;
6805 
6806 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
6807 	if (ret)
6808 		goto err;
6809 
6810 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
6811 	rule->cls_flower.tc = tc;
6812 	rule->location = cls_flower->common.prio - 1;
6813 	rule->vf_id = 0;
6814 	rule->cls_flower.cookie = cls_flower->cookie;
6815 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
6816 
6817 	spin_lock_bh(&hdev->fd_rule_lock);
6818 	hclge_clear_arfs_rules(handle);
6819 
6820 	ret = hclge_fd_config_rule(hdev, rule);
6821 
6822 	spin_unlock_bh(&hdev->fd_rule_lock);
6823 
6824 	if (ret) {
6825 		dev_err(&hdev->pdev->dev,
6826 			"failed to add cls flower rule, ret = %d\n", ret);
6827 		goto err;
6828 	}
6829 
6830 	return 0;
6831 err:
6832 	kfree(rule);
6833 	return ret;
6834 }
6835 
6836 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
6837 						   unsigned long cookie)
6838 {
6839 	struct hclge_fd_rule *rule;
6840 	struct hlist_node *node;
6841 
6842 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6843 		if (rule->cls_flower.cookie == cookie)
6844 			return rule;
6845 	}
6846 
6847 	return NULL;
6848 }
6849 
6850 static int hclge_del_cls_flower(struct hnae3_handle *handle,
6851 				struct flow_cls_offload *cls_flower)
6852 {
6853 	struct hclge_vport *vport = hclge_get_vport(handle);
6854 	struct hclge_dev *hdev = vport->back;
6855 	struct hclge_fd_rule *rule;
6856 	int ret;
6857 
6858 	spin_lock_bh(&hdev->fd_rule_lock);
6859 
6860 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
6861 	if (!rule) {
6862 		spin_unlock_bh(&hdev->fd_rule_lock);
6863 		return -EINVAL;
6864 	}
6865 
6866 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
6867 				   NULL, false);
6868 	if (ret) {
6869 		dev_err(&hdev->pdev->dev,
6870 			"failed to delete cls flower rule %u, ret = %d\n",
6871 			rule->location, ret);
6872 		spin_unlock_bh(&hdev->fd_rule_lock);
6873 		return ret;
6874 	}
6875 
6876 	ret = hclge_fd_update_rule_list(hdev, NULL, rule->location, false);
6877 	if (ret) {
6878 		dev_err(&hdev->pdev->dev,
6879 			"failed to delete cls flower rule %u in list, ret = %d\n",
6880 			rule->location, ret);
6881 		spin_unlock_bh(&hdev->fd_rule_lock);
6882 		return ret;
6883 	}
6884 
6885 	spin_unlock_bh(&hdev->fd_rule_lock);
6886 
6887 	return 0;
6888 }
6889 
6890 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6891 {
6892 	struct hclge_vport *vport = hclge_get_vport(handle);
6893 	struct hclge_dev *hdev = vport->back;
6894 
6895 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6896 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6897 }
6898 
6899 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6900 {
6901 	struct hclge_vport *vport = hclge_get_vport(handle);
6902 	struct hclge_dev *hdev = vport->back;
6903 
6904 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6905 }
6906 
6907 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6908 {
6909 	struct hclge_vport *vport = hclge_get_vport(handle);
6910 	struct hclge_dev *hdev = vport->back;
6911 
6912 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6913 }
6914 
6915 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6916 {
6917 	struct hclge_vport *vport = hclge_get_vport(handle);
6918 	struct hclge_dev *hdev = vport->back;
6919 
6920 	return hdev->rst_stats.hw_reset_done_cnt;
6921 }
6922 
6923 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6924 {
6925 	struct hclge_vport *vport = hclge_get_vport(handle);
6926 	struct hclge_dev *hdev = vport->back;
6927 	bool clear;
6928 
6929 	hdev->fd_en = enable;
6930 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6931 
6932 	if (!enable) {
6933 		spin_lock_bh(&hdev->fd_rule_lock);
6934 		hclge_del_all_fd_entries(handle, clear);
6935 		spin_unlock_bh(&hdev->fd_rule_lock);
6936 	} else {
6937 		hclge_restore_fd_entries(handle);
6938 	}
6939 }
6940 
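/* Enable or disable the MAC. When enabling, also turn on padding, FCS
 * handling and oversize/undersize frame handling.
 */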
6941 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6942 {
6943 	struct hclge_desc desc;
6944 	struct hclge_config_mac_mode_cmd *req =
6945 		(struct hclge_config_mac_mode_cmd *)desc.data;
6946 	u32 loop_en = 0;
6947 	int ret;
6948 
6949 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6950 
6951 	if (enable) {
6952 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6953 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6954 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6955 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6956 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6957 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6958 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6959 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6960 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6961 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6962 	}
6963 
6964 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6965 
6966 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6967 	if (ret)
6968 		dev_err(&hdev->pdev->dev,
			"mac enable fail, ret = %d.\n", ret);
6970 }
6971 
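/* Read-modify-write the MAC VLAN switch parameter of the given function,
 * updating only the bits selected by param_mask.
 */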
6972 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6973 				     u8 switch_param, u8 param_mask)
6974 {
6975 	struct hclge_mac_vlan_switch_cmd *req;
6976 	struct hclge_desc desc;
6977 	u32 func_id;
6978 	int ret;
6979 
6980 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6981 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6982 
6983 	/* read current config parameter */
6984 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6985 				   true);
6986 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6987 	req->func_id = cpu_to_le32(func_id);
6988 
6989 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6990 	if (ret) {
6991 		dev_err(&hdev->pdev->dev,
6992 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6993 		return ret;
6994 	}
6995 
6996 	/* modify and write new config parameter */
6997 	hclge_cmd_reuse_desc(&desc, false);
6998 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6999 	req->param_mask = param_mask;
7000 
7001 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7002 	if (ret)
7003 		dev_err(&hdev->pdev->dev,
7004 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7005 	return ret;
7006 }
7007 
7008 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7009 				       int link_ret)
7010 {
7011 #define HCLGE_PHY_LINK_STATUS_NUM  200
7012 
7013 	struct phy_device *phydev = hdev->hw.mac.phydev;
7014 	int i = 0;
7015 	int ret;
7016 
7017 	do {
7018 		ret = phy_read_status(phydev);
7019 		if (ret) {
7020 			dev_err(&hdev->pdev->dev,
7021 				"phy update link status fail, ret = %d\n", ret);
7022 			return;
7023 		}
7024 
7025 		if (phydev->link == link_ret)
7026 			break;
7027 
7028 		msleep(HCLGE_LINK_STATUS_MS);
7029 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7030 }
7031 
7032 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7033 {
7034 #define HCLGE_MAC_LINK_STATUS_NUM  100
7035 
7036 	int link_status;
7037 	int i = 0;
7038 	int ret;
7039 
7040 	do {
7041 		ret = hclge_get_mac_link_status(hdev, &link_status);
7042 		if (ret)
7043 			return ret;
7044 		if (link_status == link_ret)
7045 			return 0;
7046 
7047 		msleep(HCLGE_LINK_STATUS_MS);
7048 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7049 	return -EBUSY;
7050 }
7051 
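/* Wait for the PHY (if @is_phy) and then the MAC link status to reach the
 * expected up/down state; returns -EBUSY if the MAC does not get there in
 * time.
 */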
7052 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7053 					  bool is_phy)
7054 {
7055 	int link_ret;
7056 
7057 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7058 
7059 	if (is_phy)
7060 		hclge_phy_link_status_wait(hdev, link_ret);
7061 
7062 	return hclge_mac_link_status_wait(hdev, link_ret);
7063 }
7064 
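/* Toggle the MAC application loopback bit using a read-modify-write of the
 * MAC mode config, keeping all other mode bits unchanged.
 */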
7065 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7066 {
7067 	struct hclge_config_mac_mode_cmd *req;
7068 	struct hclge_desc desc;
7069 	u32 loop_en;
7070 	int ret;
7071 
7072 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7073 	/* 1 Read out the MAC mode config at first */
7074 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7075 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7076 	if (ret) {
7077 		dev_err(&hdev->pdev->dev,
7078 			"mac loopback get fail, ret =%d.\n", ret);
7079 		return ret;
7080 	}
7081 
7082 	/* 2 Then setup the loopback flag */
7083 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7084 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7085 
7086 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7087 
7088 	/* 3 Config mac work mode with the loopback flag
7089 	 * and its original configuration parameters
7090 	 */
7091 	hclge_cmd_reuse_desc(&desc, false);
7092 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7093 	if (ret)
7094 		dev_err(&hdev->pdev->dev,
7095 			"mac loopback set fail, ret =%d.\n", ret);
7096 	return ret;
7097 }
7098 
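/* Configure serdes serial/parallel inner loopback and poll the command
 * result until the firmware reports the operation as done.
 */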
7099 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
7100 				     enum hnae3_loop loop_mode)
7101 {
7102 #define HCLGE_SERDES_RETRY_MS	10
7103 #define HCLGE_SERDES_RETRY_NUM	100
7104 
7105 	struct hclge_serdes_lb_cmd *req;
7106 	struct hclge_desc desc;
7107 	int ret, i = 0;
7108 	u8 loop_mode_b;
7109 
7110 	req = (struct hclge_serdes_lb_cmd *)desc.data;
7111 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
7112 
7113 	switch (loop_mode) {
7114 	case HNAE3_LOOP_SERIAL_SERDES:
7115 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7116 		break;
7117 	case HNAE3_LOOP_PARALLEL_SERDES:
7118 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7119 		break;
7120 	default:
7121 		dev_err(&hdev->pdev->dev,
7122 			"unsupported serdes loopback mode %d\n", loop_mode);
7123 		return -ENOTSUPP;
7124 	}
7125 
7126 	if (en) {
7127 		req->enable = loop_mode_b;
7128 		req->mask = loop_mode_b;
7129 	} else {
7130 		req->mask = loop_mode_b;
7131 	}
7132 
7133 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7134 	if (ret) {
7135 		dev_err(&hdev->pdev->dev,
7136 			"serdes loopback set fail, ret = %d\n", ret);
7137 		return ret;
7138 	}
7139 
7140 	do {
7141 		msleep(HCLGE_SERDES_RETRY_MS);
7142 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
7143 					   true);
7144 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7145 		if (ret) {
7146 			dev_err(&hdev->pdev->dev,
7147 				"serdes loopback get fail, ret = %d\n");
7148 			return ret;
7149 		}
7150 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
7151 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
7152 
7153 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
7154 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
7155 		return -EBUSY;
7156 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
7157 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
7158 		return -EIO;
7159 	}
7160 	return ret;
7161 }
7162 
7163 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
7164 				     enum hnae3_loop loop_mode)
7165 {
7166 	int ret;
7167 
7168 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
7169 	if (ret)
7170 		return ret;
7171 
7172 	hclge_cfg_mac_mode(hdev, en);
7173 
7174 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7175 	if (ret)
7176 		dev_err(&hdev->pdev->dev,
7177 			"serdes loopback config mac mode timeout\n");
7178 
7179 	return ret;
7180 }
7181 
7182 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7183 				     struct phy_device *phydev)
7184 {
7185 	int ret;
7186 
7187 	if (!phydev->suspended) {
7188 		ret = phy_suspend(phydev);
7189 		if (ret)
7190 			return ret;
7191 	}
7192 
7193 	ret = phy_resume(phydev);
7194 	if (ret)
7195 		return ret;
7196 
7197 	return phy_loopback(phydev, true);
7198 }
7199 
7200 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7201 				      struct phy_device *phydev)
7202 {
7203 	int ret;
7204 
7205 	ret = phy_loopback(phydev, false);
7206 	if (ret)
7207 		return ret;
7208 
7209 	return phy_suspend(phydev);
7210 }
7211 
7212 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7213 {
7214 	struct phy_device *phydev = hdev->hw.mac.phydev;
7215 	int ret;
7216 
7217 	if (!phydev)
7218 		return -ENOTSUPP;
7219 
7220 	if (en)
7221 		ret = hclge_enable_phy_loopback(hdev, phydev);
7222 	else
7223 		ret = hclge_disable_phy_loopback(hdev, phydev);
7224 	if (ret) {
7225 		dev_err(&hdev->pdev->dev,
7226 			"set phy loopback fail, ret = %d\n", ret);
7227 		return ret;
7228 	}
7229 
7230 	hclge_cfg_mac_mode(hdev, en);
7231 
7232 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7233 	if (ret)
7234 		dev_err(&hdev->pdev->dev,
7235 			"phy loopback config mac mode timeout\n");
7236 
7237 	return ret;
7238 }
7239 
7240 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
7241 			    int stream_id, bool enable)
7242 {
7243 	struct hclge_desc desc;
7244 	struct hclge_cfg_com_tqp_queue_cmd *req =
7245 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7246 	int ret;
7247 
7248 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7249 	req->tqp_id = cpu_to_le16(tqp_id);
7250 	req->stream_id = cpu_to_le16(stream_id);
7251 	if (enable)
7252 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7253 
7254 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7255 	if (ret)
7256 		dev_err(&hdev->pdev->dev,
7257 			"Tqp enable fail, status =%d.\n", ret);
7258 	return ret;
7259 }
7260 
7261 static int hclge_set_loopback(struct hnae3_handle *handle,
7262 			      enum hnae3_loop loop_mode, bool en)
7263 {
7264 	struct hclge_vport *vport = hclge_get_vport(handle);
7265 	struct hnae3_knic_private_info *kinfo;
7266 	struct hclge_dev *hdev = vport->back;
7267 	int i, ret;
7268 
7269 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7270 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7271 	 * the same, the packets are looped back in the SSU. If SSU loopback
7272 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7273 	 */
7274 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7275 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7276 
7277 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7278 						HCLGE_SWITCH_ALW_LPBK_MASK);
7279 		if (ret)
7280 			return ret;
7281 	}
7282 
7283 	switch (loop_mode) {
7284 	case HNAE3_LOOP_APP:
7285 		ret = hclge_set_app_loopback(hdev, en);
7286 		break;
7287 	case HNAE3_LOOP_SERIAL_SERDES:
7288 	case HNAE3_LOOP_PARALLEL_SERDES:
7289 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
7290 		break;
7291 	case HNAE3_LOOP_PHY:
7292 		ret = hclge_set_phy_loopback(hdev, en);
7293 		break;
7294 	default:
7295 		ret = -ENOTSUPP;
7296 		dev_err(&hdev->pdev->dev,
7297 			"loop_mode %d is not supported\n", loop_mode);
7298 		break;
7299 	}
7300 
7301 	if (ret)
7302 		return ret;
7303 
7304 	kinfo = &vport->nic.kinfo;
7305 	for (i = 0; i < kinfo->num_tqps; i++) {
7306 		ret = hclge_tqp_enable(hdev, i, 0, en);
7307 		if (ret)
7308 			return ret;
7309 	}
7310 
7311 	return 0;
7312 }
7313 
7314 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7315 {
7316 	int ret;
7317 
7318 	ret = hclge_set_app_loopback(hdev, false);
7319 	if (ret)
7320 		return ret;
7321 
7322 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7323 	if (ret)
7324 		return ret;
7325 
7326 	return hclge_cfg_serdes_loopback(hdev, false,
7327 					 HNAE3_LOOP_PARALLEL_SERDES);
7328 }
7329 
7330 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
7331 {
7332 	struct hclge_vport *vport = hclge_get_vport(handle);
7333 	struct hnae3_knic_private_info *kinfo;
7334 	struct hnae3_queue *queue;
7335 	struct hclge_tqp *tqp;
7336 	int i;
7337 
7338 	kinfo = &vport->nic.kinfo;
7339 	for (i = 0; i < kinfo->num_tqps; i++) {
7340 		queue = kinfo->tqp[i];
7341 		tqp = container_of(queue, struct hclge_tqp, q);
7342 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
7343 	}
7344 }
7345 
7346 static void hclge_flush_link_update(struct hclge_dev *hdev)
7347 {
7348 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
7349 
7350 	unsigned long last = hdev->serv_processed_cnt;
7351 	int i = 0;
7352 
7353 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7354 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7355 	       last == hdev->serv_processed_cnt)
7356 		usleep_range(1, 1);
7357 }
7358 
7359 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7360 {
7361 	struct hclge_vport *vport = hclge_get_vport(handle);
7362 	struct hclge_dev *hdev = vport->back;
7363 
7364 	if (enable) {
7365 		hclge_task_schedule(hdev, 0);
7366 	} else {
7367 		/* Set the DOWN flag here to disable link updating */
7368 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
7369 
7370 		/* flush memory to make sure DOWN is seen by service task */
7371 		smp_mb__before_atomic();
7372 		hclge_flush_link_update(hdev);
7373 	}
7374 }
7375 
7376 static int hclge_ae_start(struct hnae3_handle *handle)
7377 {
7378 	struct hclge_vport *vport = hclge_get_vport(handle);
7379 	struct hclge_dev *hdev = vport->back;
7380 
7381 	/* mac enable */
7382 	hclge_cfg_mac_mode(hdev, true);
7383 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7384 	hdev->hw.mac.link = 0;
7385 
7386 	/* reset tqp stats */
7387 	hclge_reset_tqp_stats(handle);
7388 
7389 	hclge_mac_start_phy(hdev);
7390 
7391 	return 0;
7392 }
7393 
7394 static void hclge_ae_stop(struct hnae3_handle *handle)
7395 {
7396 	struct hclge_vport *vport = hclge_get_vport(handle);
7397 	struct hclge_dev *hdev = vport->back;
7398 	int i;
7399 
7400 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
7401 	spin_lock_bh(&hdev->fd_rule_lock);
7402 	hclge_clear_arfs_rules(handle);
7403 	spin_unlock_bh(&hdev->fd_rule_lock);
7404 
7405 	/* If it is not a PF reset, the firmware will disable the MAC,
7406 	 * so we only need to stop the PHY here.
7407 	 */
7408 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7409 	    hdev->reset_type != HNAE3_FUNC_RESET) {
7410 		hclge_mac_stop_phy(hdev);
7411 		hclge_update_link_status(hdev);
7412 		return;
7413 	}
7414 
7415 	for (i = 0; i < handle->kinfo.num_tqps; i++)
7416 		hclge_reset_tqp(handle, i);
7417 
7418 	hclge_config_mac_tnl_int(hdev, false);
7419 
7420 	/* Mac disable */
7421 	hclge_cfg_mac_mode(hdev, false);
7422 
7423 	hclge_mac_stop_phy(hdev);
7424 
7425 	/* reset tqp stats */
7426 	hclge_reset_tqp_stats(handle);
7427 	hclge_update_link_status(hdev);
7428 }
7429 
7430 int hclge_vport_start(struct hclge_vport *vport)
7431 {
7432 	struct hclge_dev *hdev = vport->back;
7433 
7434 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7435 	vport->last_active_jiffies = jiffies;
7436 
7437 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7438 		if (vport->vport_id) {
7439 			hclge_restore_mac_table_common(vport);
7440 			hclge_restore_vport_vlan_table(vport);
7441 		} else {
7442 			hclge_restore_hw_table(hdev);
7443 		}
7444 	}
7445 
7446 	clear_bit(vport->vport_id, hdev->vport_config_block);
7447 
7448 	return 0;
7449 }
7450 
7451 void hclge_vport_stop(struct hclge_vport *vport)
7452 {
7453 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7454 }
7455 
7456 static int hclge_client_start(struct hnae3_handle *handle)
7457 {
7458 	struct hclge_vport *vport = hclge_get_vport(handle);
7459 
7460 	return hclge_vport_start(vport);
7461 }
7462 
7463 static void hclge_client_stop(struct hnae3_handle *handle)
7464 {
7465 	struct hclge_vport *vport = hclge_get_vport(handle);
7466 
7467 	hclge_vport_stop(vport);
7468 }
7469 
7470 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7471 					 u16 cmdq_resp, u8  resp_code,
7472 					 enum hclge_mac_vlan_tbl_opcode op)
7473 {
7474 	struct hclge_dev *hdev = vport->back;
7475 
7476 	if (cmdq_resp) {
7477 		dev_err(&hdev->pdev->dev,
7478 			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
7479 			cmdq_resp);
7480 		return -EIO;
7481 	}
7482 
7483 	if (op == HCLGE_MAC_VLAN_ADD) {
7484 		if (!resp_code || resp_code == 1)
7485 			return 0;
7486 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7487 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
7488 			return -ENOSPC;
7489 
7490 		dev_err(&hdev->pdev->dev,
7491 			"add mac addr failed for undefined, code=%u.\n",
7492 			resp_code);
7493 		return -EIO;
7494 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
7495 		if (!resp_code) {
7496 			return 0;
7497 		} else if (resp_code == 1) {
7498 			dev_dbg(&hdev->pdev->dev,
7499 				"remove mac addr failed for miss.\n");
7500 			return -ENOENT;
7501 		}
7502 
7503 		dev_err(&hdev->pdev->dev,
7504 			"remove mac addr failed for undefined, code=%u.\n",
7505 			resp_code);
7506 		return -EIO;
7507 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
7508 		if (!resp_code) {
7509 			return 0;
7510 		} else if (resp_code == 1) {
7511 			dev_dbg(&hdev->pdev->dev,
7512 				"lookup mac addr failed for miss.\n");
7513 			return -ENOENT;
7514 		}
7515 
7516 		dev_err(&hdev->pdev->dev,
7517 			"lookup mac addr failed for undefined, code=%u.\n",
7518 			resp_code);
7519 		return -EIO;
7520 	}
7521 
7522 	dev_err(&hdev->pdev->dev,
7523 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7524 
7525 	return -EINVAL;
7526 }
7527 
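/* Set or clear the bit of @vfid in the VF bitmap of a multicast MAC/VLAN
 * table entry; the bitmap spans desc[1] (first 192 function ids) and
 * desc[2] (the rest), 32 function ids per data word.
 */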
7528 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7529 {
7530 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7531 
7532 	unsigned int word_num;
7533 	unsigned int bit_num;
7534 
7535 	if (vfid > 255 || vfid < 0)
7536 		return -EIO;
7537 
7538 	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7539 		word_num = vfid / 32;
7540 		bit_num  = vfid % 32;
7541 		if (clr)
7542 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7543 		else
7544 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7545 	} else {
7546 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7547 		bit_num  = vfid % 32;
7548 		if (clr)
7549 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7550 		else
7551 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7552 	}
7553 
7554 	return 0;
7555 }
7556 
7557 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7558 {
7559 #define HCLGE_DESC_NUMBER 3
7560 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7561 	int i, j;
7562 
7563 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7564 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7565 			if (desc[i].data[j])
7566 				return false;
7567 
7568 	return true;
7569 }
7570 
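/* Fill a MAC/VLAN table entry command: mark the entry valid, flag it as
 * multicast when requested, and pack the 6-byte MAC address into the
 * mac_addr_hi32/mac_addr_lo16 fields.
 */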
7571 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7572 				   const u8 *addr, bool is_mc)
7573 {
7574 	const unsigned char *mac_addr = addr;
7575 	u32 high_val = mac_addr[0] | (mac_addr[1] << 8) |
7576 		       (mac_addr[2] << 16) | (mac_addr[3] << 24);
7577 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7578 
7579 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7580 	if (is_mc) {
7581 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7582 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7583 	}
7584 
7585 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7586 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7587 }
7588 
7589 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7590 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7591 {
7592 	struct hclge_dev *hdev = vport->back;
7593 	struct hclge_desc desc;
7594 	u8 resp_code;
7595 	u16 retval;
7596 	int ret;
7597 
7598 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7599 
7600 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7601 
7602 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7603 	if (ret) {
7604 		dev_err(&hdev->pdev->dev,
7605 			"del mac addr failed for cmd_send, ret =%d.\n",
7606 			ret);
7607 		return ret;
7608 	}
7609 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7610 	retval = le16_to_cpu(desc.retval);
7611 
7612 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7613 					     HCLGE_MAC_VLAN_REMOVE);
7614 }
7615 
7616 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7617 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7618 				     struct hclge_desc *desc,
7619 				     bool is_mc)
7620 {
7621 	struct hclge_dev *hdev = vport->back;
7622 	u8 resp_code;
7623 	u16 retval;
7624 	int ret;
7625 
7626 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7627 	if (is_mc) {
7628 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7629 		memcpy(desc[0].data,
7630 		       req,
7631 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7632 		hclge_cmd_setup_basic_desc(&desc[1],
7633 					   HCLGE_OPC_MAC_VLAN_ADD,
7634 					   true);
7635 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7636 		hclge_cmd_setup_basic_desc(&desc[2],
7637 					   HCLGE_OPC_MAC_VLAN_ADD,
7638 					   true);
7639 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7640 	} else {
7641 		memcpy(desc[0].data,
7642 		       req,
7643 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7644 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7645 	}
7646 	if (ret) {
7647 		dev_err(&hdev->pdev->dev,
7648 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7649 			ret);
7650 		return ret;
7651 	}
7652 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7653 	retval = le16_to_cpu(desc[0].retval);
7654 
7655 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7656 					     HCLGE_MAC_VLAN_LKUP);
7657 }
7658 
7659 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7660 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7661 				  struct hclge_desc *mc_desc)
7662 {
7663 	struct hclge_dev *hdev = vport->back;
7664 	int cfg_status;
7665 	u8 resp_code;
7666 	u16 retval;
7667 	int ret;
7668 
7669 	if (!mc_desc) {
7670 		struct hclge_desc desc;
7671 
7672 		hclge_cmd_setup_basic_desc(&desc,
7673 					   HCLGE_OPC_MAC_VLAN_ADD,
7674 					   false);
7675 		memcpy(desc.data, req,
7676 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7677 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7678 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7679 		retval = le16_to_cpu(desc.retval);
7680 
7681 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7682 							   resp_code,
7683 							   HCLGE_MAC_VLAN_ADD);
7684 	} else {
7685 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7686 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7687 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7688 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7689 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7690 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7691 		memcpy(mc_desc[0].data, req,
7692 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7693 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7694 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7695 		retval = le16_to_cpu(mc_desc[0].retval);
7696 
7697 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7698 							   resp_code,
7699 							   HCLGE_MAC_VLAN_ADD);
7700 	}
7701 
7702 	if (ret) {
7703 		dev_err(&hdev->pdev->dev,
7704 			"add mac addr failed for cmd_send, ret =%d.\n",
7705 			ret);
7706 		return ret;
7707 	}
7708 
7709 	return cfg_status;
7710 }
7711 
7712 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7713 			       u16 *allocated_size)
7714 {
7715 	struct hclge_umv_spc_alc_cmd *req;
7716 	struct hclge_desc desc;
7717 	int ret;
7718 
7719 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7720 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7721 
7722 	req->space_size = cpu_to_le32(space_size);
7723 
7724 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7725 	if (ret) {
7726 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7727 			ret);
7728 		return ret;
7729 	}
7730 
7731 	*allocated_size = le32_to_cpu(desc.data[1]);
7732 
7733 	return 0;
7734 }
7735 
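/* Request the wanted unicast MAC VLAN (UMV) table space from the firmware.
 * The allocated space is divided into a private quota per vport, with one
 * extra quota plus the division remainder kept as a shared pool.
 */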
7736 static int hclge_init_umv_space(struct hclge_dev *hdev)
7737 {
7738 	u16 allocated_size = 0;
7739 	int ret;
7740 
7741 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7742 	if (ret)
7743 		return ret;
7744 
7745 	if (allocated_size < hdev->wanted_umv_size)
7746 		dev_warn(&hdev->pdev->dev,
7747 			 "failed to alloc umv space, want %u, get %u\n",
7748 			 hdev->wanted_umv_size, allocated_size);
7749 
7750 	hdev->max_umv_size = allocated_size;
7751 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7752 	hdev->share_umv_size = hdev->priv_umv_size +
7753 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7754 
7755 	return 0;
7756 }
7757 
7758 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7759 {
7760 	struct hclge_vport *vport;
7761 	int i;
7762 
7763 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7764 		vport = &hdev->vport[i];
7765 		vport->used_umv_num = 0;
7766 	}
7767 
7768 	mutex_lock(&hdev->vport_lock);
7769 	hdev->share_umv_size = hdev->priv_umv_size +
7770 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7771 	mutex_unlock(&hdev->vport_lock);
7772 }
7773 
7774 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7775 {
7776 	struct hclge_dev *hdev = vport->back;
7777 	bool is_full;
7778 
7779 	if (need_lock)
7780 		mutex_lock(&hdev->vport_lock);
7781 
7782 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7783 		   hdev->share_umv_size == 0);
7784 
7785 	if (need_lock)
7786 		mutex_unlock(&hdev->vport_lock);
7787 
7788 	return is_full;
7789 }
7790 
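/* Account for one UMV entry being added or freed: entries beyond the
 * vport's private quota consume or release the shared pool. Callers hold
 * hdev->vport_lock.
 */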
7791 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7792 {
7793 	struct hclge_dev *hdev = vport->back;
7794 
7795 	if (is_free) {
7796 		if (vport->used_umv_num > hdev->priv_umv_size)
7797 			hdev->share_umv_size++;
7798 
7799 		if (vport->used_umv_num > 0)
7800 			vport->used_umv_num--;
7801 	} else {
7802 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7803 		    hdev->share_umv_size > 0)
7804 			hdev->share_umv_size--;
7805 		vport->used_umv_num++;
7806 	}
7807 }
7808 
7809 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7810 						  const u8 *mac_addr)
7811 {
7812 	struct hclge_mac_node *mac_node, *tmp;
7813 
7814 	list_for_each_entry_safe(mac_node, tmp, list, node)
7815 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7816 			return mac_node;
7817 
7818 	return NULL;
7819 }
7820 
7821 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7822 				  enum HCLGE_MAC_NODE_STATE state)
7823 {
7824 	switch (state) {
7825 	/* from set_rx_mode or tmp_add_list */
7826 	case HCLGE_MAC_TO_ADD:
7827 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7828 			mac_node->state = HCLGE_MAC_ACTIVE;
7829 		break;
7830 	/* only from set_rx_mode */
7831 	case HCLGE_MAC_TO_DEL:
7832 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7833 			list_del(&mac_node->node);
7834 			kfree(mac_node);
7835 		} else {
7836 			mac_node->state = HCLGE_MAC_TO_DEL;
7837 		}
7838 		break;
7839 	/* only from tmp_add_list; in this case the mac_node->state
7840 	 * won't be ACTIVE.
7841 	 */
7842 	case HCLGE_MAC_ACTIVE:
7843 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7844 			mac_node->state = HCLGE_MAC_ACTIVE;
7845 
7846 		break;
7847 	}
7848 }
7849 
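/* Record a requested MAC address state change (TO_ADD/TO_DEL) on the
 * vport's unicast or multicast list; hclge_sync_mac_table() later applies
 * the pending changes to the hardware table.
 */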
7850 int hclge_update_mac_list(struct hclge_vport *vport,
7851 			  enum HCLGE_MAC_NODE_STATE state,
7852 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7853 			  const unsigned char *addr)
7854 {
7855 	struct hclge_dev *hdev = vport->back;
7856 	struct hclge_mac_node *mac_node;
7857 	struct list_head *list;
7858 
7859 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7860 		&vport->uc_mac_list : &vport->mc_mac_list;
7861 
7862 	spin_lock_bh(&vport->mac_list_lock);
7863 
7864 	/* if the mac addr is already in the mac list, there is no need to add
7865 	 * a new one; just update the mac addr state: convert it to a new
7866 	 * state, remove it, or do nothing.
7867 	 */
7868 	mac_node = hclge_find_mac_node(list, addr);
7869 	if (mac_node) {
7870 		hclge_update_mac_node(mac_node, state);
7871 		spin_unlock_bh(&vport->mac_list_lock);
7872 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7873 		return 0;
7874 	}
7875 
7876 	/* if this address was never added, there is no need to delete it */
7877 	if (state == HCLGE_MAC_TO_DEL) {
7878 		spin_unlock_bh(&vport->mac_list_lock);
7879 		dev_err(&hdev->pdev->dev,
7880 			"failed to delete address %pM from mac list\n",
7881 			addr);
7882 		return -ENOENT;
7883 	}
7884 
7885 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7886 	if (!mac_node) {
7887 		spin_unlock_bh(&vport->mac_list_lock);
7888 		return -ENOMEM;
7889 	}
7890 
7891 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7892 
7893 	mac_node->state = state;
7894 	ether_addr_copy(mac_node->mac_addr, addr);
7895 	list_add_tail(&mac_node->node, list);
7896 
7897 	spin_unlock_bh(&vport->mac_list_lock);
7898 
7899 	return 0;
7900 }
7901 
7902 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7903 			     const unsigned char *addr)
7904 {
7905 	struct hclge_vport *vport = hclge_get_vport(handle);
7906 
7907 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7908 				     addr);
7909 }
7910 
7911 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7912 			     const unsigned char *addr)
7913 {
7914 	struct hclge_dev *hdev = vport->back;
7915 	struct hclge_mac_vlan_tbl_entry_cmd req;
7916 	struct hclge_desc desc;
7917 	u16 egress_port = 0;
7918 	int ret;
7919 
7920 	/* mac addr check */
7921 	if (is_zero_ether_addr(addr) ||
7922 	    is_broadcast_ether_addr(addr) ||
7923 	    is_multicast_ether_addr(addr)) {
7924 		dev_err(&hdev->pdev->dev,
7925 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7926 			 addr, is_zero_ether_addr(addr),
7927 			 is_broadcast_ether_addr(addr),
7928 			 is_multicast_ether_addr(addr));
7929 		return -EINVAL;
7930 	}
7931 
7932 	memset(&req, 0, sizeof(req));
7933 
7934 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7935 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7936 
7937 	req.egress_port = cpu_to_le16(egress_port);
7938 
7939 	hclge_prepare_mac_addr(&req, addr, false);
7940 
7941 	/* Lookup the mac address in the mac_vlan table, and add
7942 	 * it if the entry does not exist. Duplicate unicast entries
7943 	 * are not allowed in the mac vlan table.
7944 	 */
7945 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7946 	if (ret == -ENOENT) {
7947 		mutex_lock(&hdev->vport_lock);
7948 		if (!hclge_is_umv_space_full(vport, false)) {
7949 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7950 			if (!ret)
7951 				hclge_update_umv_space(vport, false);
7952 			mutex_unlock(&hdev->vport_lock);
7953 			return ret;
7954 		}
7955 		mutex_unlock(&hdev->vport_lock);
7956 
7957 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7958 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7959 				hdev->priv_umv_size);
7960 
7961 		return -ENOSPC;
7962 	}
7963 
7964 	/* check if we just hit the duplicate */
7965 	if (!ret) {
7966 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7967 			 vport->vport_id, addr);
7968 		return 0;
7969 	}
7970 
7971 	dev_err(&hdev->pdev->dev,
7972 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7973 		addr);
7974 
7975 	return ret;
7976 }
7977 
7978 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7979 			    const unsigned char *addr)
7980 {
7981 	struct hclge_vport *vport = hclge_get_vport(handle);
7982 
7983 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7984 				     addr);
7985 }
7986 
7987 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7988 			    const unsigned char *addr)
7989 {
7990 	struct hclge_dev *hdev = vport->back;
7991 	struct hclge_mac_vlan_tbl_entry_cmd req;
7992 	int ret;
7993 
7994 	/* mac addr check */
7995 	if (is_zero_ether_addr(addr) ||
7996 	    is_broadcast_ether_addr(addr) ||
7997 	    is_multicast_ether_addr(addr)) {
7998 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7999 			addr);
8000 		return -EINVAL;
8001 	}
8002 
8003 	memset(&req, 0, sizeof(req));
8004 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8005 	hclge_prepare_mac_addr(&req, addr, false);
8006 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8007 	if (!ret) {
8008 		mutex_lock(&hdev->vport_lock);
8009 		hclge_update_umv_space(vport, true);
8010 		mutex_unlock(&hdev->vport_lock);
8011 	} else if (ret == -ENOENT) {
8012 		ret = 0;
8013 	}
8014 
8015 	return ret;
8016 }
8017 
8018 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8019 			     const unsigned char *addr)
8020 {
8021 	struct hclge_vport *vport = hclge_get_vport(handle);
8022 
8023 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8024 				     addr);
8025 }
8026 
8027 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8028 			     const unsigned char *addr)
8029 {
8030 	struct hclge_dev *hdev = vport->back;
8031 	struct hclge_mac_vlan_tbl_entry_cmd req;
8032 	struct hclge_desc desc[3];
8033 	int status;
8034 
8035 	/* mac addr check */
8036 	if (!is_multicast_ether_addr(addr)) {
8037 		dev_err(&hdev->pdev->dev,
8038 			"Add mc mac err! invalid mac:%pM.\n",
8039 			 addr);
8040 		return -EINVAL;
8041 	}
8042 	memset(&req, 0, sizeof(req));
8043 	hclge_prepare_mac_addr(&req, addr, true);
8044 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8045 	if (status) {
8046 		/* This mac addr does not exist, add a new entry for it */
8047 		memset(desc[0].data, 0, sizeof(desc[0].data));
8048 		memset(desc[1].data, 0, sizeof(desc[1].data));
8049 		memset(desc[2].data, 0, sizeof(desc[2].data));
8050 	}
8051 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8052 	if (status)
8053 		return status;
8054 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8055 
8056 	/* if the table has already overflowed, do not print the message each time */
8057 	if (status == -ENOSPC &&
8058 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8059 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8060 
8061 	return status;
8062 }
8063 
8064 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8065 			    const unsigned char *addr)
8066 {
8067 	struct hclge_vport *vport = hclge_get_vport(handle);
8068 
8069 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8070 				     addr);
8071 }
8072 
8073 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8074 			    const unsigned char *addr)
8075 {
8076 	struct hclge_dev *hdev = vport->back;
8077 	struct hclge_mac_vlan_tbl_entry_cmd req;
8078 	enum hclge_cmd_status status;
8079 	struct hclge_desc desc[3];
8080 
8081 	/* mac addr check */
8082 	if (!is_multicast_ether_addr(addr)) {
8083 		dev_dbg(&hdev->pdev->dev,
8084 			"Remove mc mac err! invalid mac:%pM.\n",
8085 			 addr);
8086 		return -EINVAL;
8087 	}
8088 
8089 	memset(&req, 0, sizeof(req));
8090 	hclge_prepare_mac_addr(&req, addr, true);
8091 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8092 	if (!status) {
8093 		/* This mac addr exists, remove this handle's VFID from it */
8094 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8095 		if (status)
8096 			return status;
8097 
8098 		if (hclge_is_all_function_id_zero(desc))
8099 			/* All the vfids are zero, so delete this entry */
8100 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8101 		else
8102 			/* Not all the vfids are zero, update the vfids */
8103 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8104 
8105 	} else if (status == -ENOENT) {
8106 		status = 0;
8107 	}
8108 
8109 	return status;
8110 }
8111 
8112 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8113 				      struct list_head *list,
8114 				      int (*sync)(struct hclge_vport *,
8115 						  const unsigned char *))
8116 {
8117 	struct hclge_mac_node *mac_node, *tmp;
8118 	int ret;
8119 
8120 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8121 		ret = sync(vport, mac_node->mac_addr);
8122 		if (!ret) {
8123 			mac_node->state = HCLGE_MAC_ACTIVE;
8124 		} else {
8125 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8126 				&vport->state);
8127 			break;
8128 		}
8129 	}
8130 }
8131 
8132 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8133 					struct list_head *list,
8134 					int (*unsync)(struct hclge_vport *,
8135 						      const unsigned char *))
8136 {
8137 	struct hclge_mac_node *mac_node, *tmp;
8138 	int ret;
8139 
8140 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8141 		ret = unsync(vport, mac_node->mac_addr);
8142 		if (!ret || ret == -ENOENT) {
8143 			list_del(&mac_node->node);
8144 			kfree(mac_node);
8145 		} else {
8146 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8147 				&vport->state);
8148 			break;
8149 		}
8150 	}
8151 }
8152 
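/* Merge tmp_add_list back into the vport mac list after the hardware sync,
 * returning true only if every address made it into the hardware table.
 */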
8153 static bool hclge_sync_from_add_list(struct list_head *add_list,
8154 				     struct list_head *mac_list)
8155 {
8156 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8157 	bool all_added = true;
8158 
8159 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8160 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8161 			all_added = false;
8162 
8163 		/* if the mac address from tmp_add_list is not in the
8164 		 * uc/mc_mac_list, a TO_DEL request was received during the
8165 		 * time window of adding the mac address into the mac table.
8166 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
8167 		 * will be removed next time. Otherwise it must be TO_ADD,
8168 		 * meaning the address has not been added into the mac table,
8169 		 * so just remove the mac node.
8170 		 */
8171 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8172 		if (new_node) {
8173 			hclge_update_mac_node(new_node, mac_node->state);
8174 			list_del(&mac_node->node);
8175 			kfree(mac_node);
8176 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8177 			mac_node->state = HCLGE_MAC_TO_DEL;
8178 			list_del(&mac_node->node);
8179 			list_add_tail(&mac_node->node, mac_list);
8180 		} else {
8181 			list_del(&mac_node->node);
8182 			kfree(mac_node);
8183 		}
8184 	}
8185 
8186 	return all_added;
8187 }
8188 
8189 static void hclge_sync_from_del_list(struct list_head *del_list,
8190 				     struct list_head *mac_list)
8191 {
8192 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8193 
8194 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8195 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8196 		if (new_node) {
8197 			/* If the mac addr exists in the mac list, a new TO_ADD
8198 			 * request was received during the time window of
8199 			 * configuring the mac address. Since the mac node
8200 			 * state is TO_ADD and the address is already in the
8201 			 * hardware (because the delete failed), we just need
8202 			 * to change the mac node state to ACTIVE.
8203 			 */
8204 			new_node->state = HCLGE_MAC_ACTIVE;
8205 			list_del(&mac_node->node);
8206 			kfree(mac_node);
8207 		} else {
8208 			list_del(&mac_node->node);
8209 			list_add_tail(&mac_node->node, mac_list);
8210 		}
8211 	}
8212 }
8213 
8214 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8215 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8216 					bool is_all_added)
8217 {
8218 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8219 		if (is_all_added)
8220 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8221 		else
8222 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8223 	} else {
8224 		if (is_all_added)
8225 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8226 		else
8227 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8228 	}
8229 }
8230 
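/* Push the pending MAC address changes of one vport to hardware: collect
 * TO_DEL and TO_ADD nodes under the list lock, issue the delete and add
 * commands outside the lock, then merge the results back into the list.
 */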
8231 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8232 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8233 {
8234 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8235 	struct list_head tmp_add_list, tmp_del_list;
8236 	struct list_head *list;
8237 	bool all_added;
8238 
8239 	INIT_LIST_HEAD(&tmp_add_list);
8240 	INIT_LIST_HEAD(&tmp_del_list);
8241 
8242 	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
8243 	 * we can add/delete these mac addrs outside the spin lock
8244 	 */
8245 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8246 		&vport->uc_mac_list : &vport->mc_mac_list;
8247 
8248 	spin_lock_bh(&vport->mac_list_lock);
8249 
8250 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8251 		switch (mac_node->state) {
8252 		case HCLGE_MAC_TO_DEL:
8253 			list_del(&mac_node->node);
8254 			list_add_tail(&mac_node->node, &tmp_del_list);
8255 			break;
8256 		case HCLGE_MAC_TO_ADD:
8257 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8258 			if (!new_node)
8259 				goto stop_traverse;
8260 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8261 			new_node->state = mac_node->state;
8262 			list_add_tail(&new_node->node, &tmp_add_list);
8263 			break;
8264 		default:
8265 			break;
8266 		}
8267 	}
8268 
8269 stop_traverse:
8270 	spin_unlock_bh(&vport->mac_list_lock);
8271 
8272 	/* delete first, in order to get max mac table space for adding */
8273 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8274 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8275 					    hclge_rm_uc_addr_common);
8276 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8277 					  hclge_add_uc_addr_common);
8278 	} else {
8279 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8280 					    hclge_rm_mc_addr_common);
8281 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8282 					  hclge_add_mc_addr_common);
8283 	}
8284 
8285 	/* if adding/deleting some mac addresses failed, move them back to
8286 	 * the mac_list and retry next time.
8287 	 */
8288 	spin_lock_bh(&vport->mac_list_lock);
8289 
8290 	hclge_sync_from_del_list(&tmp_del_list, list);
8291 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8292 
8293 	spin_unlock_bh(&vport->mac_list_lock);
8294 
8295 	hclge_update_overflow_flags(vport, mac_type, all_added);
8296 }
8297 
8298 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8299 {
8300 	struct hclge_dev *hdev = vport->back;
8301 
8302 	if (test_bit(vport->vport_id, hdev->vport_config_block))
8303 		return false;
8304 
8305 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8306 		return true;
8307 
8308 	return false;
8309 }
8310 
8311 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8312 {
8313 	int i;
8314 
8315 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8316 		struct hclge_vport *vport = &hdev->vport[i];
8317 
8318 		if (!hclge_need_sync_mac_table(vport))
8319 			continue;
8320 
8321 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8322 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8323 	}
8324 }
8325 
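/* Remove all MAC addresses of a vport from the hardware table. When
 * @is_del_list is false (e.g. across a VF reset), active entries are kept
 * in the software list marked TO_ADD so they can be restored afterwards.
 */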
8326 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8327 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
8328 {
8329 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8330 	struct hclge_mac_node *mac_cfg, *tmp;
8331 	struct hclge_dev *hdev = vport->back;
8332 	struct list_head tmp_del_list, *list;
8333 	int ret;
8334 
8335 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8336 		list = &vport->uc_mac_list;
8337 		unsync = hclge_rm_uc_addr_common;
8338 	} else {
8339 		list = &vport->mc_mac_list;
8340 		unsync = hclge_rm_mc_addr_common;
8341 	}
8342 
8343 	INIT_LIST_HEAD(&tmp_del_list);
8344 
8345 	if (!is_del_list)
8346 		set_bit(vport->vport_id, hdev->vport_config_block);
8347 
8348 	spin_lock_bh(&vport->mac_list_lock);
8349 
8350 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8351 		switch (mac_cfg->state) {
8352 		case HCLGE_MAC_TO_DEL:
8353 		case HCLGE_MAC_ACTIVE:
8354 			list_del(&mac_cfg->node);
8355 			list_add_tail(&mac_cfg->node, &tmp_del_list);
8356 			break;
8357 		case HCLGE_MAC_TO_ADD:
8358 			if (is_del_list) {
8359 				list_del(&mac_cfg->node);
8360 				kfree(mac_cfg);
8361 			}
8362 			break;
8363 		}
8364 	}
8365 
8366 	spin_unlock_bh(&vport->mac_list_lock);
8367 
8368 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
8369 		ret = unsync(vport, mac_cfg->mac_addr);
8370 		if (!ret || ret == -ENOENT) {
8371 			/* clear all mac addrs from hardware, but keep these
8372 			 * mac addrs in the mac list, and restore them after
8373 			 * the vf reset finishes.
8374 			 */
8375 			if (!is_del_list &&
8376 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
8377 				mac_cfg->state = HCLGE_MAC_TO_ADD;
8378 			} else {
8379 				list_del(&mac_cfg->node);
8380 				kfree(mac_cfg);
8381 			}
8382 		} else if (is_del_list) {
8383 			mac_cfg->state = HCLGE_MAC_TO_DEL;
8384 		}
8385 	}
8386 
8387 	spin_lock_bh(&vport->mac_list_lock);
8388 
8389 	hclge_sync_from_del_list(&tmp_del_list, list);
8390 
8391 	spin_unlock_bh(&vport->mac_list_lock);
8392 }
8393 
8394 /* remove all mac addresses when uninitializing */
8395 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8396 					enum HCLGE_MAC_ADDR_TYPE mac_type)
8397 {
8398 	struct hclge_mac_node *mac_node, *tmp;
8399 	struct hclge_dev *hdev = vport->back;
8400 	struct list_head tmp_del_list, *list;
8401 
8402 	INIT_LIST_HEAD(&tmp_del_list);
8403 
8404 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8405 		&vport->uc_mac_list : &vport->mc_mac_list;
8406 
8407 	spin_lock_bh(&vport->mac_list_lock);
8408 
8409 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8410 		switch (mac_node->state) {
8411 		case HCLGE_MAC_TO_DEL:
8412 		case HCLGE_MAC_ACTIVE:
8413 			list_del(&mac_node->node);
8414 			list_add_tail(&mac_node->node, &tmp_del_list);
8415 			break;
8416 		case HCLGE_MAC_TO_ADD:
8417 			list_del(&mac_node->node);
8418 			kfree(mac_node);
8419 			break;
8420 		}
8421 	}
8422 
8423 	spin_unlock_bh(&vport->mac_list_lock);
8424 
8425 	if (mac_type == HCLGE_MAC_ADDR_UC)
8426 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8427 					    hclge_rm_uc_addr_common);
8428 	else
8429 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8430 					    hclge_rm_mc_addr_common);
8431 
8432 	if (!list_empty(&tmp_del_list))
8433 		dev_warn(&hdev->pdev->dev,
8434 			 "failed to completely uninit %s mac list for vport %u\n",
8435 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8436 			 vport->vport_id);
8437 
8438 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8439 		list_del(&mac_node->node);
8440 		kfree(mac_node);
8441 	}
8442 }
8443 
8444 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8445 {
8446 	struct hclge_vport *vport;
8447 	int i;
8448 
8449 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8450 		vport = &hdev->vport[i];
8451 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8452 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8453 	}
8454 }
8455 
8456 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8457 					      u16 cmdq_resp, u8 resp_code)
8458 {
8459 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
8460 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
8461 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
8462 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
8463 
8464 	int return_status;
8465 
8466 	if (cmdq_resp) {
8467 		dev_err(&hdev->pdev->dev,
8468 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8469 			cmdq_resp);
8470 		return -EIO;
8471 	}
8472 
8473 	switch (resp_code) {
8474 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
8475 	case HCLGE_ETHERTYPE_ALREADY_ADD:
8476 		return_status = 0;
8477 		break;
8478 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8479 		dev_err(&hdev->pdev->dev,
8480 			"add mac ethertype failed for manager table overflow.\n");
8481 		return_status = -EIO;
8482 		break;
8483 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
8484 		dev_err(&hdev->pdev->dev,
8485 			"add mac ethertype failed for key conflict.\n");
8486 		return_status = -EIO;
8487 		break;
8488 	default:
8489 		dev_err(&hdev->pdev->dev,
8490 			"add mac ethertype failed for undefined, code=%u.\n",
8491 			resp_code);
8492 		return_status = -EIO;
8493 	}
8494 
8495 	return return_status;
8496 }
8497 
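/* Return true if @mac_addr is already in use, either in the hardware
 * MAC/VLAN table or as the configured MAC of another VF.
 */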
8498 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
8499 				     u8 *mac_addr)
8500 {
8501 	struct hclge_mac_vlan_tbl_entry_cmd req;
8502 	struct hclge_dev *hdev = vport->back;
8503 	struct hclge_desc desc;
8504 	u16 egress_port = 0;
8505 	int i;
8506 
8507 	if (is_zero_ether_addr(mac_addr))
8508 		return false;
8509 
8510 	memset(&req, 0, sizeof(req));
8511 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8512 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8513 	req.egress_port = cpu_to_le16(egress_port);
8514 	hclge_prepare_mac_addr(&req, mac_addr, false);
8515 
8516 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8517 		return true;
8518 
8519 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8520 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8521 		if (i != vf_idx &&
8522 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8523 			return true;
8524 
8525 	return false;
8526 }
8527 
8528 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8529 			    u8 *mac_addr)
8530 {
8531 	struct hclge_vport *vport = hclge_get_vport(handle);
8532 	struct hclge_dev *hdev = vport->back;
8533 
8534 	vport = hclge_get_vf_vport(hdev, vf);
8535 	if (!vport)
8536 		return -EINVAL;
8537 
8538 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8539 		dev_info(&hdev->pdev->dev,
8540 			 "Specified MAC(=%pM) is the same as before, no change committed!\n",
8541 			 mac_addr);
8542 		return 0;
8543 	}
8544 
8545 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8546 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8547 			mac_addr);
8548 		return -EEXIST;
8549 	}
8550 
8551 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8552 
8553 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8554 		dev_info(&hdev->pdev->dev,
8555 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8556 			 vf, mac_addr);
8557 		return hclge_inform_reset_assert_to_vf(vport);
8558 	}
8559 
8560 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8561 		 vf, mac_addr);
8562 	return 0;
8563 }
8564 
8565 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8566 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8567 {
8568 	struct hclge_desc desc;
8569 	u8 resp_code;
8570 	u16 retval;
8571 	int ret;
8572 
8573 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8574 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8575 
8576 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8577 	if (ret) {
8578 		dev_err(&hdev->pdev->dev,
8579 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8580 			ret);
8581 		return ret;
8582 	}
8583 
8584 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8585 	retval = le16_to_cpu(desc.retval);
8586 
8587 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8588 }
8589 
8590 static int init_mgr_tbl(struct hclge_dev *hdev)
8591 {
8592 	int ret;
8593 	int i;
8594 
8595 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8596 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8597 		if (ret) {
8598 			dev_err(&hdev->pdev->dev,
8599 				"add mac ethertype failed, ret =%d.\n",
8600 				ret);
8601 			return ret;
8602 		}
8603 	}
8604 
8605 	return 0;
8606 }
8607 
8608 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8609 {
8610 	struct hclge_vport *vport = hclge_get_vport(handle);
8611 	struct hclge_dev *hdev = vport->back;
8612 
8613 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8614 }
8615 
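/* Queue the device MAC address change on the unicast list: put the new
 * address at the list head and mark the old one (if any) for deletion; the
 * change is applied to hardware later by the MAC table sync.
 */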
8616 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8617 				       const u8 *old_addr, const u8 *new_addr)
8618 {
8619 	struct list_head *list = &vport->uc_mac_list;
8620 	struct hclge_mac_node *old_node, *new_node;
8621 
8622 	new_node = hclge_find_mac_node(list, new_addr);
8623 	if (!new_node) {
8624 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8625 		if (!new_node)
8626 			return -ENOMEM;
8627 
8628 		new_node->state = HCLGE_MAC_TO_ADD;
8629 		ether_addr_copy(new_node->mac_addr, new_addr);
8630 		list_add(&new_node->node, list);
8631 	} else {
8632 		if (new_node->state == HCLGE_MAC_TO_DEL)
8633 			new_node->state = HCLGE_MAC_ACTIVE;
8634 
8635 		/* make sure the new addr is at the list head, so the dev
8636 		 * addr is not left out of the mac table due to the umv space
8637 		 * limitation after a global/IMP reset, which clears the mac
8638 		 * table in hardware.
8639 		 */
8640 		list_move(&new_node->node, list);
8641 	}
8642 
8643 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8644 		old_node = hclge_find_mac_node(list, old_addr);
8645 		if (old_node) {
8646 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8647 				list_del(&old_node->node);
8648 				kfree(old_node);
8649 			} else {
8650 				old_node->state = HCLGE_MAC_TO_DEL;
8651 			}
8652 		}
8653 	}
8654 
8655 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8656 
8657 	return 0;
8658 }
8659 
8660 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8661 			      bool is_first)
8662 {
8663 	const unsigned char *new_addr = (const unsigned char *)p;
8664 	struct hclge_vport *vport = hclge_get_vport(handle);
8665 	struct hclge_dev *hdev = vport->back;
8666 	unsigned char *old_addr = NULL;
8667 	int ret;
8668 
8669 	/* mac addr check */
8670 	if (is_zero_ether_addr(new_addr) ||
8671 	    is_broadcast_ether_addr(new_addr) ||
8672 	    is_multicast_ether_addr(new_addr)) {
8673 		dev_err(&hdev->pdev->dev,
8674 			"change uc mac err! invalid mac: %pM.\n",
8675 			 new_addr);
8676 		return -EINVAL;
8677 	}
8678 
8679 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8680 	if (ret) {
8681 		dev_err(&hdev->pdev->dev,
8682 			"failed to configure mac pause address, ret = %d\n",
8683 			ret);
8684 		return ret;
8685 	}
8686 
8687 	if (!is_first)
8688 		old_addr = hdev->hw.mac.mac_addr;
8689 
8690 	spin_lock_bh(&vport->mac_list_lock);
8691 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8692 	if (ret) {
8693 		dev_err(&hdev->pdev->dev,
8694 			"failed to change the mac addr:%pM, ret = %d\n",
8695 			new_addr, ret);
8696 		spin_unlock_bh(&vport->mac_list_lock);
8697 
8698 		if (!is_first)
8699 			hclge_pause_addr_cfg(hdev, old_addr);
8700 
8701 		return ret;
8702 	}
8703 	/* we must update the dev addr with the spin lock held, to prevent
8704 	 * the dev addr from being removed by the set_rx_mode path.
8705 	 */
8706 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8707 	spin_unlock_bh(&vport->mac_list_lock);
8708 
8709 	hclge_task_schedule(hdev, 0);
8710 
8711 	return 0;
8712 }
8713 
8714 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8715 			  int cmd)
8716 {
8717 	struct hclge_vport *vport = hclge_get_vport(handle);
8718 	struct hclge_dev *hdev = vport->back;
8719 
8720 	if (!hdev->hw.mac.phydev)
8721 		return -EOPNOTSUPP;
8722 
8723 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8724 }
8725 
8726 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8727 				      u8 fe_type, bool filter_en, u8 vf_id)
8728 {
8729 	struct hclge_vlan_filter_ctrl_cmd *req;
8730 	struct hclge_desc desc;
8731 	int ret;
8732 
8733 	/* read current vlan filter parameter */
8734 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8735 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8736 	req->vlan_type = vlan_type;
8737 	req->vf_id = vf_id;
8738 
8739 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8740 	if (ret) {
8741 		dev_err(&hdev->pdev->dev,
8742 			"failed to get vlan filter config, ret = %d.\n", ret);
8743 		return ret;
8744 	}
8745 
8746 	/* modify and write new config parameter */
8747 	hclge_cmd_reuse_desc(&desc, false);
8748 	req->vlan_fe = filter_en ?
8749 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8750 
8751 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8752 	if (ret)
8753 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8754 			ret);
8755 
8756 	return ret;
8757 }
8758 
8759 #define HCLGE_FILTER_TYPE_VF		0
8760 #define HCLGE_FILTER_TYPE_PORT		1
8761 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8762 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8763 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8764 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8765 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8766 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8767 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8768 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8769 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8770 
8771 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8772 {
8773 	struct hclge_vport *vport = hclge_get_vport(handle);
8774 	struct hclge_dev *hdev = vport->back;
8775 
8776 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8777 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8778 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8779 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8780 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8781 	} else {
8782 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8783 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8784 					   0);
8785 	}
8786 	if (enable)
8787 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8788 	else
8789 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8790 }
8791 
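/* Add or remove @vlan in the VF (per-function) vlan filter. The command
 * carries a function bitmap split across two descriptors, and the response
 * code also tells us when the vf vlan table has overflowed.
 */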
8792 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8793 				    bool is_kill, u16 vlan,
8794 				    __be16 proto)
8795 {
8796 	struct hclge_vport *vport = &hdev->vport[vfid];
8797 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8798 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8799 	struct hclge_desc desc[2];
8800 	u8 vf_byte_val;
8801 	u8 vf_byte_off;
8802 	int ret;
8803 
8804 	/* if the vf vlan table is full, firmware disables the vf vlan filter;
8805 	 * it is then neither possible nor necessary to add new vlan ids.
8806 	 * If spoof check is enabled and the vf vlan table is full, new vlans
8807 	 * must not be added: tx packets with those vlan ids would be dropped.
8808 	 */
8809 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8810 		if (vport->vf_info.spoofchk && vlan) {
8811 			dev_err(&hdev->pdev->dev,
8812 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8813 			return -EPERM;
8814 		}
8815 		return 0;
8816 	}
8817 
8818 	hclge_cmd_setup_basic_desc(&desc[0],
8819 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8820 	hclge_cmd_setup_basic_desc(&desc[1],
8821 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8822 
8823 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8824 
8825 	vf_byte_off = vfid / 8;
8826 	vf_byte_val = 1 << (vfid % 8);
8827 
8828 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8829 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8830 
8831 	req0->vlan_id  = cpu_to_le16(vlan);
8832 	req0->vlan_cfg = is_kill;
8833 
8834 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8835 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8836 	else
8837 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8838 
8839 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8840 	if (ret) {
8841 		dev_err(&hdev->pdev->dev,
8842 			"Send vf vlan command fail, ret =%d.\n",
8843 			ret);
8844 		return ret;
8845 	}
8846 
8847 	if (!is_kill) {
8848 #define HCLGE_VF_VLAN_NO_ENTRY	2
8849 		if (!req0->resp_code || req0->resp_code == 1)
8850 			return 0;
8851 
8852 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8853 			set_bit(vfid, hdev->vf_vlan_full);
8854 			dev_warn(&hdev->pdev->dev,
8855 				 "vf vlan table is full, vf vlan filter is disabled\n");
8856 			return 0;
8857 		}
8858 
8859 		dev_err(&hdev->pdev->dev,
8860 			"Add vf vlan filter fail, ret =%u.\n",
8861 			req0->resp_code);
8862 	} else {
8863 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8864 		if (!req0->resp_code)
8865 			return 0;
8866 
8867 		/* the vf vlan filter is disabled when the vf vlan table is
8868 		 * full, so new vlan ids are not added into the vf vlan table.
8869 		 * Just return 0 without a warning to avoid massive verbose
8870 		 * logs during unload.
8871 		 */
8872 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8873 			return 0;
8874 
8875 		dev_err(&hdev->pdev->dev,
8876 			"Kill vf vlan filter fail, ret =%u.\n",
8877 			req0->resp_code);
8878 	}
8879 
8880 	return -EIO;
8881 }
8882 
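/* Add or remove one vlan id in the port vlan filter table. The vlan id is
 * encoded as a block offset of HCLGE_VLAN_ID_OFFSET_STEP entries plus a
 * single bit set in the block's offset bitmap.
 */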
8883 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8884 				      u16 vlan_id, bool is_kill)
8885 {
8886 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8887 	struct hclge_desc desc;
8888 	u8 vlan_offset_byte_val;
8889 	u8 vlan_offset_byte;
8890 	u8 vlan_offset_160;
8891 	int ret;
8892 
8893 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8894 
8895 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8896 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8897 			   HCLGE_VLAN_BYTE_SIZE;
8898 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8899 
8900 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8901 	req->vlan_offset = vlan_offset_160;
8902 	req->vlan_cfg = is_kill;
8903 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8904 
8905 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8906 	if (ret)
8907 		dev_err(&hdev->pdev->dev,
8908 			"port vlan command, send fail, ret =%d.\n", ret);
8909 	return ret;
8910 }
8911 
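/* Update the vf and port vlan filters for one vport. The vf table is
 * always updated, while hdev->vlan_table tracks which vports use each
 * vlan, so the port table is only written when the first vport joins or
 * the last vport leaves a vlan.
 */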
8912 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8913 				    u16 vport_id, u16 vlan_id,
8914 				    bool is_kill)
8915 {
8916 	u16 vport_idx, vport_num = 0;
8917 	int ret;
8918 
8919 	if (is_kill && !vlan_id)
8920 		return 0;
8921 
8922 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8923 				       proto);
8924 	if (ret) {
8925 		dev_err(&hdev->pdev->dev,
8926 			"Set %u vport vlan filter config fail, ret =%d.\n",
8927 			vport_id, ret);
8928 		return ret;
8929 	}
8930 
8931 	/* vlan 0 may be added twice when 8021q module is enabled */
8932 	if (!is_kill && !vlan_id &&
8933 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8934 		return 0;
8935 
8936 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8937 		dev_err(&hdev->pdev->dev,
8938 			"Add port vlan failed, vport %u is already in vlan %u\n",
8939 			vport_id, vlan_id);
8940 		return -EINVAL;
8941 	}
8942 
8943 	if (is_kill &&
8944 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8945 		dev_err(&hdev->pdev->dev,
8946 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8947 			vport_id, vlan_id);
8948 		return -EINVAL;
8949 	}
8950 
8951 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8952 		vport_num++;
8953 
8954 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8955 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8956 						 is_kill);
8957 
8958 	return ret;
8959 }
8960 
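/* Push the vport's tx vlan offload settings (default tags, accept and
 * insert flags) to hardware; the vport is selected by its bit in the vf
 * bitmap of the VLAN_PORT_TX_CFG command.
 */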
8961 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8962 {
8963 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8964 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8965 	struct hclge_dev *hdev = vport->back;
8966 	struct hclge_desc desc;
8967 	u16 bmap_index;
8968 	int status;
8969 
8970 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8971 
8972 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8973 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8974 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8975 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8976 		      vcfg->accept_tag1 ? 1 : 0);
8977 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8978 		      vcfg->accept_untag1 ? 1 : 0);
8979 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8980 		      vcfg->accept_tag2 ? 1 : 0);
8981 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8982 		      vcfg->accept_untag2 ? 1 : 0);
8983 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8984 		      vcfg->insert_tag1_en ? 1 : 0);
8985 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8986 		      vcfg->insert_tag2_en ? 1 : 0);
8987 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
8988 		      vcfg->tag_shift_mode_en ? 1 : 0);
8989 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8990 
8991 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8992 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8993 			HCLGE_VF_NUM_PER_BYTE;
8994 	req->vf_bitmap[bmap_index] =
8995 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8996 
8997 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8998 	if (status)
8999 		dev_err(&hdev->pdev->dev,
9000 			"Send port txvlan cfg command fail, ret =%d\n",
9001 			status);
9002 
9003 	return status;
9004 }
9005 
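/* Push the vport's rx vlan offload settings (strip, show and discard
 * flags for tag1/tag2) to hardware, selecting the vport the same way as
 * the tx configuration above.
 */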
9006 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9007 {
9008 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9009 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9010 	struct hclge_dev *hdev = vport->back;
9011 	struct hclge_desc desc;
9012 	u16 bmap_index;
9013 	int status;
9014 
9015 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9016 
9017 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9018 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9019 		      vcfg->strip_tag1_en ? 1 : 0);
9020 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9021 		      vcfg->strip_tag2_en ? 1 : 0);
9022 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9023 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9024 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9025 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9026 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9027 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9028 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9029 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9030 
9031 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9032 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9033 			HCLGE_VF_NUM_PER_BYTE;
9034 	req->vf_bitmap[bmap_index] =
9035 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9036 
9037 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9038 	if (status)
9039 		dev_err(&hdev->pdev->dev,
9040 			"Send port rxvlan cfg command fail, ret =%d\n",
9041 			status);
9042 
9043 	return status;
9044 }
9045 
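/* Select the tx insert/accept and rx strip settings for a vport according
 * to the port based vlan state, then write them to hardware via the tx
 * and rx offload commands.
 */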
9046 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9047 				  u16 port_base_vlan_state,
9048 				  u16 vlan_tag)
9049 {
9050 	int ret;
9051 
9052 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9053 		vport->txvlan_cfg.accept_tag1 = true;
9054 		vport->txvlan_cfg.insert_tag1_en = false;
9055 		vport->txvlan_cfg.default_tag1 = 0;
9056 	} else {
9057 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9058 
9059 		vport->txvlan_cfg.accept_tag1 =
9060 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9061 		vport->txvlan_cfg.insert_tag1_en = true;
9062 		vport->txvlan_cfg.default_tag1 = vlan_tag;
9063 	}
9064 
9065 	vport->txvlan_cfg.accept_untag1 = true;
9066 
9067 	/* accept_tag2 and accept_untag2 are not supported on
9068 	 * pdev revision(0x20); newer revisions support them, but
9069 	 * these two fields cannot be configured by the user.
9070 	 */
9071 	vport->txvlan_cfg.accept_tag2 = true;
9072 	vport->txvlan_cfg.accept_untag2 = true;
9073 	vport->txvlan_cfg.insert_tag2_en = false;
9074 	vport->txvlan_cfg.default_tag2 = 0;
9075 	vport->txvlan_cfg.tag_shift_mode_en = true;
9076 
9077 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9078 		vport->rxvlan_cfg.strip_tag1_en = false;
9079 		vport->rxvlan_cfg.strip_tag2_en =
9080 				vport->rxvlan_cfg.rx_vlan_offload_en;
9081 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9082 	} else {
9083 		vport->rxvlan_cfg.strip_tag1_en =
9084 				vport->rxvlan_cfg.rx_vlan_offload_en;
9085 		vport->rxvlan_cfg.strip_tag2_en = true;
9086 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9087 	}
9088 
9089 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9090 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9091 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9092 
9093 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9094 	if (ret)
9095 		return ret;
9096 
9097 	return hclge_set_vlan_rx_offload_cfg(vport);
9098 }
9099 
9100 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9101 {
9102 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9103 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9104 	struct hclge_desc desc;
9105 	int status;
9106 
9107 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9108 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9109 	rx_req->ot_fst_vlan_type =
9110 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9111 	rx_req->ot_sec_vlan_type =
9112 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9113 	rx_req->in_fst_vlan_type =
9114 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9115 	rx_req->in_sec_vlan_type =
9116 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9117 
9118 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9119 	if (status) {
9120 		dev_err(&hdev->pdev->dev,
9121 			"Send rxvlan protocol type command fail, ret =%d\n",
9122 			status);
9123 		return status;
9124 	}
9125 
9126 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9127 
9128 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9129 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9130 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9131 
9132 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9133 	if (status)
9134 		dev_err(&hdev->pdev->dev,
9135 			"Send txvlan protocol type command fail, ret =%d\n",
9136 			status);
9137 
9138 	return status;
9139 }
9140 
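/* Initialize the vlan configuration: enable the vf egress and port
 * ingress filters (per function on device version V2 and later), set the
 * default 0x8100 protocol types, apply each vport's vlan offload settings
 * and finally add vlan id 0 for the PF.
 */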
9141 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9142 {
9143 #define HCLGE_DEF_VLAN_TYPE		0x8100
9144 
9145 	struct hnae3_handle *handle = &hdev->vport[0].nic;
9146 	struct hclge_vport *vport;
9147 	int ret;
9148 	int i;
9149 
9150 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
9151 		/* for revision 0x21, vf vlan filter is per function */
9152 		for (i = 0; i < hdev->num_alloc_vport; i++) {
9153 			vport = &hdev->vport[i];
9154 			ret = hclge_set_vlan_filter_ctrl(hdev,
9155 							 HCLGE_FILTER_TYPE_VF,
9156 							 HCLGE_FILTER_FE_EGRESS,
9157 							 true,
9158 							 vport->vport_id);
9159 			if (ret)
9160 				return ret;
9161 		}
9162 
9163 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9164 						 HCLGE_FILTER_FE_INGRESS, true,
9165 						 0);
9166 		if (ret)
9167 			return ret;
9168 	} else {
9169 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9170 						 HCLGE_FILTER_FE_EGRESS_V1_B,
9171 						 true, 0);
9172 		if (ret)
9173 			return ret;
9174 	}
9175 
9176 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
9177 
9178 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9179 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9180 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
9181 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
9182 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
9183 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
9184 
9185 	ret = hclge_set_vlan_protocol_type(hdev);
9186 	if (ret)
9187 		return ret;
9188 
9189 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9190 		u16 vlan_tag;
9191 
9192 		vport = &hdev->vport[i];
9193 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9194 
9195 		ret = hclge_vlan_offload_cfg(vport,
9196 					     vport->port_base_vlan_cfg.state,
9197 					     vlan_tag);
9198 		if (ret)
9199 			return ret;
9200 	}
9201 
9202 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9203 }
9204 
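/* Track a vlan id in the vport's software vlan list; hd_tbl_status
 * records whether the id has already been written to the hardware
 * filter table.
 */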
9205 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9206 				       bool written_to_tbl)
9207 {
9208 	struct hclge_vport_vlan_cfg *vlan;
9209 
9210 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9211 	if (!vlan)
9212 		return;
9213 
9214 	vlan->hd_tbl_status = written_to_tbl;
9215 	vlan->vlan_id = vlan_id;
9216 
9217 	list_add_tail(&vlan->node, &vport->vlan_list);
9218 }
9219 
9220 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9221 {
9222 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9223 	struct hclge_dev *hdev = vport->back;
9224 	int ret;
9225 
9226 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9227 		if (!vlan->hd_tbl_status) {
9228 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9229 						       vport->vport_id,
9230 						       vlan->vlan_id, false);
9231 			if (ret) {
9232 				dev_err(&hdev->pdev->dev,
9233 					"restore vport vlan list failed, ret=%d\n",
9234 					ret);
9235 				return ret;
9236 			}
9237 		}
9238 		vlan->hd_tbl_status = true;
9239 	}
9240 
9241 	return 0;
9242 }
9243 
9244 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9245 				      bool is_write_tbl)
9246 {
9247 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9248 	struct hclge_dev *hdev = vport->back;
9249 
9250 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9251 		if (vlan->vlan_id == vlan_id) {
9252 			if (is_write_tbl && vlan->hd_tbl_status)
9253 				hclge_set_vlan_filter_hw(hdev,
9254 							 htons(ETH_P_8021Q),
9255 							 vport->vport_id,
9256 							 vlan_id,
9257 							 true);
9258 
9259 			list_del(&vlan->node);
9260 			kfree(vlan);
9261 			break;
9262 		}
9263 	}
9264 }
9265 
9266 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9267 {
9268 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9269 	struct hclge_dev *hdev = vport->back;
9270 
9271 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9272 		if (vlan->hd_tbl_status)
9273 			hclge_set_vlan_filter_hw(hdev,
9274 						 htons(ETH_P_8021Q),
9275 						 vport->vport_id,
9276 						 vlan->vlan_id,
9277 						 true);
9278 
9279 		vlan->hd_tbl_status = false;
9280 		if (is_del_list) {
9281 			list_del(&vlan->node);
9282 			kfree(vlan);
9283 		}
9284 	}
9285 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
9286 }
9287 
9288 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9289 {
9290 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9291 	struct hclge_vport *vport;
9292 	int i;
9293 
9294 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9295 		vport = &hdev->vport[i];
9296 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9297 			list_del(&vlan->node);
9298 			kfree(vlan);
9299 		}
9300 	}
9301 }
9302 
9303 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9304 {
9305 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9306 	struct hclge_dev *hdev = vport->back;
9307 	u16 vlan_proto;
9308 	u16 vlan_id;
9309 	u16 state;
9310 	int ret;
9311 
9312 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
9313 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
9314 	state = vport->port_base_vlan_cfg.state;
9315 
9316 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9317 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9318 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9319 					 vport->vport_id, vlan_id,
9320 					 false);
9321 		return;
9322 	}
9323 
9324 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9325 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9326 					       vport->vport_id,
9327 					       vlan->vlan_id, false);
9328 		if (ret)
9329 			break;
9330 		vlan->hd_tbl_status = true;
9331 	}
9332 }
9333 
9334 /* For global reset and imp reset, hardware will clear the mac table,
9335  * so we change the mac address state from ACTIVE to TO_ADD, then they
9336  * can be restored in the service task after the reset completes.
9337  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
9338  * to be restored after reset, so just remove these nodes from mac_list.
9339  */
9340 static void hclge_mac_node_convert_for_reset(struct list_head *list)
9341 {
9342 	struct hclge_mac_node *mac_node, *tmp;
9343 
9344 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9345 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
9346 			mac_node->state = HCLGE_MAC_TO_ADD;
9347 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
9348 			list_del(&mac_node->node);
9349 			kfree(mac_node);
9350 		}
9351 	}
9352 }
9353 
9354 void hclge_restore_mac_table_common(struct hclge_vport *vport)
9355 {
9356 	spin_lock_bh(&vport->mac_list_lock);
9357 
9358 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
9359 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
9360 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9361 
9362 	spin_unlock_bh(&vport->mac_list_lock);
9363 }
9364 
9365 static void hclge_restore_hw_table(struct hclge_dev *hdev)
9366 {
9367 	struct hclge_vport *vport = &hdev->vport[0];
9368 	struct hnae3_handle *handle = &vport->nic;
9369 
9370 	hclge_restore_mac_table_common(vport);
9371 	hclge_restore_vport_vlan_table(vport);
9372 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
9373 
9374 	hclge_restore_fd_entries(handle);
9375 }
9376 
9377 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
9378 {
9379 	struct hclge_vport *vport = hclge_get_vport(handle);
9380 
9381 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9382 		vport->rxvlan_cfg.strip_tag1_en = false;
9383 		vport->rxvlan_cfg.strip_tag2_en = enable;
9384 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9385 	} else {
9386 		vport->rxvlan_cfg.strip_tag1_en = enable;
9387 		vport->rxvlan_cfg.strip_tag2_en = true;
9388 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9389 	}
9390 
9391 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9392 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9393 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9394 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
9395 
9396 	return hclge_set_vlan_rx_offload_cfg(vport);
9397 }
9398 
9399 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
9400 					    u16 port_base_vlan_state,
9401 					    struct hclge_vlan_info *new_info,
9402 					    struct hclge_vlan_info *old_info)
9403 {
9404 	struct hclge_dev *hdev = vport->back;
9405 	int ret;
9406 
9407 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
9408 		hclge_rm_vport_all_vlan_table(vport, false);
9409 		return hclge_set_vlan_filter_hw(hdev,
9410 						 htons(new_info->vlan_proto),
9411 						 vport->vport_id,
9412 						 new_info->vlan_tag,
9413 						 false);
9414 	}
9415 
9416 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
9417 				       vport->vport_id, old_info->vlan_tag,
9418 				       true);
9419 	if (ret)
9420 		return ret;
9421 
9422 	return hclge_add_vport_all_vlan_table(vport);
9423 }
9424 
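/* Apply a new port based vlan setting for a vport: reconfigure the vlan
 * offload, then either replace the old port vlan with the new one in
 * hardware (modify) or switch between the port vlan entry and the
 * vport's own vlan list (enable/disable), and record the new vlan info.
 */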
9425 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
9426 				    struct hclge_vlan_info *vlan_info)
9427 {
9428 	struct hnae3_handle *nic = &vport->nic;
9429 	struct hclge_vlan_info *old_vlan_info;
9430 	struct hclge_dev *hdev = vport->back;
9431 	int ret;
9432 
9433 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9434 
9435 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
9436 	if (ret)
9437 		return ret;
9438 
9439 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
9440 		/* add new VLAN tag */
9441 		ret = hclge_set_vlan_filter_hw(hdev,
9442 					       htons(vlan_info->vlan_proto),
9443 					       vport->vport_id,
9444 					       vlan_info->vlan_tag,
9445 					       false);
9446 		if (ret)
9447 			return ret;
9448 
9449 		/* remove old VLAN tag */
9450 		ret = hclge_set_vlan_filter_hw(hdev,
9451 					       htons(old_vlan_info->vlan_proto),
9452 					       vport->vport_id,
9453 					       old_vlan_info->vlan_tag,
9454 					       true);
9455 		if (ret)
9456 			return ret;
9457 
9458 		goto update;
9459 	}
9460 
9461 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
9462 					       old_vlan_info);
9463 	if (ret)
9464 		return ret;
9465 
9466 	/* update state only when disable/enable port based VLAN */
9467 	vport->port_base_vlan_cfg.state = state;
9468 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
9469 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
9470 	else
9471 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
9472 
9473 update:
9474 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
9475 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
9476 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
9477 
9478 	return 0;
9479 }
9480 
9481 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
9482 					  enum hnae3_port_base_vlan_state state,
9483 					  u16 vlan)
9484 {
9485 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9486 		if (!vlan)
9487 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9488 		else
9489 			return HNAE3_PORT_BASE_VLAN_ENABLE;
9490 	} else {
9491 		if (!vlan)
9492 			return HNAE3_PORT_BASE_VLAN_DISABLE;
9493 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
9494 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
9495 		else
9496 			return HNAE3_PORT_BASE_VLAN_MODIFY;
9497 	}
9498 }
9499 
9500 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
9501 				    u16 vlan, u8 qos, __be16 proto)
9502 {
9503 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
9504 	struct hclge_vport *vport = hclge_get_vport(handle);
9505 	struct hclge_dev *hdev = vport->back;
9506 	struct hclge_vlan_info vlan_info;
9507 	u16 state;
9508 	int ret;
9509 
9510 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9511 		return -EOPNOTSUPP;
9512 
9513 	vport = hclge_get_vf_vport(hdev, vfid);
9514 	if (!vport)
9515 		return -EINVAL;
9516 
9517 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
9518 	if (vlan > VLAN_N_VID - 1 || qos > 7)
9519 		return -EINVAL;
9520 	if (proto != htons(ETH_P_8021Q))
9521 		return -EPROTONOSUPPORT;
9522 
9523 	state = hclge_get_port_base_vlan_state(vport,
9524 					       vport->port_base_vlan_cfg.state,
9525 					       vlan);
9526 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9527 		return 0;
9528 
9529 	vlan_info.vlan_tag = vlan;
9530 	vlan_info.qos = qos;
9531 	vlan_info.vlan_proto = ntohs(proto);
9532 
9533 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
9534 	if (ret) {
9535 		dev_err(&hdev->pdev->dev,
9536 			"failed to update port base vlan for vf %d, ret = %d\n",
9537 			vfid, ret);
9538 		return ret;
9539 	}
9540 
9541 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
9542 	 * VLAN state.
9543 	 */
9544 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
9545 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
9546 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9547 						  vport->vport_id, state,
9548 						  vlan, qos,
9549 						  ntohs(proto));
9550 
9551 	return 0;
9552 }
9553 
9554 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9555 {
9556 	struct hclge_vlan_info *vlan_info;
9557 	struct hclge_vport *vport;
9558 	int ret;
9559 	int vf;
9560 
9561 	/* clear port base vlan for all vf */
9562 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9563 		vport = &hdev->vport[vf];
9564 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9565 
9566 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9567 					       vport->vport_id,
9568 					       vlan_info->vlan_tag, true);
9569 		if (ret)
9570 			dev_err(&hdev->pdev->dev,
9571 				"failed to clear vf vlan for vf%d, ret = %d\n",
9572 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9573 	}
9574 }
9575 
9576 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9577 			  u16 vlan_id, bool is_kill)
9578 {
9579 	struct hclge_vport *vport = hclge_get_vport(handle);
9580 	struct hclge_dev *hdev = vport->back;
9581 	bool written_to_tbl = false;
9582 	int ret = 0;
9583 
9584 	/* When the device is resetting or the reset failed, firmware is
9585 	 * unable to handle the mailbox. Just record the vlan id, and remove
9586 	 * it after the reset finishes.
9587 	 */
9588 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9589 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
9590 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9591 		return -EBUSY;
9592 	}
9593 
9594 	/* When port based vlan is enabled, the port based vlan is used as
9595 	 * the vlan filter entry. In this case, we don't update the vlan
9596 	 * filter table when the user adds or removes a vlan; we just update
9597 	 * the vport vlan list. The vlan ids in the vlan list are not written
9598 	 * to the vlan filter table until port based vlan is disabled.
9599 	 */
9600 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9601 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9602 					       vlan_id, is_kill);
9603 		written_to_tbl = true;
9604 	}
9605 
9606 	if (!ret) {
9607 		if (is_kill)
9608 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9609 		else
9610 			hclge_add_vport_vlan_table(vport, vlan_id,
9611 						   written_to_tbl);
9612 	} else if (is_kill) {
9613 		/* When removing the hw vlan filter failed, record the vlan
9614 		 * id and try to remove it from hw later, to be consistent
9615 		 * with the stack.
9616 		 */
9617 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9618 	}
9619 	return ret;
9620 }
9621 
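/* Retry vlan deletions that failed earlier and were recorded in each
 * vport's vlan_del_fail_bmap, limited to HCLGE_MAX_SYNC_COUNT entries
 * per call so the service task is not held up for too long.
 */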
9622 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9623 {
9624 #define HCLGE_MAX_SYNC_COUNT	60
9625 
9626 	int i, ret, sync_cnt = 0;
9627 	u16 vlan_id;
9628 
9629 	/* retry the vlan deletions recorded for each vport, PF included */
9630 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9631 		struct hclge_vport *vport = &hdev->vport[i];
9632 
9633 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9634 					 VLAN_N_VID);
9635 		while (vlan_id != VLAN_N_VID) {
9636 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9637 						       vport->vport_id, vlan_id,
9638 						       true);
9639 			if (ret && ret != -EINVAL)
9640 				return;
9641 
9642 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9643 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9644 
9645 			sync_cnt++;
9646 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9647 				return;
9648 
9649 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9650 						 VLAN_N_VID);
9651 		}
9652 	}
9653 }
9654 
9655 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9656 {
9657 	struct hclge_config_max_frm_size_cmd *req;
9658 	struct hclge_desc desc;
9659 
9660 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9661 
9662 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9663 	req->max_frm_size = cpu_to_le16(new_mps);
9664 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9665 
9666 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9667 }
9668 
9669 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9670 {
9671 	struct hclge_vport *vport = hclge_get_vport(handle);
9672 
9673 	return hclge_set_vport_mtu(vport, new_mtu);
9674 }
9675 
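/* Change the maximum frame size of a vport. A VF only records its new
 * mps, which must not exceed the PF's; for the PF the new mps must not
 * be smaller than any VF's, and the MAC and packet buffers are
 * reconfigured with the nic temporarily down.
 */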
9676 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9677 {
9678 	struct hclge_dev *hdev = vport->back;
9679 	int i, max_frm_size, ret;
9680 
9681 	/* HW supports 2-layer vlan */
9682 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9683 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9684 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
9685 		return -EINVAL;
9686 
9687 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9688 	mutex_lock(&hdev->vport_lock);
9689 	/* VF's mps must fit within hdev->mps */
9690 	if (vport->vport_id && max_frm_size > hdev->mps) {
9691 		mutex_unlock(&hdev->vport_lock);
9692 		return -EINVAL;
9693 	} else if (vport->vport_id) {
9694 		vport->mps = max_frm_size;
9695 		mutex_unlock(&hdev->vport_lock);
9696 		return 0;
9697 	}
9698 
9699 	/* PF's mps must be greater than VF's mps */
9700 	for (i = 1; i < hdev->num_alloc_vport; i++)
9701 		if (max_frm_size < hdev->vport[i].mps) {
9702 			mutex_unlock(&hdev->vport_lock);
9703 			return -EINVAL;
9704 		}
9705 
9706 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9707 
9708 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9709 	if (ret) {
9710 		dev_err(&hdev->pdev->dev,
9711 			"Change mtu fail, ret =%d\n", ret);
9712 		goto out;
9713 	}
9714 
9715 	hdev->mps = max_frm_size;
9716 	vport->mps = max_frm_size;
9717 
9718 	ret = hclge_buffer_alloc(hdev);
9719 	if (ret)
9720 		dev_err(&hdev->pdev->dev,
9721 			"Allocate buffer fail, ret =%d\n", ret);
9722 
9723 out:
9724 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9725 	mutex_unlock(&hdev->vport_lock);
9726 	return ret;
9727 }
9728 
9729 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9730 				    bool enable)
9731 {
9732 	struct hclge_reset_tqp_queue_cmd *req;
9733 	struct hclge_desc desc;
9734 	int ret;
9735 
9736 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9737 
9738 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9739 	req->tqp_id = cpu_to_le16(queue_id);
9740 	if (enable)
9741 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9742 
9743 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9744 	if (ret) {
9745 		dev_err(&hdev->pdev->dev,
9746 			"Send tqp reset cmd error, status =%d\n", ret);
9747 		return ret;
9748 	}
9749 
9750 	return 0;
9751 }
9752 
9753 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9754 {
9755 	struct hclge_reset_tqp_queue_cmd *req;
9756 	struct hclge_desc desc;
9757 	int ret;
9758 
9759 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9760 
9761 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9762 	req->tqp_id = cpu_to_le16(queue_id);
9763 
9764 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9765 	if (ret) {
9766 		dev_err(&hdev->pdev->dev,
9767 			"Get reset status error, status =%d\n", ret);
9768 		return ret;
9769 	}
9770 
9771 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9772 }
9773 
9774 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9775 {
9776 	struct hnae3_queue *queue;
9777 	struct hclge_tqp *tqp;
9778 
9779 	queue = handle->kinfo.tqp[queue_id];
9780 	tqp = container_of(queue, struct hclge_tqp, q);
9781 
9782 	return tqp->index;
9783 }
9784 
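/* Reset one tqp owned by the PF: disable the queue, request the tqp
 * reset, poll the ready status up to HCLGE_TQP_RESET_TRY_TIMES and then
 * deassert the reset.
 */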
9785 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9786 {
9787 	struct hclge_vport *vport = hclge_get_vport(handle);
9788 	struct hclge_dev *hdev = vport->back;
9789 	int reset_try_times = 0;
9790 	int reset_status;
9791 	u16 queue_gid;
9792 	int ret;
9793 
9794 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9795 
9796 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9797 	if (ret) {
9798 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9799 		return ret;
9800 	}
9801 
9802 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9803 	if (ret) {
9804 		dev_err(&hdev->pdev->dev,
9805 			"Send reset tqp cmd fail, ret = %d\n", ret);
9806 		return ret;
9807 	}
9808 
9809 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9810 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9811 		if (reset_status)
9812 			break;
9813 
9814 		/* Wait for tqp hw reset */
9815 		usleep_range(1000, 1200);
9816 	}
9817 
9818 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9819 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9820 		return -ETIME;
9821 	}
9822 
9823 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9824 	if (ret)
9825 		dev_err(&hdev->pdev->dev,
9826 			"Deassert the soft reset fail, ret = %d\n", ret);
9827 
9828 	return ret;
9829 }
9830 
9831 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9832 {
9833 	struct hnae3_handle *handle = &vport->nic;
9834 	struct hclge_dev *hdev = vport->back;
9835 	int reset_try_times = 0;
9836 	int reset_status;
9837 	u16 queue_gid;
9838 	int ret;
9839 
9840 	if (queue_id >= handle->kinfo.num_tqps) {
9841 		dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
9842 			 queue_id);
9843 		return;
9844 	}
9845 
9846 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9847 
9848 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9849 	if (ret) {
9850 		dev_warn(&hdev->pdev->dev,
9851 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9852 		return;
9853 	}
9854 
9855 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9856 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9857 		if (reset_status)
9858 			break;
9859 
9860 		/* Wait for tqp hw reset */
9861 		usleep_range(1000, 1200);
9862 	}
9863 
9864 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9865 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9866 		return;
9867 	}
9868 
9869 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9870 	if (ret)
9871 		dev_warn(&hdev->pdev->dev,
9872 			 "Deassert the soft reset fail, ret = %d\n", ret);
9873 }
9874 
9875 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9876 {
9877 	struct hclge_vport *vport = hclge_get_vport(handle);
9878 	struct hclge_dev *hdev = vport->back;
9879 
9880 	return hdev->fw_version;
9881 }
9882 
9883 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9884 {
9885 	struct phy_device *phydev = hdev->hw.mac.phydev;
9886 
9887 	if (!phydev)
9888 		return;
9889 
9890 	phy_set_asym_pause(phydev, rx_en, tx_en);
9891 }
9892 
9893 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9894 {
9895 	int ret;
9896 
9897 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9898 		return 0;
9899 
9900 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9901 	if (ret)
9902 		dev_err(&hdev->pdev->dev,
9903 			"configure pauseparam error, ret = %d.\n", ret);
9904 
9905 	return ret;
9906 }
9907 
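/* Resolve the pause configuration negotiated by the PHY from the local
 * and link partner advertisements and apply it to the MAC; pause is
 * disabled in half duplex.
 */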
9908 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9909 {
9910 	struct phy_device *phydev = hdev->hw.mac.phydev;
9911 	u16 remote_advertising = 0;
9912 	u16 local_advertising;
9913 	u32 rx_pause, tx_pause;
9914 	u8 flowctl;
9915 
9916 	if (!phydev->link || !phydev->autoneg)
9917 		return 0;
9918 
9919 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9920 
9921 	if (phydev->pause)
9922 		remote_advertising = LPA_PAUSE_CAP;
9923 
9924 	if (phydev->asym_pause)
9925 		remote_advertising |= LPA_PAUSE_ASYM;
9926 
9927 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9928 					   remote_advertising);
9929 	tx_pause = flowctl & FLOW_CTRL_TX;
9930 	rx_pause = flowctl & FLOW_CTRL_RX;
9931 
9932 	if (phydev->duplex == HCLGE_MAC_HALF) {
9933 		tx_pause = 0;
9934 		rx_pause = 0;
9935 	}
9936 
9937 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9938 }
9939 
9940 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9941 				 u32 *rx_en, u32 *tx_en)
9942 {
9943 	struct hclge_vport *vport = hclge_get_vport(handle);
9944 	struct hclge_dev *hdev = vport->back;
9945 	struct phy_device *phydev = hdev->hw.mac.phydev;
9946 
9947 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9948 
9949 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9950 		*rx_en = 0;
9951 		*tx_en = 0;
9952 		return;
9953 	}
9954 
9955 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9956 		*rx_en = 1;
9957 		*tx_en = 0;
9958 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9959 		*tx_en = 1;
9960 		*rx_en = 0;
9961 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9962 		*rx_en = 1;
9963 		*tx_en = 1;
9964 	} else {
9965 		*rx_en = 0;
9966 		*tx_en = 0;
9967 	}
9968 }
9969 
9970 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9971 					 u32 rx_en, u32 tx_en)
9972 {
9973 	if (rx_en && tx_en)
9974 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9975 	else if (rx_en && !tx_en)
9976 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9977 	else if (!rx_en && tx_en)
9978 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9979 	else
9980 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9981 
9982 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9983 }
9984 
9985 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9986 				u32 rx_en, u32 tx_en)
9987 {
9988 	struct hclge_vport *vport = hclge_get_vport(handle);
9989 	struct hclge_dev *hdev = vport->back;
9990 	struct phy_device *phydev = hdev->hw.mac.phydev;
9991 	u32 fc_autoneg;
9992 
9993 	if (phydev) {
9994 		fc_autoneg = hclge_get_autoneg(handle);
9995 		if (auto_neg != fc_autoneg) {
9996 			dev_info(&hdev->pdev->dev,
9997 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9998 			return -EOPNOTSUPP;
9999 		}
10000 	}
10001 
10002 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10003 		dev_info(&hdev->pdev->dev,
10004 			 "Priority flow control enabled. Cannot set link flow control.\n");
10005 		return -EOPNOTSUPP;
10006 	}
10007 
10008 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10009 
10010 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10011 
10012 	if (!auto_neg)
10013 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10014 
10015 	if (phydev)
10016 		return phy_start_aneg(phydev);
10017 
10018 	return -EOPNOTSUPP;
10019 }
10020 
10021 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10022 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10023 {
10024 	struct hclge_vport *vport = hclge_get_vport(handle);
10025 	struct hclge_dev *hdev = vport->back;
10026 
10027 	if (speed)
10028 		*speed = hdev->hw.mac.speed;
10029 	if (duplex)
10030 		*duplex = hdev->hw.mac.duplex;
10031 	if (auto_neg)
10032 		*auto_neg = hdev->hw.mac.autoneg;
10033 }
10034 
10035 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10036 				 u8 *module_type)
10037 {
10038 	struct hclge_vport *vport = hclge_get_vport(handle);
10039 	struct hclge_dev *hdev = vport->back;
10040 
10041 	/* When the nic is down, the service task is not running and does not
10042 	 * update the port information every second. Query the port information
10043 	 * before returning the media type to ensure it is correct.
10044 	 */
10045 	hclge_update_port_info(hdev);
10046 
10047 	if (media_type)
10048 		*media_type = hdev->hw.mac.media_type;
10049 
10050 	if (module_type)
10051 		*module_type = hdev->hw.mac.module_type;
10052 }
10053 
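/* Report the MDI-X control and status of a copper PHY by reading its
 * MDIX page registers, then restore the copper page.
 */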
10054 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10055 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
10056 {
10057 	struct hclge_vport *vport = hclge_get_vport(handle);
10058 	struct hclge_dev *hdev = vport->back;
10059 	struct phy_device *phydev = hdev->hw.mac.phydev;
10060 	int mdix_ctrl, mdix, is_resolved;
10061 	unsigned int retval;
10062 
10063 	if (!phydev) {
10064 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10065 		*tp_mdix = ETH_TP_MDI_INVALID;
10066 		return;
10067 	}
10068 
10069 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10070 
10071 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10072 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10073 				    HCLGE_PHY_MDIX_CTRL_S);
10074 
10075 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10076 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10077 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10078 
10079 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10080 
10081 	switch (mdix_ctrl) {
10082 	case 0x0:
10083 		*tp_mdix_ctrl = ETH_TP_MDI;
10084 		break;
10085 	case 0x1:
10086 		*tp_mdix_ctrl = ETH_TP_MDI_X;
10087 		break;
10088 	case 0x3:
10089 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10090 		break;
10091 	default:
10092 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10093 		break;
10094 	}
10095 
10096 	if (!is_resolved)
10097 		*tp_mdix = ETH_TP_MDI_INVALID;
10098 	else if (mdix)
10099 		*tp_mdix = ETH_TP_MDI_X;
10100 	else
10101 		*tp_mdix = ETH_TP_MDI;
10102 }
10103 
10104 static void hclge_info_show(struct hclge_dev *hdev)
10105 {
10106 	struct device *dev = &hdev->pdev->dev;
10107 
10108 	dev_info(dev, "PF info begin:\n");
10109 
10110 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10111 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10112 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10113 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10114 	dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
10115 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10116 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10117 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10118 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10119 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10120 	dev_info(dev, "This is %s PF\n",
10121 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10122 	dev_info(dev, "DCB %s\n",
10123 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10124 	dev_info(dev, "MQPRIO %s\n",
10125 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10126 
10127 	dev_info(dev, "PF info end.\n");
10128 }
10129 
10130 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10131 					  struct hclge_vport *vport)
10132 {
10133 	struct hnae3_client *client = vport->nic.client;
10134 	struct hclge_dev *hdev = ae_dev->priv;
10135 	int rst_cnt = hdev->rst_stats.reset_cnt;
10136 	int ret;
10137 
10138 	ret = client->ops->init_instance(&vport->nic);
10139 	if (ret)
10140 		return ret;
10141 
10142 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10143 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10144 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10145 		ret = -EBUSY;
10146 		goto init_nic_err;
10147 	}
10148 
10149 	/* Enable nic hw error interrupts */
10150 	ret = hclge_config_nic_hw_error(hdev, true);
10151 	if (ret) {
10152 		dev_err(&ae_dev->pdev->dev,
10153 			"fail(%d) to enable hw error interrupts\n", ret);
10154 		goto init_nic_err;
10155 	}
10156 
10157 	hnae3_set_client_init_flag(client, ae_dev, 1);
10158 
10159 	if (netif_msg_drv(&hdev->vport->nic))
10160 		hclge_info_show(hdev);
10161 
10162 	return ret;
10163 
10164 init_nic_err:
10165 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10166 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10167 		msleep(HCLGE_WAIT_RESET_DONE);
10168 
10169 	client->ops->uninit_instance(&vport->nic, 0);
10170 
10171 	return ret;
10172 }
10173 
10174 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10175 					   struct hclge_vport *vport)
10176 {
10177 	struct hclge_dev *hdev = ae_dev->priv;
10178 	struct hnae3_client *client;
10179 	int rst_cnt;
10180 	int ret;
10181 
10182 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10183 	    !hdev->nic_client)
10184 		return 0;
10185 
10186 	client = hdev->roce_client;
10187 	ret = hclge_init_roce_base_info(vport);
10188 	if (ret)
10189 		return ret;
10190 
10191 	rst_cnt = hdev->rst_stats.reset_cnt;
10192 	ret = client->ops->init_instance(&vport->roce);
10193 	if (ret)
10194 		return ret;
10195 
10196 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10197 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10198 	    rst_cnt != hdev->rst_stats.reset_cnt) {
10199 		ret = -EBUSY;
10200 		goto init_roce_err;
10201 	}
10202 
10203 	/* Enable roce ras interrupts */
10204 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
10205 	if (ret) {
10206 		dev_err(&ae_dev->pdev->dev,
10207 			"fail(%d) to enable roce ras interrupts\n", ret);
10208 		goto init_roce_err;
10209 	}
10210 
10211 	hnae3_set_client_init_flag(client, ae_dev, 1);
10212 
10213 	return 0;
10214 
10215 init_roce_err:
10216 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10217 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10218 		msleep(HCLGE_WAIT_RESET_DONE);
10219 
10220 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10221 
10222 	return ret;
10223 }
10224 
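/* Bind a newly registered client (nic or roce) to each vport and
 * initialize its instances; the roce instance is only initialized once
 * both the nic and roce clients are present and the device supports
 * roce.
 */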
10225 static int hclge_init_client_instance(struct hnae3_client *client,
10226 				      struct hnae3_ae_dev *ae_dev)
10227 {
10228 	struct hclge_dev *hdev = ae_dev->priv;
10229 	struct hclge_vport *vport;
10230 	int i, ret;
10231 
10232 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10233 		vport = &hdev->vport[i];
10234 
10235 		switch (client->type) {
10236 		case HNAE3_CLIENT_KNIC:
10237 			hdev->nic_client = client;
10238 			vport->nic.client = client;
10239 			ret = hclge_init_nic_client_instance(ae_dev, vport);
10240 			if (ret)
10241 				goto clear_nic;
10242 
10243 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10244 			if (ret)
10245 				goto clear_roce;
10246 
10247 			break;
10248 		case HNAE3_CLIENT_ROCE:
10249 			if (hnae3_dev_roce_supported(hdev)) {
10250 				hdev->roce_client = client;
10251 				vport->roce.client = client;
10252 			}
10253 
10254 			ret = hclge_init_roce_client_instance(ae_dev, vport);
10255 			if (ret)
10256 				goto clear_roce;
10257 
10258 			break;
10259 		default:
10260 			return -EINVAL;
10261 		}
10262 	}
10263 
10264 	return 0;
10265 
10266 clear_nic:
10267 	hdev->nic_client = NULL;
10268 	vport->nic.client = NULL;
10269 	return ret;
10270 clear_roce:
10271 	hdev->roce_client = NULL;
10272 	vport->roce.client = NULL;
10273 	return ret;
10274 }
10275 
10276 static void hclge_uninit_client_instance(struct hnae3_client *client,
10277 					 struct hnae3_ae_dev *ae_dev)
10278 {
10279 	struct hclge_dev *hdev = ae_dev->priv;
10280 	struct hclge_vport *vport;
10281 	int i;
10282 
10283 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
10284 		vport = &hdev->vport[i];
10285 		if (hdev->roce_client) {
10286 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10287 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10288 				msleep(HCLGE_WAIT_RESET_DONE);
10289 
10290 			hdev->roce_client->ops->uninit_instance(&vport->roce,
10291 								0);
10292 			hdev->roce_client = NULL;
10293 			vport->roce.client = NULL;
10294 		}
10295 		if (client->type == HNAE3_CLIENT_ROCE)
10296 			return;
10297 		if (hdev->nic_client && client->ops->uninit_instance) {
10298 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10299 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10300 				msleep(HCLGE_WAIT_RESET_DONE);
10301 
10302 			client->ops->uninit_instance(&vport->nic, 0);
10303 			hdev->nic_client = NULL;
10304 			vport->nic.client = NULL;
10305 		}
10306 	}
10307 }
10308 
10309 static int hclge_dev_mem_map(struct hclge_dev *hdev)
10310 {
10311 #define HCLGE_MEM_BAR		4
10312 
10313 	struct pci_dev *pdev = hdev->pdev;
10314 	struct hclge_hw *hw = &hdev->hw;
10315 
10316 	/* if the device does not have device memory, return directly */
10317 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
10318 		return 0;
10319 
10320 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
10321 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
10322 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
10323 	if (!hw->mem_base) {
10324 		dev_err(&pdev->dev, "failed to map device memory\n");
10325 		return -EFAULT;
10326 	}
10327 
10328 	return 0;
10329 }
10330 
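/* Enable the PCI device, set a 64-bit DMA mask (with a 32-bit fallback),
 * map the BAR2 register space and, if present, the BAR4 device memory.
 */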
10331 static int hclge_pci_init(struct hclge_dev *hdev)
10332 {
10333 	struct pci_dev *pdev = hdev->pdev;
10334 	struct hclge_hw *hw;
10335 	int ret;
10336 
10337 	ret = pci_enable_device(pdev);
10338 	if (ret) {
10339 		dev_err(&pdev->dev, "failed to enable PCI device\n");
10340 		return ret;
10341 	}
10342 
10343 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
10344 	if (ret) {
10345 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
10346 		if (ret) {
10347 			dev_err(&pdev->dev,
10348 				"can't set consistent PCI DMA\n");
10349 			goto err_disable_device;
10350 		}
10351 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
10352 	}
10353 
10354 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
10355 	if (ret) {
10356 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
10357 		goto err_disable_device;
10358 	}
10359 
10360 	pci_set_master(pdev);
10361 	hw = &hdev->hw;
10362 	hw->io_base = pcim_iomap(pdev, 2, 0);
10363 	if (!hw->io_base) {
10364 		dev_err(&pdev->dev, "Can't map configuration register space\n");
10365 		ret = -ENOMEM;
10366 		goto err_clr_master;
10367 	}
10368 
10369 	ret = hclge_dev_mem_map(hdev);
10370 	if (ret)
10371 		goto err_unmap_io_base;
10372 
10373 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
10374 
10375 	return 0;
10376 
10377 err_unmap_io_base:
10378 	pcim_iounmap(pdev, hdev->hw.io_base);
10379 err_clr_master:
10380 	pci_clear_master(pdev);
10381 	pci_release_regions(pdev);
10382 err_disable_device:
10383 	pci_disable_device(pdev);
10384 
10385 	return ret;
10386 }
10387 
10388 static void hclge_pci_uninit(struct hclge_dev *hdev)
10389 {
10390 	struct pci_dev *pdev = hdev->pdev;
10391 
10392 	if (hdev->hw.mem_base)
10393 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
10394 
10395 	pcim_iounmap(pdev, hdev->hw.io_base);
10396 	pci_free_irq_vectors(pdev);
10397 	pci_clear_master(pdev);
10398 	pci_release_mem_regions(pdev);
10399 	pci_disable_device(pdev);
10400 }
10401 
10402 static void hclge_state_init(struct hclge_dev *hdev)
10403 {
10404 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
10405 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10406 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
10407 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10408 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
10409 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
10410 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
10411 }
10412 
10413 static void hclge_state_uninit(struct hclge_dev *hdev)
10414 {
10415 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10416 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
10417 
10418 	if (hdev->reset_timer.function)
10419 		del_timer_sync(&hdev->reset_timer);
10420 	if (hdev->service_task.work.func)
10421 		cancel_delayed_work_sync(&hdev->service_task);
10422 }
10423 
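/* Prepare the function for an FLR: take the reset semaphore, stop the
 * device via hclge_reset_prepare() (retrying a few times on failure),
 * then disable the misc vector and mark the command queue disabled until
 * the FLR completes.
 */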
10424 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
10425 {
10426 #define HCLGE_FLR_RETRY_WAIT_MS	500
10427 #define HCLGE_FLR_RETRY_CNT	5
10428 
10429 	struct hclge_dev *hdev = ae_dev->priv;
10430 	int retry_cnt = 0;
10431 	int ret;
10432 
10433 retry:
10434 	down(&hdev->reset_sem);
10435 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10436 	hdev->reset_type = HNAE3_FLR_RESET;
10437 	ret = hclge_reset_prepare(hdev);
10438 	if (ret || hdev->reset_pending) {
10439 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
10440 			ret);
10441 		if (hdev->reset_pending ||
10442 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
10443 			dev_err(&hdev->pdev->dev,
10444 				"reset_pending:0x%lx, retry_cnt:%d\n",
10445 				hdev->reset_pending, retry_cnt);
10446 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10447 			up(&hdev->reset_sem);
10448 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
10449 			goto retry;
10450 		}
10451 	}
10452 
10453 	/* disable misc vector before FLR done */
10454 	hclge_enable_vector(&hdev->misc_vector, false);
10455 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
10456 	hdev->rst_stats.flr_rst_cnt++;
10457 }
10458 
10459 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
10460 {
10461 	struct hclge_dev *hdev = ae_dev->priv;
10462 	int ret;
10463 
10464 	hclge_enable_vector(&hdev->misc_vector, true);
10465 
10466 	ret = hclge_reset_rebuild(hdev);
10467 	if (ret)
10468 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
10469 
10470 	hdev->reset_type = HNAE3_NONE_RESET;
10471 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
10472 	up(&hdev->reset_sem);
10473 }
10474 
10475 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
10476 {
10477 	u16 i;
10478 
10479 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10480 		struct hclge_vport *vport = &hdev->vport[i];
10481 		int ret;
10482 
10483 		/* Send cmd to clear VF's FUNC_RST_ING */
10484 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
10485 		if (ret)
10486 			dev_warn(&hdev->pdev->dev,
10487 				 "clear vf(%u) rst failed %d!\n",
10488 				 vport->vport_id, ret);
10489 	}
10490 }
10491 
10492 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
10493 {
10494 	struct pci_dev *pdev = ae_dev->pdev;
10495 	struct hclge_dev *hdev;
10496 	int ret;
10497 
10498 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
10499 	if (!hdev)
10500 		return -ENOMEM;
10501 
10502 	hdev->pdev = pdev;
10503 	hdev->ae_dev = ae_dev;
10504 	hdev->reset_type = HNAE3_NONE_RESET;
10505 	hdev->reset_level = HNAE3_FUNC_RESET;
10506 	ae_dev->priv = hdev;
10507 
10508 	/* HW supports 2-layer vlan */
10509 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10510 
10511 	mutex_init(&hdev->vport_lock);
10512 	spin_lock_init(&hdev->fd_rule_lock);
10513 	sema_init(&hdev->reset_sem, 1);
10514 
10515 	ret = hclge_pci_init(hdev);
10516 	if (ret)
10517 		goto out;
10518 
10519 	/* Firmware command queue initialize */
10520 	ret = hclge_cmd_queue_init(hdev);
10521 	if (ret)
10522 		goto err_pci_uninit;
10523 
10524 	/* Firmware command initialize */
10525 	ret = hclge_cmd_init(hdev);
10526 	if (ret)
10527 		goto err_cmd_uninit;
10528 
10529 	ret = hclge_get_cap(hdev);
10530 	if (ret)
10531 		goto err_cmd_uninit;
10532 
10533 	ret = hclge_query_dev_specs(hdev);
10534 	if (ret) {
10535 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
10536 			ret);
10537 		goto err_cmd_uninit;
10538 	}
10539 
10540 	ret = hclge_configure(hdev);
10541 	if (ret) {
10542 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
10543 		goto err_cmd_uninit;
10544 	}
10545 
10546 	ret = hclge_init_msi(hdev);
10547 	if (ret) {
10548 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
10549 		goto err_cmd_uninit;
10550 	}
10551 
10552 	ret = hclge_misc_irq_init(hdev);
10553 	if (ret)
10554 		goto err_msi_uninit;
10555 
10556 	ret = hclge_alloc_tqps(hdev);
10557 	if (ret) {
10558 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
10559 		goto err_msi_irq_uninit;
10560 	}
10561 
10562 	ret = hclge_alloc_vport(hdev);
10563 	if (ret)
10564 		goto err_msi_irq_uninit;
10565 
10566 	ret = hclge_map_tqp(hdev);
10567 	if (ret)
10568 		goto err_msi_irq_uninit;
10569 
10570 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
10571 		ret = hclge_mac_mdio_config(hdev);
10572 		if (ret)
10573 			goto err_msi_irq_uninit;
10574 	}
10575 
10576 	ret = hclge_init_umv_space(hdev);
10577 	if (ret)
10578 		goto err_mdiobus_unreg;
10579 
10580 	ret = hclge_mac_init(hdev);
10581 	if (ret) {
10582 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10583 		goto err_mdiobus_unreg;
10584 	}
10585 
10586 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10587 	if (ret) {
10588 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10589 		goto err_mdiobus_unreg;
10590 	}
10591 
10592 	ret = hclge_config_gro(hdev, true);
10593 	if (ret)
10594 		goto err_mdiobus_unreg;
10595 
10596 	ret = hclge_init_vlan_config(hdev);
10597 	if (ret) {
10598 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10599 		goto err_mdiobus_unreg;
10600 	}
10601 
10602 	ret = hclge_tm_schd_init(hdev);
10603 	if (ret) {
10604 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10605 		goto err_mdiobus_unreg;
10606 	}
10607 
10608 	ret = hclge_rss_init_cfg(hdev);
10609 	if (ret) {
10610 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
10611 		goto err_mdiobus_unreg;
10612 	}
10613 
10614 	ret = hclge_rss_init_hw(hdev);
10615 	if (ret) {
10616 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10617 		goto err_mdiobus_unreg;
10618 	}
10619 
10620 	ret = init_mgr_tbl(hdev);
10621 	if (ret) {
10622 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10623 		goto err_mdiobus_unreg;
10624 	}
10625 
10626 	ret = hclge_init_fd_config(hdev);
10627 	if (ret) {
10628 		dev_err(&pdev->dev,
10629 			"fd table init fail, ret=%d\n", ret);
10630 		goto err_mdiobus_unreg;
10631 	}
10632 
10633 	INIT_KFIFO(hdev->mac_tnl_log);
10634 
10635 	hclge_dcb_ops_set(hdev);
10636 
10637 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10638 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10639 
10640 	/* Setup affinity after service timer setup because add_timer_on
10641 	 * is called in affinity notify.
10642 	 */
10643 	hclge_misc_affinity_setup(hdev);
10644 
10645 	hclge_clear_all_event_cause(hdev);
10646 	hclge_clear_resetting_state(hdev);
10647 
10648 	/* Log and clear the hw errors those already occurred */
10649 	hclge_handle_all_hns_hw_errors(ae_dev);
10650 
10651 	/* request a delayed reset for error recovery, because an immediate
10652 	 * global reset on a PF would affect the pending init of other PFs
10653 	 */
10654 	if (ae_dev->hw_err_reset_req) {
10655 		enum hnae3_reset_type reset_level;
10656 
10657 		reset_level = hclge_get_reset_level(ae_dev,
10658 						    &ae_dev->hw_err_reset_req);
10659 		hclge_set_def_reset_request(ae_dev, reset_level);
10660 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10661 	}
10662 
10663 	/* Enable MISC vector(vector0) */
10664 	hclge_enable_vector(&hdev->misc_vector, true);
10665 
10666 	hclge_state_init(hdev);
10667 	hdev->last_reset_time = jiffies;
10668 
10669 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10670 		 HCLGE_DRIVER_NAME);
10671 
10672 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10673 
10674 	return 0;
10675 
10676 err_mdiobus_unreg:
10677 	if (hdev->hw.mac.phydev)
10678 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10679 err_msi_irq_uninit:
10680 	hclge_misc_irq_uninit(hdev);
10681 err_msi_uninit:
10682 	pci_free_irq_vectors(pdev);
10683 err_cmd_uninit:
10684 	hclge_cmd_uninit(hdev);
10685 err_pci_uninit:
10686 	pcim_iounmap(pdev, hdev->hw.io_base);
10687 	pci_clear_master(pdev);
10688 	pci_release_regions(pdev);
10689 	pci_disable_device(pdev);
10690 out:
10691 	mutex_destroy(&hdev->vport_lock);
10692 	return ret;
10693 }
10694 
10695 static void hclge_stats_clear(struct hclge_dev *hdev)
10696 {
10697 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10698 }
10699 
10700 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10701 {
10702 	return hclge_config_switch_param(hdev, vf, enable,
10703 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10704 }
10705 
10706 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10707 {
10708 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10709 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10710 					  enable, vf);
10711 }
10712 
10713 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10714 {
10715 	int ret;
10716 
10717 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10718 	if (ret) {
10719 		dev_err(&hdev->pdev->dev,
10720 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10721 			vf, enable ? "on" : "off", ret);
10722 		return ret;
10723 	}
10724 
10725 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10726 	if (ret)
10727 		dev_err(&hdev->pdev->dev,
10728 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10729 			vf, enable ? "on" : "off", ret);
10730 
10731 	return ret;
10732 }
10733 
10734 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10735 				 bool enable)
10736 {
10737 	struct hclge_vport *vport = hclge_get_vport(handle);
10738 	struct hclge_dev *hdev = vport->back;
10739 	u32 new_spoofchk = enable ? 1 : 0;
10740 	int ret;
10741 
10742 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10743 		return -EOPNOTSUPP;
10744 
10745 	vport = hclge_get_vf_vport(hdev, vf);
10746 	if (!vport)
10747 		return -EINVAL;
10748 
10749 	if (vport->vf_info.spoofchk == new_spoofchk)
10750 		return 0;
10751 
10752 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10753 		dev_warn(&hdev->pdev->dev,
10754 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10755 			 vf);
10756 	else if (enable && hclge_is_umv_space_full(vport, true))
10757 		dev_warn(&hdev->pdev->dev,
10758 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10759 			 vf);
10760 
10761 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10762 	if (ret)
10763 		return ret;
10764 
10765 	vport->vf_info.spoofchk = new_spoofchk;
10766 	return 0;
10767 }
10768 
10769 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10770 {
10771 	struct hclge_vport *vport = hdev->vport;
10772 	int ret;
10773 	int i;
10774 
10775 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10776 		return 0;
10777 
10778 	/* resume the vf spoof check state after reset */
10779 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10780 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10781 					       vport->vf_info.spoofchk);
10782 		if (ret)
10783 			return ret;
10784 
10785 		vport++;
10786 	}
10787 
10788 	return 0;
10789 }
10790 
10791 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10792 {
10793 	struct hclge_vport *vport = hclge_get_vport(handle);
10794 	struct hclge_dev *hdev = vport->back;
10795 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
10796 	u32 new_trusted = enable ? 1 : 0;
10797 	bool en_bc_pmc;
10798 	int ret;
10799 
10800 	vport = hclge_get_vf_vport(hdev, vf);
10801 	if (!vport)
10802 		return -EINVAL;
10803 
10804 	if (vport->vf_info.trusted == new_trusted)
10805 		return 0;
10806 
10807 	/* Disable promisc mode for VF if it is not trusted any more. */
10808 	if (!enable && vport->vf_info.promisc_enable) {
10809 		en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
10810 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10811 						   en_bc_pmc);
10812 		if (ret)
10813 			return ret;
10814 		vport->vf_info.promisc_enable = 0;
10815 		hclge_inform_vf_promisc_info(vport);
10816 	}
10817 
10818 	vport->vf_info.trusted = new_trusted;
10819 
10820 	return 0;
10821 }
10822 
10823 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10824 {
10825 	int ret;
10826 	int vf;
10827 
10828 	/* reset vf rate to default value */
10829 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10830 		struct hclge_vport *vport = &hdev->vport[vf];
10831 
10832 		vport->vf_info.max_tx_rate = 0;
10833 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10834 		if (ret)
10835 			dev_err(&hdev->pdev->dev,
10836 				"vf%d failed to reset to default, ret=%d\n",
10837 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10838 	}
10839 }
10840 
10841 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
10842 				     int min_tx_rate, int max_tx_rate)
10843 {
10844 	if (min_tx_rate != 0 ||
10845 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10846 		dev_err(&hdev->pdev->dev,
10847 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10848 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10849 		return -EINVAL;
10850 	}
10851 
10852 	return 0;
10853 }
10854 
10855 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10856 			     int min_tx_rate, int max_tx_rate, bool force)
10857 {
10858 	struct hclge_vport *vport = hclge_get_vport(handle);
10859 	struct hclge_dev *hdev = vport->back;
10860 	int ret;
10861 
10862 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
10863 	if (ret)
10864 		return ret;
10865 
10866 	vport = hclge_get_vf_vport(hdev, vf);
10867 	if (!vport)
10868 		return -EINVAL;
10869 
10870 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10871 		return 0;
10872 
10873 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10874 	if (ret)
10875 		return ret;
10876 
10877 	vport->vf_info.max_tx_rate = max_tx_rate;
10878 
10879 	return 0;
10880 }
10881 
10882 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10883 {
10884 	struct hnae3_handle *handle = &hdev->vport->nic;
10885 	struct hclge_vport *vport;
10886 	int ret;
10887 	int vf;
10888 
10889 	/* resume the vf max_tx_rate after reset */
10890 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10891 		vport = hclge_get_vf_vport(hdev, vf);
10892 		if (!vport)
10893 			return -EINVAL;
10894 
		/* zero means the maximum rate; after reset the firmware has
		 * already restored the maximum rate, so just continue.
		 */
10898 		if (!vport->vf_info.max_tx_rate)
10899 			continue;
10900 
10901 		ret = hclge_set_vf_rate(handle, vf, 0,
10902 					vport->vf_info.max_tx_rate, true);
10903 		if (ret) {
10904 			dev_err(&hdev->pdev->dev,
10905 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10906 				vf, vport->vf_info.max_tx_rate, ret);
10907 			return ret;
10908 		}
10909 	}
10910 
10911 	return 0;
10912 }
10913 
10914 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10915 {
10916 	struct hclge_vport *vport = hdev->vport;
10917 	int i;
10918 
10919 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10920 		hclge_vport_stop(vport);
10921 		vport++;
10922 	}
10923 }
10924 
10925 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10926 {
10927 	struct hclge_dev *hdev = ae_dev->priv;
10928 	struct pci_dev *pdev = ae_dev->pdev;
10929 	int ret;
10930 
10931 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10932 
10933 	hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not need to clear or restore the PF and VF
	 * table entries, so do not clean the tables in memory here.
	 */
10937 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10938 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10939 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10940 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10941 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10942 		hclge_reset_umv_space(hdev);
10943 	}
10944 
10945 	ret = hclge_cmd_init(hdev);
10946 	if (ret) {
10947 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10948 		return ret;
10949 	}
10950 
10951 	ret = hclge_map_tqp(hdev);
10952 	if (ret) {
10953 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10954 		return ret;
10955 	}
10956 
10957 	ret = hclge_mac_init(hdev);
10958 	if (ret) {
10959 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10960 		return ret;
10961 	}
10962 
10963 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10964 	if (ret) {
10965 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10966 		return ret;
10967 	}
10968 
10969 	ret = hclge_config_gro(hdev, true);
10970 	if (ret)
10971 		return ret;
10972 
10973 	ret = hclge_init_vlan_config(hdev);
10974 	if (ret) {
10975 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10976 		return ret;
10977 	}
10978 
10979 	ret = hclge_tm_init_hw(hdev, true);
10980 	if (ret) {
10981 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10982 		return ret;
10983 	}
10984 
10985 	ret = hclge_rss_init_hw(hdev);
10986 	if (ret) {
10987 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10988 		return ret;
10989 	}
10990 
10991 	ret = init_mgr_tbl(hdev);
10992 	if (ret) {
10993 		dev_err(&pdev->dev,
10994 			"failed to reinit manager table, ret = %d\n", ret);
10995 		return ret;
10996 	}
10997 
10998 	ret = hclge_init_fd_config(hdev);
10999 	if (ret) {
11000 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11001 		return ret;
11002 	}
11003 
	/* Log and clear the hw errors that have already occurred */
11005 	hclge_handle_all_hns_hw_errors(ae_dev);
11006 
11007 	/* Re-enable the hw error interrupts because
11008 	 * the interrupts get disabled on global reset.
11009 	 */
11010 	ret = hclge_config_nic_hw_error(hdev, true);
11011 	if (ret) {
11012 		dev_err(&pdev->dev,
11013 			"fail(%d) to re-enable NIC hw error interrupts\n",
11014 			ret);
11015 		return ret;
11016 	}
11017 
11018 	if (hdev->roce_client) {
11019 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11020 		if (ret) {
11021 			dev_err(&pdev->dev,
11022 				"fail(%d) to re-enable roce ras interrupts\n",
11023 				ret);
11024 			return ret;
11025 		}
11026 	}
11027 
11028 	hclge_reset_vport_state(hdev);
11029 	ret = hclge_reset_vport_spoofchk(hdev);
11030 	if (ret)
11031 		return ret;
11032 
11033 	ret = hclge_resume_vf_rate(hdev);
11034 	if (ret)
11035 		return ret;
11036 
11037 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11038 		 HCLGE_DRIVER_NAME);
11039 
11040 	return 0;
11041 }
11042 
11043 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11044 {
11045 	struct hclge_dev *hdev = ae_dev->priv;
11046 	struct hclge_mac *mac = &hdev->hw.mac;
11047 
11048 	hclge_reset_vf_rate(hdev);
11049 	hclge_clear_vf_vlan(hdev);
11050 	hclge_misc_affinity_teardown(hdev);
11051 	hclge_state_uninit(hdev);
11052 	hclge_uninit_mac_table(hdev);
11053 
11054 	if (mac->phydev)
11055 		mdiobus_unregister(mac->mdio_bus);
11056 
11057 	/* Disable MISC vector(vector0) */
11058 	hclge_enable_vector(&hdev->misc_vector, false);
11059 	synchronize_irq(hdev->misc_vector.vector_irq);
11060 
11061 	/* Disable all hw interrupts */
11062 	hclge_config_mac_tnl_int(hdev, false);
11063 	hclge_config_nic_hw_error(hdev, false);
11064 	hclge_config_rocee_ras_interrupt(hdev, false);
11065 
11066 	hclge_cmd_uninit(hdev);
11067 	hclge_misc_irq_uninit(hdev);
11068 	hclge_pci_uninit(hdev);
11069 	mutex_destroy(&hdev->vport_lock);
11070 	hclge_uninit_vport_vlan_table(hdev);
11071 	ae_dev->priv = NULL;
11072 }
11073 
11074 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11075 {
11076 	struct hclge_vport *vport = hclge_get_vport(handle);
11077 	struct hclge_dev *hdev = vport->back;
11078 
11079 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11080 }
11081 
11082 static void hclge_get_channels(struct hnae3_handle *handle,
11083 			       struct ethtool_channels *ch)
11084 {
11085 	ch->max_combined = hclge_get_max_channels(handle);
11086 	ch->other_count = 1;
11087 	ch->max_other = 1;
11088 	ch->combined_count = handle->kinfo.rss_size;
11089 }
11090 
11091 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11092 					u16 *alloc_tqps, u16 *max_rss_size)
11093 {
11094 	struct hclge_vport *vport = hclge_get_vport(handle);
11095 	struct hclge_dev *hdev = vport->back;
11096 
11097 	*alloc_tqps = vport->alloc_tqps;
11098 	*max_rss_size = hdev->pf_rss_size_max;
11099 }
11100 
11101 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11102 			      bool rxfh_configured)
11103 {
11104 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11105 	struct hclge_vport *vport = hclge_get_vport(handle);
11106 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11107 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11108 	struct hclge_dev *hdev = vport->back;
11109 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11110 	u16 cur_rss_size = kinfo->rss_size;
11111 	u16 cur_tqps = kinfo->num_tqps;
11112 	u16 tc_valid[HCLGE_MAX_TC_NUM];
11113 	u16 roundup_size;
11114 	u32 *rss_indir;
11115 	unsigned int i;
11116 	int ret;
11117 
11118 	kinfo->req_rss_size = new_tqps_num;
11119 
11120 	ret = hclge_tm_vport_map_update(hdev);
11121 	if (ret) {
11122 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11123 		return ret;
11124 	}
11125 
11126 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
11127 	roundup_size = ilog2(roundup_size);
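	/* tc_size is programmed as the log2 of the queue count rounded up to
	 * a power of two, e.g. a requested rss_size of 10 is rounded up to 16
	 * and stored as ilog2(16) = 4, while tc_offset stays based on the
	 * real rss_size.
	 */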
11128 	/* Set the RSS TC mode according to the new RSS size */
11129 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11130 		tc_valid[i] = 0;
11131 
11132 		if (!(hdev->hw_tc_map & BIT(i)))
11133 			continue;
11134 
11135 		tc_valid[i] = 1;
11136 		tc_size[i] = roundup_size;
11137 		tc_offset[i] = kinfo->rss_size * i;
11138 	}
11139 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
11140 	if (ret)
11141 		return ret;
11142 
	/* RSS indirection table has been configured by the user */
11144 	if (rxfh_configured)
11145 		goto out;
11146 
	/* Reinitialize the RSS indirection table according to the new RSS size */
11148 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11149 			    GFP_KERNEL);
11150 	if (!rss_indir)
11151 		return -ENOMEM;
11152 
11153 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11154 		rss_indir[i] = i % kinfo->rss_size;
11155 
11156 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11157 	if (ret)
11158 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11159 			ret);
11160 
11161 	kfree(rss_indir);
11162 
11163 out:
11164 	if (!ret)
11165 		dev_info(&hdev->pdev->dev,
11166 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
11167 			 cur_rss_size, kinfo->rss_size,
11168 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
11169 
11170 	return ret;
11171 }
11172 
11173 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
11174 			      u32 *regs_num_64_bit)
11175 {
11176 	struct hclge_desc desc;
11177 	u32 total_num;
11178 	int ret;
11179 
11180 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
11181 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11182 	if (ret) {
11183 		dev_err(&hdev->pdev->dev,
11184 			"Query register number cmd failed, ret = %d.\n", ret);
11185 		return ret;
11186 	}
11187 
11188 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
11189 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
11190 
11191 	total_num = *regs_num_32_bit + *regs_num_64_bit;
11192 	if (!total_num)
11193 		return -EINVAL;
11194 
11195 	return 0;
11196 }
11197 
11198 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11199 				 void *data)
11200 {
11201 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
11202 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
11203 
11204 	struct hclge_desc *desc;
11205 	u32 *reg_val = data;
11206 	__le32 *desc_data;
11207 	int nodata_num;
11208 	int cmd_num;
11209 	int i, k, n;
11210 	int ret;
11211 
11212 	if (regs_num == 0)
11213 		return 0;
11214 
11215 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
11216 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
11217 			       HCLGE_32_BIT_REG_RTN_DATANUM);
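	/* The first BD returns only (HCLGE_32_BIT_REG_RTN_DATANUM -
	 * HCLGE_32_BIT_DESC_NODATA_LEN) = 6 register words, while every
	 * following BD is parsed as a full 8 words (see the loop below),
	 * which is why nodata_num extra slots are reserved above. For
	 * example, 30 registers need DIV_ROUND_UP(30 + 2, 8) = 4 BDs.
	 */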
11218 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11219 	if (!desc)
11220 		return -ENOMEM;
11221 
11222 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
11223 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11224 	if (ret) {
11225 		dev_err(&hdev->pdev->dev,
11226 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
11227 		kfree(desc);
11228 		return ret;
11229 	}
11230 
11231 	for (i = 0; i < cmd_num; i++) {
11232 		if (i == 0) {
11233 			desc_data = (__le32 *)(&desc[i].data[0]);
11234 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
11235 		} else {
11236 			desc_data = (__le32 *)(&desc[i]);
11237 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
11238 		}
11239 		for (k = 0; k < n; k++) {
11240 			*reg_val++ = le32_to_cpu(*desc_data++);
11241 
11242 			regs_num--;
11243 			if (!regs_num)
11244 				break;
11245 		}
11246 	}
11247 
11248 	kfree(desc);
11249 	return 0;
11250 }
11251 
11252 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
11253 				 void *data)
11254 {
11255 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
11256 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
11257 
11258 	struct hclge_desc *desc;
11259 	u64 *reg_val = data;
11260 	__le64 *desc_data;
11261 	int nodata_len;
11262 	int cmd_num;
11263 	int i, k, n;
11264 	int ret;
11265 
11266 	if (regs_num == 0)
11267 		return 0;
11268 
11269 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
11270 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
11271 			       HCLGE_64_BIT_REG_RTN_DATANUM);
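	/* Same layout as the 32 bit query: the first BD carries one u64 less
	 * than the following BDs, hence the extra nodata slot above.
	 */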
11272 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
11273 	if (!desc)
11274 		return -ENOMEM;
11275 
11276 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
11277 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
11278 	if (ret) {
11279 		dev_err(&hdev->pdev->dev,
11280 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
11281 		kfree(desc);
11282 		return ret;
11283 	}
11284 
11285 	for (i = 0; i < cmd_num; i++) {
11286 		if (i == 0) {
11287 			desc_data = (__le64 *)(&desc[i].data[0]);
11288 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
11289 		} else {
11290 			desc_data = (__le64 *)(&desc[i]);
11291 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
11292 		}
11293 		for (k = 0; k < n; k++) {
11294 			*reg_val++ = le64_to_cpu(*desc_data++);
11295 
11296 			regs_num--;
11297 			if (!regs_num)
11298 				break;
11299 		}
11300 	}
11301 
11302 	kfree(desc);
11303 	return 0;
11304 }
11305 
11306 #define MAX_SEPARATE_NUM	4
11307 #define SEPARATOR_VALUE		0xFDFCFBFA
11308 #define REG_NUM_PER_LINE	4
11309 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
11310 #define REG_SEPARATOR_LINE	1
11311 #define REG_NUM_REMAIN_MASK	3
11312 #define BD_LIST_MAX_NUM		30
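
/* The ethtool register dump is organised in lines of REG_NUM_PER_LINE u32
 * words. Each register block is padded with SEPARATOR_VALUE words up to a
 * line boundary; a block that already ends on a boundary gets a whole
 * separator line, which makes the blocks easy to tell apart in the dump.
 */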
11313 
11314 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
11315 {
11316 	int i;
11317 
	/* initialize all command BDs except the last one */
11319 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
11320 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
11321 					   true);
11322 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11323 	}
11324 
11325 	/* initialize the last command BD */
11326 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
11327 
11328 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
11329 }
11330 
11331 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
11332 				    int *bd_num_list,
11333 				    u32 type_num)
11334 {
11335 	u32 entries_per_desc, desc_index, index, offset, i;
11336 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
11337 	int ret;
11338 
11339 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
11340 	if (ret) {
11341 		dev_err(&hdev->pdev->dev,
11342 			"Get dfx bd num fail, status is %d.\n", ret);
11343 		return ret;
11344 	}
11345 
11346 	entries_per_desc = ARRAY_SIZE(desc[0].data);
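	/* Each BD-number word is addressed by its offset in
	 * hclge_dfx_bd_offset_list: with the usual 6 data words per
	 * descriptor this splits into desc_index = offset / 6 and
	 * index = offset % 6, e.g. offset 9 (HCLGE_DFX_PPP_BD_OFFSET)
	 * lands in desc[1].data[3].
	 */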
11347 	for (i = 0; i < type_num; i++) {
11348 		offset = hclge_dfx_bd_offset_list[i];
11349 		index = offset % entries_per_desc;
11350 		desc_index = offset / entries_per_desc;
11351 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
11352 	}
11353 
11354 	return ret;
11355 }
11356 
11357 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
11358 				  struct hclge_desc *desc_src, int bd_num,
11359 				  enum hclge_opcode_type cmd)
11360 {
11361 	struct hclge_desc *desc = desc_src;
11362 	int i, ret;
11363 
11364 	hclge_cmd_setup_basic_desc(desc, cmd, true);
11365 	for (i = 0; i < bd_num - 1; i++) {
11366 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11367 		desc++;
11368 		hclge_cmd_setup_basic_desc(desc, cmd, true);
11369 	}
11370 
11371 	desc = desc_src;
11372 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
11373 	if (ret)
11374 		dev_err(&hdev->pdev->dev,
11375 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
11376 			cmd, ret);
11377 
11378 	return ret;
11379 }
11380 
11381 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
11382 				    void *data)
11383 {
11384 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
11385 	struct hclge_desc *desc = desc_src;
11386 	u32 *reg = data;
11387 
11388 	entries_per_desc = ARRAY_SIZE(desc->data);
11389 	reg_num = entries_per_desc * bd_num;
11390 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
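	/* The values are copied out linearly and then padded with
	 * SEPARATOR_VALUE words up to a REG_NUM_PER_LINE boundary; when
	 * reg_num is already a multiple of the line size, a full separator
	 * line is appended.
	 */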
11391 	for (i = 0; i < reg_num; i++) {
11392 		index = i % entries_per_desc;
11393 		desc_index = i / entries_per_desc;
11394 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
11395 	}
11396 	for (i = 0; i < separator_num; i++)
11397 		*reg++ = SEPARATOR_VALUE;
11398 
11399 	return reg_num + separator_num;
11400 }
11401 
11402 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
11403 {
11404 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11405 	int data_len_per_desc, bd_num, i;
11406 	int bd_num_list[BD_LIST_MAX_NUM];
11407 	u32 data_len;
11408 	int ret;
11409 
11410 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11411 	if (ret) {
11412 		dev_err(&hdev->pdev->dev,
11413 			"Get dfx reg bd num fail, status is %d.\n", ret);
11414 		return ret;
11415 	}
11416 
11417 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
11418 	*len = 0;
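	/* Reserve (data_len / REG_LEN_PER_LINE + 1) whole lines per register
	 * type: enough for bd_num descriptors of data plus the separator
	 * padding added by hclge_dfx_reg_fetch_data().
	 */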
11419 	for (i = 0; i < dfx_reg_type_num; i++) {
11420 		bd_num = bd_num_list[i];
11421 		data_len = data_len_per_desc * bd_num;
11422 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
11423 	}
11424 
11425 	return ret;
11426 }
11427 
11428 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
11429 {
11430 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
11431 	int bd_num, bd_num_max, buf_len, i;
11432 	int bd_num_list[BD_LIST_MAX_NUM];
11433 	struct hclge_desc *desc_src;
11434 	u32 *reg = data;
11435 	int ret;
11436 
11437 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
11438 	if (ret) {
11439 		dev_err(&hdev->pdev->dev,
11440 			"Get dfx reg bd num fail, status is %d.\n", ret);
11441 		return ret;
11442 	}
11443 
11444 	bd_num_max = bd_num_list[0];
11445 	for (i = 1; i < dfx_reg_type_num; i++)
11446 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
11447 
11448 	buf_len = sizeof(*desc_src) * bd_num_max;
11449 	desc_src = kzalloc(buf_len, GFP_KERNEL);
11450 	if (!desc_src)
11451 		return -ENOMEM;
11452 
11453 	for (i = 0; i < dfx_reg_type_num; i++) {
11454 		bd_num = bd_num_list[i];
11455 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
11456 					     hclge_dfx_reg_opcode_list[i]);
11457 		if (ret) {
11458 			dev_err(&hdev->pdev->dev,
11459 				"Get dfx reg fail, status is %d.\n", ret);
11460 			break;
11461 		}
11462 
11463 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
11464 	}
11465 
11466 	kfree(desc_src);
11467 	return ret;
11468 }
11469 
11470 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
11471 			      struct hnae3_knic_private_info *kinfo)
11472 {
11473 #define HCLGE_RING_REG_OFFSET		0x200
11474 #define HCLGE_RING_INT_REG_OFFSET	0x4
11475 
11476 	int i, j, reg_num, separator_num;
11477 	int data_num_sum;
11478 	u32 *reg = data;
11479 
	/* fetch per-PF register values from the PF PCIe register space */
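	/* The buffer is filled in four blocks: CMDQ registers, common
	 * registers, per-ring registers (one copy per TQP, spaced
	 * HCLGE_RING_REG_OFFSET apart) and per-vector interrupt registers
	 * (hdev->num_msi_used - 1 copies, spaced HCLGE_RING_INT_REG_OFFSET
	 * apart), each padded with SEPARATOR_VALUE words.
	 */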
11481 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
11482 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11483 	for (i = 0; i < reg_num; i++)
11484 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
11485 	for (i = 0; i < separator_num; i++)
11486 		*reg++ = SEPARATOR_VALUE;
11487 	data_num_sum = reg_num + separator_num;
11488 
11489 	reg_num = ARRAY_SIZE(common_reg_addr_list);
11490 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11491 	for (i = 0; i < reg_num; i++)
11492 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
11493 	for (i = 0; i < separator_num; i++)
11494 		*reg++ = SEPARATOR_VALUE;
11495 	data_num_sum += reg_num + separator_num;
11496 
11497 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
11498 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11499 	for (j = 0; j < kinfo->num_tqps; j++) {
11500 		for (i = 0; i < reg_num; i++)
11501 			*reg++ = hclge_read_dev(&hdev->hw,
11502 						ring_reg_addr_list[i] +
11503 						HCLGE_RING_REG_OFFSET * j);
11504 		for (i = 0; i < separator_num; i++)
11505 			*reg++ = SEPARATOR_VALUE;
11506 	}
11507 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
11508 
11509 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
11510 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11511 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
11512 		for (i = 0; i < reg_num; i++)
11513 			*reg++ = hclge_read_dev(&hdev->hw,
11514 						tqp_intr_reg_addr_list[i] +
11515 						HCLGE_RING_INT_REG_OFFSET * j);
11516 		for (i = 0; i < separator_num; i++)
11517 			*reg++ = SEPARATOR_VALUE;
11518 	}
11519 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
11520 
11521 	return data_num_sum;
11522 }
11523 
11524 static int hclge_get_regs_len(struct hnae3_handle *handle)
11525 {
11526 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
11527 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11528 	struct hclge_vport *vport = hclge_get_vport(handle);
11529 	struct hclge_dev *hdev = vport->back;
11530 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
11531 	int regs_lines_32_bit, regs_lines_64_bit;
11532 	int ret;
11533 
11534 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11535 	if (ret) {
11536 		dev_err(&hdev->pdev->dev,
11537 			"Get register number failed, ret = %d.\n", ret);
11538 		return ret;
11539 	}
11540 
11541 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
11542 	if (ret) {
11543 		dev_err(&hdev->pdev->dev,
11544 			"Get dfx reg len failed, ret = %d.\n", ret);
11545 		return ret;
11546 	}
11547 
11548 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
11549 		REG_SEPARATOR_LINE;
11550 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
11551 		REG_SEPARATOR_LINE;
11552 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
11553 		REG_SEPARATOR_LINE;
11554 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
11555 		REG_SEPARATOR_LINE;
11556 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
11557 		REG_SEPARATOR_LINE;
11558 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
11559 		REG_SEPARATOR_LINE;
11560 
11561 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
11562 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
11563 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
11564 }
11565 
11566 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
11567 			   void *data)
11568 {
11569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
11570 	struct hclge_vport *vport = hclge_get_vport(handle);
11571 	struct hclge_dev *hdev = vport->back;
11572 	u32 regs_num_32_bit, regs_num_64_bit;
11573 	int i, reg_num, separator_num, ret;
11574 	u32 *reg = data;
11575 
11576 	*version = hdev->fw_version;
11577 
11578 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
11579 	if (ret) {
11580 		dev_err(&hdev->pdev->dev,
11581 			"Get register number failed, ret = %d.\n", ret);
11582 		return;
11583 	}
11584 
11585 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
11586 
11587 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
11588 	if (ret) {
11589 		dev_err(&hdev->pdev->dev,
11590 			"Get 32 bit register failed, ret = %d.\n", ret);
11591 		return;
11592 	}
11593 	reg_num = regs_num_32_bit;
11594 	reg += reg_num;
11595 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11596 	for (i = 0; i < separator_num; i++)
11597 		*reg++ = SEPARATOR_VALUE;
11598 
11599 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11600 	if (ret) {
11601 		dev_err(&hdev->pdev->dev,
11602 			"Get 64 bit register failed, ret = %d.\n", ret);
11603 		return;
11604 	}
11605 	reg_num = regs_num_64_bit * 2;
11606 	reg += reg_num;
11607 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11608 	for (i = 0; i < separator_num; i++)
11609 		*reg++ = SEPARATOR_VALUE;
11610 
11611 	ret = hclge_get_dfx_reg(hdev, reg);
11612 	if (ret)
11613 		dev_err(&hdev->pdev->dev,
11614 			"Get dfx register failed, ret = %d.\n", ret);
11615 }
11616 
11617 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11618 {
11619 	struct hclge_set_led_state_cmd *req;
11620 	struct hclge_desc desc;
11621 	int ret;
11622 
11623 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11624 
11625 	req = (struct hclge_set_led_state_cmd *)desc.data;
11626 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11627 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11628 
11629 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11630 	if (ret)
11631 		dev_err(&hdev->pdev->dev,
11632 			"Send set led state cmd error, ret =%d\n", ret);
11633 
11634 	return ret;
11635 }
11636 
11637 enum hclge_led_status {
11638 	HCLGE_LED_OFF,
11639 	HCLGE_LED_ON,
11640 	HCLGE_LED_NO_CHANGE = 0xFF,
11641 };
11642 
11643 static int hclge_set_led_id(struct hnae3_handle *handle,
11644 			    enum ethtool_phys_id_state status)
11645 {
11646 	struct hclge_vport *vport = hclge_get_vport(handle);
11647 	struct hclge_dev *hdev = vport->back;
11648 
11649 	switch (status) {
11650 	case ETHTOOL_ID_ACTIVE:
11651 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11652 	case ETHTOOL_ID_INACTIVE:
11653 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11654 	default:
11655 		return -EINVAL;
11656 	}
11657 }
11658 
11659 static void hclge_get_link_mode(struct hnae3_handle *handle,
11660 				unsigned long *supported,
11661 				unsigned long *advertising)
11662 {
11663 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11664 	struct hclge_vport *vport = hclge_get_vport(handle);
11665 	struct hclge_dev *hdev = vport->back;
11666 	unsigned int idx = 0;
11667 
11668 	for (; idx < size; idx++) {
11669 		supported[idx] = hdev->hw.mac.supported[idx];
11670 		advertising[idx] = hdev->hw.mac.advertising[idx];
11671 	}
11672 }
11673 
11674 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11675 {
11676 	struct hclge_vport *vport = hclge_get_vport(handle);
11677 	struct hclge_dev *hdev = vport->back;
11678 
11679 	return hclge_config_gro(hdev, enable);
11680 }
11681 
11682 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11683 {
11684 	struct hclge_vport *vport = &hdev->vport[0];
11685 	struct hnae3_handle *handle = &vport->nic;
11686 	u8 tmp_flags;
11687 	int ret;
11688 
11689 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11690 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11691 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11692 	}
11693 
11694 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11695 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11696 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11697 					     tmp_flags & HNAE3_MPE);
11698 		if (!ret) {
11699 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11700 			hclge_enable_vlan_filter(handle,
11701 						 tmp_flags & HNAE3_VLAN_FLTR);
11702 		}
11703 	}
11704 }
11705 
11706 static bool hclge_module_existed(struct hclge_dev *hdev)
11707 {
11708 	struct hclge_desc desc;
11709 	u32 existed;
11710 	int ret;
11711 
11712 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11713 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11714 	if (ret) {
11715 		dev_err(&hdev->pdev->dev,
11716 			"failed to get SFP exist state, ret = %d\n", ret);
11717 		return false;
11718 	}
11719 
11720 	existed = le32_to_cpu(desc.data[0]);
11721 
11722 	return existed != 0;
11723 }
11724 
/* One read needs 6 BDs (140 bytes in total).
 * Return the number of bytes actually read; 0 means the read failed.
 */
11728 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11729 				     u32 len, u8 *data)
11730 {
11731 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11732 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11733 	u16 read_len;
11734 	u16 copy_len;
11735 	int ret;
11736 	int i;
11737 
11738 	/* setup all 6 bds to read module eeprom info. */
11739 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11740 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11741 					   true);
11742 
11743 		/* bd0~bd4 need next flag */
11744 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11745 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11746 	}
11747 
	/* set up bd0, which carries the offset and the read length. */
11749 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11750 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11751 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11752 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11753 
11754 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11755 	if (ret) {
11756 		dev_err(&hdev->pdev->dev,
11757 			"failed to get SFP eeprom info, ret = %d\n", ret);
11758 		return 0;
11759 	}
11760 
11761 	/* copy sfp info from bd0 to out buffer. */
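	/* BD0 carries fewer data bytes than BD1~BD5 because its leading words
	 * hold the offset and read length; HCLGE_SFP_INFO_BD0_LEN and
	 * HCLGE_SFP_INFO_BDX_LEN (presumably 20 and 24 bytes, which adds up
	 * to the 140 bytes noted above) reflect that split.
	 */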
11762 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11763 	memcpy(data, sfp_info_bd0->data, copy_len);
11764 	read_len = copy_len;
11765 
11766 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11767 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11768 		if (read_len >= len)
11769 			return read_len;
11770 
11771 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11772 		memcpy(data + read_len, desc[i].data, copy_len);
11773 		read_len += copy_len;
11774 	}
11775 
11776 	return read_len;
11777 }
11778 
11779 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11780 				   u32 len, u8 *data)
11781 {
11782 	struct hclge_vport *vport = hclge_get_vport(handle);
11783 	struct hclge_dev *hdev = vport->back;
11784 	u32 read_len = 0;
11785 	u16 data_len;
11786 
11787 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11788 		return -EOPNOTSUPP;
11789 
11790 	if (!hclge_module_existed(hdev))
11791 		return -ENXIO;
11792 
11793 	while (read_len < len) {
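		/* one firmware read returns at most HCLGE_SFP_INFO_MAX_LEN
		 * bytes (see hclge_get_sfp_eeprom_info()), so keep reading
		 * until the requested length is reached
		 */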
11794 		data_len = hclge_get_sfp_eeprom_info(hdev,
11795 						     offset + read_len,
11796 						     len - read_len,
11797 						     data + read_len);
11798 		if (!data_len)
11799 			return -EIO;
11800 
11801 		read_len += data_len;
11802 	}
11803 
11804 	return 0;
11805 }
11806 
11807 static const struct hnae3_ae_ops hclge_ops = {
11808 	.init_ae_dev = hclge_init_ae_dev,
11809 	.uninit_ae_dev = hclge_uninit_ae_dev,
11810 	.flr_prepare = hclge_flr_prepare,
11811 	.flr_done = hclge_flr_done,
11812 	.init_client_instance = hclge_init_client_instance,
11813 	.uninit_client_instance = hclge_uninit_client_instance,
11814 	.map_ring_to_vector = hclge_map_ring_to_vector,
11815 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11816 	.get_vector = hclge_get_vector,
11817 	.put_vector = hclge_put_vector,
11818 	.set_promisc_mode = hclge_set_promisc_mode,
11819 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11820 	.set_loopback = hclge_set_loopback,
11821 	.start = hclge_ae_start,
11822 	.stop = hclge_ae_stop,
11823 	.client_start = hclge_client_start,
11824 	.client_stop = hclge_client_stop,
11825 	.get_status = hclge_get_status,
11826 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11827 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11828 	.get_media_type = hclge_get_media_type,
11829 	.check_port_speed = hclge_check_port_speed,
11830 	.get_fec = hclge_get_fec,
11831 	.set_fec = hclge_set_fec,
11832 	.get_rss_key_size = hclge_get_rss_key_size,
11833 	.get_rss = hclge_get_rss,
11834 	.set_rss = hclge_set_rss,
11835 	.set_rss_tuple = hclge_set_rss_tuple,
11836 	.get_rss_tuple = hclge_get_rss_tuple,
11837 	.get_tc_size = hclge_get_tc_size,
11838 	.get_mac_addr = hclge_get_mac_addr,
11839 	.set_mac_addr = hclge_set_mac_addr,
11840 	.do_ioctl = hclge_do_ioctl,
11841 	.add_uc_addr = hclge_add_uc_addr,
11842 	.rm_uc_addr = hclge_rm_uc_addr,
11843 	.add_mc_addr = hclge_add_mc_addr,
11844 	.rm_mc_addr = hclge_rm_mc_addr,
11845 	.set_autoneg = hclge_set_autoneg,
11846 	.get_autoneg = hclge_get_autoneg,
11847 	.restart_autoneg = hclge_restart_autoneg,
11848 	.halt_autoneg = hclge_halt_autoneg,
11849 	.get_pauseparam = hclge_get_pauseparam,
11850 	.set_pauseparam = hclge_set_pauseparam,
11851 	.set_mtu = hclge_set_mtu,
11852 	.reset_queue = hclge_reset_tqp,
11853 	.get_stats = hclge_get_stats,
11854 	.get_mac_stats = hclge_get_mac_stat,
11855 	.update_stats = hclge_update_stats,
11856 	.get_strings = hclge_get_strings,
11857 	.get_sset_count = hclge_get_sset_count,
11858 	.get_fw_version = hclge_get_fw_version,
11859 	.get_mdix_mode = hclge_get_mdix_mode,
11860 	.enable_vlan_filter = hclge_enable_vlan_filter,
11861 	.set_vlan_filter = hclge_set_vlan_filter,
11862 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11863 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11864 	.reset_event = hclge_reset_event,
11865 	.get_reset_level = hclge_get_reset_level,
11866 	.set_default_reset_request = hclge_set_def_reset_request,
11867 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11868 	.set_channels = hclge_set_channels,
11869 	.get_channels = hclge_get_channels,
11870 	.get_regs_len = hclge_get_regs_len,
11871 	.get_regs = hclge_get_regs,
11872 	.set_led_id = hclge_set_led_id,
11873 	.get_link_mode = hclge_get_link_mode,
11874 	.add_fd_entry = hclge_add_fd_entry,
11875 	.del_fd_entry = hclge_del_fd_entry,
11876 	.del_all_fd_entries = hclge_del_all_fd_entries,
11877 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11878 	.get_fd_rule_info = hclge_get_fd_rule_info,
11879 	.get_fd_all_rules = hclge_get_all_rules,
11880 	.enable_fd = hclge_enable_fd,
11881 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11882 	.dbg_run_cmd = hclge_dbg_run_cmd,
11883 	.dbg_read_cmd = hclge_dbg_read_cmd,
11884 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11885 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11886 	.ae_dev_resetting = hclge_ae_dev_resetting,
11887 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11888 	.set_gro_en = hclge_gro_en,
11889 	.get_global_queue_id = hclge_covert_handle_qid_global,
11890 	.set_timer_task = hclge_set_timer_task,
11891 	.mac_connect_phy = hclge_mac_connect_phy,
11892 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11893 	.get_vf_config = hclge_get_vf_config,
11894 	.set_vf_link_state = hclge_set_vf_link_state,
11895 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11896 	.set_vf_trust = hclge_set_vf_trust,
11897 	.set_vf_rate = hclge_set_vf_rate,
11898 	.set_vf_mac = hclge_set_vf_mac,
11899 	.get_module_eeprom = hclge_get_module_eeprom,
11900 	.get_cmdq_stat = hclge_get_cmdq_stat,
11901 	.add_cls_flower = hclge_add_cls_flower,
11902 	.del_cls_flower = hclge_del_cls_flower,
11903 	.cls_flower_active = hclge_is_cls_flower_active,
11904 };
11905 
11906 static struct hnae3_ae_algo ae_algo = {
11907 	.ops = &hclge_ops,
11908 	.pdev_id_table = ae_algo_pci_tbl,
11909 };
11910 
11911 static int hclge_init(void)
11912 {
11913 	pr_info("%s is initializing\n", HCLGE_NAME);
11914 
11915 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11916 	if (!hclge_wq) {
11917 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11918 		return -ENOMEM;
11919 	}
11920 
11921 	hnae3_register_ae_algo(&ae_algo);
11922 
11923 	return 0;
11924 }
11925 
11926 static void hclge_exit(void)
11927 {
11928 	hnae3_unregister_ae_algo(&ae_algo);
11929 	destroy_workqueue(hclge_wq);
11930 }
11931 module_init(hclge_init);
11932 module_exit(hclge_exit);
11933 
11934 MODULE_LICENSE("GPL");
11935 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11936 MODULE_DESCRIPTION("HCLGE Driver");
11937 MODULE_VERSION(HCLGE_MOD_VERSION);
11938