1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 
27 #define HCLGE_NAME			"hclge"
28 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
29 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
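/* Illustrative note (editor's addition, not driver code): the two helpers
 * above are meant to be combined, e.g.
 *
 *	u64 val = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num));
 *
 * offsetof() yields the byte offset of the named field inside
 * struct hclge_mac_stats and HCLGE_STATS_READ() dereferences that offset
 * as a u64; hclge_comm_get_stats() below relies on exactly this pairing.
 */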
30 
31 #define HCLGE_BUF_SIZE_UNIT	256U
32 #define HCLGE_BUF_MUL_BY	2
33 #define HCLGE_BUF_DIV_BY	2
34 #define NEED_RESERVE_TC_NUM	2
35 #define BUF_MAX_PERCENT		100
36 #define BUF_RESERVE_PERCENT	90
37 
38 #define HCLGE_RESET_MAX_FAIL_CNT	5
39 #define HCLGE_RESET_SYNC_TIME		100
40 #define HCLGE_PF_RESET_SYNC_TIME	20
41 #define HCLGE_PF_RESET_SYNC_CNT		1500
42 
43 /* Get DFX BD number offset */
44 #define HCLGE_DFX_BIOS_BD_OFFSET        1
45 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
46 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
47 #define HCLGE_DFX_IGU_BD_OFFSET         4
48 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
49 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
50 #define HCLGE_DFX_NCSI_BD_OFFSET        7
51 #define HCLGE_DFX_RTC_BD_OFFSET         8
52 #define HCLGE_DFX_PPP_BD_OFFSET         9
53 #define HCLGE_DFX_RCB_BD_OFFSET         10
54 #define HCLGE_DFX_TQP_BD_OFFSET         11
55 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
56 
57 #define HCLGE_LINK_STATUS_MS	10
58 
59 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
60 static int hclge_init_vlan_config(struct hclge_dev *hdev);
61 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
62 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
63 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
64 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
65 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
66 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
67 						   unsigned long *addr);
68 static int hclge_set_default_loopback(struct hclge_dev *hdev);
69 
70 static void hclge_sync_mac_table(struct hclge_dev *hdev);
71 static void hclge_restore_hw_table(struct hclge_dev *hdev);
72 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
73 static void hclge_sync_fd_table(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
88 	/* required last entry */
89 	{0, }
90 };
91 
92 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
93 
94 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
95 					 HCLGE_CMDQ_TX_ADDR_H_REG,
96 					 HCLGE_CMDQ_TX_DEPTH_REG,
97 					 HCLGE_CMDQ_TX_TAIL_REG,
98 					 HCLGE_CMDQ_TX_HEAD_REG,
99 					 HCLGE_CMDQ_RX_ADDR_L_REG,
100 					 HCLGE_CMDQ_RX_ADDR_H_REG,
101 					 HCLGE_CMDQ_RX_DEPTH_REG,
102 					 HCLGE_CMDQ_RX_TAIL_REG,
103 					 HCLGE_CMDQ_RX_HEAD_REG,
104 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
105 					 HCLGE_CMDQ_INTR_STS_REG,
106 					 HCLGE_CMDQ_INTR_EN_REG,
107 					 HCLGE_CMDQ_INTR_GEN_REG};
108 
109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
110 					   HCLGE_VECTOR0_OTER_EN_REG,
111 					   HCLGE_MISC_RESET_STS_REG,
112 					   HCLGE_MISC_VECTOR_INT_STS,
113 					   HCLGE_GLOBAL_RESET_REG,
114 					   HCLGE_FUN_RST_ING,
115 					   HCLGE_GRO_EN_REG};
116 
117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
118 					 HCLGE_RING_RX_ADDR_H_REG,
119 					 HCLGE_RING_RX_BD_NUM_REG,
120 					 HCLGE_RING_RX_BD_LENGTH_REG,
121 					 HCLGE_RING_RX_MERGE_EN_REG,
122 					 HCLGE_RING_RX_TAIL_REG,
123 					 HCLGE_RING_RX_HEAD_REG,
124 					 HCLGE_RING_RX_FBD_NUM_REG,
125 					 HCLGE_RING_RX_OFFSET_REG,
126 					 HCLGE_RING_RX_FBD_OFFSET_REG,
127 					 HCLGE_RING_RX_STASH_REG,
128 					 HCLGE_RING_RX_BD_ERR_REG,
129 					 HCLGE_RING_TX_ADDR_L_REG,
130 					 HCLGE_RING_TX_ADDR_H_REG,
131 					 HCLGE_RING_TX_BD_NUM_REG,
132 					 HCLGE_RING_TX_PRIORITY_REG,
133 					 HCLGE_RING_TX_TC_REG,
134 					 HCLGE_RING_TX_MERGE_EN_REG,
135 					 HCLGE_RING_TX_TAIL_REG,
136 					 HCLGE_RING_TX_HEAD_REG,
137 					 HCLGE_RING_TX_FBD_NUM_REG,
138 					 HCLGE_RING_TX_OFFSET_REG,
139 					 HCLGE_RING_TX_EBD_NUM_REG,
140 					 HCLGE_RING_TX_EBD_OFFSET_REG,
141 					 HCLGE_RING_TX_BD_ERR_REG,
142 					 HCLGE_RING_EN_REG};
143 
144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
145 					     HCLGE_TQP_INTR_GL0_REG,
146 					     HCLGE_TQP_INTR_GL1_REG,
147 					     HCLGE_TQP_INTR_GL2_REG,
148 					     HCLGE_TQP_INTR_RL_REG};
149 
150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
151 	"App    Loopback test",
152 	"Serdes serial Loopback test",
153 	"Serdes parallel Loopback test",
154 	"Phy    Loopback test"
155 };
156 
157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
158 	{"mac_tx_mac_pause_num",
159 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
160 	{"mac_rx_mac_pause_num",
161 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
162 	{"mac_tx_control_pkt_num",
163 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
164 	{"mac_rx_control_pkt_num",
165 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
166 	{"mac_tx_pfc_pkt_num",
167 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
168 	{"mac_tx_pfc_pri0_pkt_num",
169 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
170 	{"mac_tx_pfc_pri1_pkt_num",
171 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
172 	{"mac_tx_pfc_pri2_pkt_num",
173 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
174 	{"mac_tx_pfc_pri3_pkt_num",
175 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
176 	{"mac_tx_pfc_pri4_pkt_num",
177 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
178 	{"mac_tx_pfc_pri5_pkt_num",
179 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
180 	{"mac_tx_pfc_pri6_pkt_num",
181 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
182 	{"mac_tx_pfc_pri7_pkt_num",
183 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
184 	{"mac_rx_pfc_pkt_num",
185 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
186 	{"mac_rx_pfc_pri0_pkt_num",
187 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
188 	{"mac_rx_pfc_pri1_pkt_num",
189 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
190 	{"mac_rx_pfc_pri2_pkt_num",
191 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
192 	{"mac_rx_pfc_pri3_pkt_num",
193 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
194 	{"mac_rx_pfc_pri4_pkt_num",
195 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
196 	{"mac_rx_pfc_pri5_pkt_num",
197 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
198 	{"mac_rx_pfc_pri6_pkt_num",
199 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
200 	{"mac_rx_pfc_pri7_pkt_num",
201 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
202 	{"mac_tx_total_pkt_num",
203 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
204 	{"mac_tx_total_oct_num",
205 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
206 	{"mac_tx_good_pkt_num",
207 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
208 	{"mac_tx_bad_pkt_num",
209 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
210 	{"mac_tx_good_oct_num",
211 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
212 	{"mac_tx_bad_oct_num",
213 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
214 	{"mac_tx_uni_pkt_num",
215 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
216 	{"mac_tx_multi_pkt_num",
217 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
218 	{"mac_tx_broad_pkt_num",
219 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
220 	{"mac_tx_undersize_pkt_num",
221 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
222 	{"mac_tx_oversize_pkt_num",
223 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
224 	{"mac_tx_64_oct_pkt_num",
225 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
226 	{"mac_tx_65_127_oct_pkt_num",
227 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
228 	{"mac_tx_128_255_oct_pkt_num",
229 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
230 	{"mac_tx_256_511_oct_pkt_num",
231 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
232 	{"mac_tx_512_1023_oct_pkt_num",
233 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
234 	{"mac_tx_1024_1518_oct_pkt_num",
235 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
236 	{"mac_tx_1519_2047_oct_pkt_num",
237 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
238 	{"mac_tx_2048_4095_oct_pkt_num",
239 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
240 	{"mac_tx_4096_8191_oct_pkt_num",
241 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
242 	{"mac_tx_8192_9216_oct_pkt_num",
243 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
244 	{"mac_tx_9217_12287_oct_pkt_num",
245 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
246 	{"mac_tx_12288_16383_oct_pkt_num",
247 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
248 	{"mac_tx_1519_max_good_pkt_num",
249 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
250 	{"mac_tx_1519_max_bad_pkt_num",
251 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
252 	{"mac_rx_total_pkt_num",
253 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
254 	{"mac_rx_total_oct_num",
255 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
256 	{"mac_rx_good_pkt_num",
257 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
258 	{"mac_rx_bad_pkt_num",
259 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
260 	{"mac_rx_good_oct_num",
261 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
262 	{"mac_rx_bad_oct_num",
263 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
264 	{"mac_rx_uni_pkt_num",
265 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
266 	{"mac_rx_multi_pkt_num",
267 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
268 	{"mac_rx_broad_pkt_num",
269 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
270 	{"mac_rx_undersize_pkt_num",
271 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
272 	{"mac_rx_oversize_pkt_num",
273 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
274 	{"mac_rx_64_oct_pkt_num",
275 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
276 	{"mac_rx_65_127_oct_pkt_num",
277 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
278 	{"mac_rx_128_255_oct_pkt_num",
279 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
280 	{"mac_rx_256_511_oct_pkt_num",
281 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
282 	{"mac_rx_512_1023_oct_pkt_num",
283 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
284 	{"mac_rx_1024_1518_oct_pkt_num",
285 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
286 	{"mac_rx_1519_2047_oct_pkt_num",
287 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
288 	{"mac_rx_2048_4095_oct_pkt_num",
289 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
290 	{"mac_rx_4096_8191_oct_pkt_num",
291 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
292 	{"mac_rx_8192_9216_oct_pkt_num",
293 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
294 	{"mac_rx_9217_12287_oct_pkt_num",
295 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
296 	{"mac_rx_12288_16383_oct_pkt_num",
297 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
298 	{"mac_rx_1519_max_good_pkt_num",
299 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
300 	{"mac_rx_1519_max_bad_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
302 
303 	{"mac_tx_fragment_pkt_num",
304 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
305 	{"mac_tx_undermin_pkt_num",
306 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
307 	{"mac_tx_jabber_pkt_num",
308 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
309 	{"mac_tx_err_all_pkt_num",
310 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
311 	{"mac_tx_from_app_good_pkt_num",
312 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
313 	{"mac_tx_from_app_bad_pkt_num",
314 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
315 	{"mac_rx_fragment_pkt_num",
316 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
317 	{"mac_rx_undermin_pkt_num",
318 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
319 	{"mac_rx_jabber_pkt_num",
320 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
321 	{"mac_rx_fcs_err_pkt_num",
322 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
323 	{"mac_rx_send_app_good_pkt_num",
324 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
325 	{"mac_rx_send_app_bad_pkt_num",
326 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
327 };
328 
329 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
330 	{
331 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
332 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
333 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
334 		.i_port_bitmap = 0x1,
335 	},
336 };
337 
338 static const u8 hclge_hash_key[] = {
339 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
340 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
341 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
342 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
343 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
344 };
345 
346 static const u32 hclge_dfx_bd_offset_list[] = {
347 	HCLGE_DFX_BIOS_BD_OFFSET,
348 	HCLGE_DFX_SSU_0_BD_OFFSET,
349 	HCLGE_DFX_SSU_1_BD_OFFSET,
350 	HCLGE_DFX_IGU_BD_OFFSET,
351 	HCLGE_DFX_RPU_0_BD_OFFSET,
352 	HCLGE_DFX_RPU_1_BD_OFFSET,
353 	HCLGE_DFX_NCSI_BD_OFFSET,
354 	HCLGE_DFX_RTC_BD_OFFSET,
355 	HCLGE_DFX_PPP_BD_OFFSET,
356 	HCLGE_DFX_RCB_BD_OFFSET,
357 	HCLGE_DFX_TQP_BD_OFFSET,
358 	HCLGE_DFX_SSU_2_BD_OFFSET
359 };
360 
361 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
362 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
363 	HCLGE_OPC_DFX_SSU_REG_0,
364 	HCLGE_OPC_DFX_SSU_REG_1,
365 	HCLGE_OPC_DFX_IGU_EGU_REG,
366 	HCLGE_OPC_DFX_RPU_REG_0,
367 	HCLGE_OPC_DFX_RPU_REG_1,
368 	HCLGE_OPC_DFX_NCSI_REG,
369 	HCLGE_OPC_DFX_RTC_REG,
370 	HCLGE_OPC_DFX_PPP_REG,
371 	HCLGE_OPC_DFX_RCB_REG,
372 	HCLGE_OPC_DFX_TQP_REG,
373 	HCLGE_OPC_DFX_SSU_REG_2
374 };
375 
376 static const struct key_info meta_data_key_info[] = {
377 	{ PACKET_TYPE_ID, 6},
378 	{ IP_FRAGEMENT, 1},
379 	{ ROCE_TYPE, 1},
380 	{ NEXT_KEY, 5},
381 	{ VLAN_NUMBER, 2},
382 	{ SRC_VPORT, 12},
383 	{ DST_VPORT, 12},
384 	{ TUNNEL_PACKET, 1},
385 };
386 
387 static const struct key_info tuple_key_info[] = {
388 	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
389 	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
391 	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
392 	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
393 	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
394 	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
395 	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
396 	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
397 	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
398 	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
399 	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
400 	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
402 	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
403 	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
404 	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
405 	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
406 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
407 	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
408 	  offsetof(struct hclge_fd_rule, tuples.src_mac),
409 	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
410 	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
411 	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
412 	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
413 	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
414 	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
415 	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
416 	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
417 	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
418 	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
419 	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
420 	{ INNER_IP_TOS, 8, KEY_OPT_U8,
421 	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
422 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
423 	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
424 	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
425 	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
426 	{ INNER_SRC_IP, 32, KEY_OPT_IP,
427 	  offsetof(struct hclge_fd_rule, tuples.src_ip),
428 	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
429 	{ INNER_DST_IP, 32, KEY_OPT_IP,
430 	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
431 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
432 	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
433 	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
434 	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
435 	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
436 	  offsetof(struct hclge_fd_rule, tuples.src_port),
437 	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
438 	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
439 	  offsetof(struct hclge_fd_rule, tuples.dst_port),
440 	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
441 	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
442 	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
443 	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
444 };
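/* Illustrative note (editor's addition): each tuple_key_info entry above is
 * { tuple id, key width in bits, key option, offset of the tuple value in
 *   struct hclge_fd_rule, offset of its mask }, where -1 means the tuple has
 * no corresponding rule field. For example, the INNER_SRC_PORT entry
 * describes a 16-bit little-endian key taken from tuples.src_port and
 * masked by tuples_mask.src_port.
 */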
445 
446 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
447 {
448 #define HCLGE_MAC_CMD_NUM 21
449 
450 	u64 *data = (u64 *)(&hdev->mac_stats);
451 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
452 	__le64 *desc_data;
453 	int i, k, n;
454 	int ret;
455 
456 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
457 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
458 	if (ret) {
459 		dev_err(&hdev->pdev->dev,
460 			"Get MAC pkt stats fail, status = %d.\n", ret);
461 
462 		return ret;
463 	}
464 
465 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
466 		/* for special opcode 0032, only the first desc has the head */
467 		if (unlikely(i == 0)) {
468 			desc_data = (__le64 *)(&desc[i].data[0]);
469 			n = HCLGE_RD_FIRST_STATS_NUM;
470 		} else {
471 			desc_data = (__le64 *)(&desc[i]);
472 			n = HCLGE_RD_OTHER_STATS_NUM;
473 		}
474 
475 		for (k = 0; k < n; k++) {
476 			*data += le64_to_cpu(*desc_data);
477 			data++;
478 			desc_data++;
479 		}
480 	}
481 
482 	return 0;
483 }
484 
485 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
486 {
487 	u64 *data = (u64 *)(&hdev->mac_stats);
488 	struct hclge_desc *desc;
489 	__le64 *desc_data;
490 	u16 i, k, n;
491 	int ret;
492 
	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
496 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
497 	if (!desc)
498 		return -ENOMEM;
499 
500 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
501 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
502 	if (ret) {
503 		kfree(desc);
504 		return ret;
505 	}
506 
507 	for (i = 0; i < desc_num; i++) {
508 		/* for special opcode 0034, only the first desc has the head */
509 		if (i == 0) {
510 			desc_data = (__le64 *)(&desc[i].data[0]);
511 			n = HCLGE_RD_FIRST_STATS_NUM;
512 		} else {
513 			desc_data = (__le64 *)(&desc[i]);
514 			n = HCLGE_RD_OTHER_STATS_NUM;
515 		}
516 
517 		for (k = 0; k < n; k++) {
518 			*data += le64_to_cpu(*desc_data);
519 			data++;
520 			desc_data++;
521 		}
522 	}
523 
524 	kfree(desc);
525 
526 	return 0;
527 }
528 
529 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
530 {
531 	struct hclge_desc desc;
532 	__le32 *desc_data;
533 	u32 reg_num;
534 	int ret;
535 
536 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
538 	if (ret)
539 		return ret;
540 
541 	desc_data = (__le32 *)(&desc.data[0]);
542 	reg_num = le32_to_cpu(*desc_data);
543 
544 	*desc_num = 1 + ((reg_num - 3) >> 2) +
545 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
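	/* Editor's note (illustrative): the computation above is a plain
	 * ceiling division, equivalent (for reg_num >= 3) to
	 *
	 *	*desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4);
	 *
	 * i.e. presumably one descriptor for the first three statistics
	 * registers plus one more for every (up to) four remaining ones;
	 * the 3/4 split is inferred from the constants used here, not from
	 * documentation.
	 */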
546 
547 	return 0;
548 }
549 
550 static int hclge_mac_update_stats(struct hclge_dev *hdev)
551 {
552 	u32 desc_num;
553 	int ret;
554 
555 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
556 	/* The firmware supports the new statistics acquisition method */
557 	if (!ret)
558 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
559 	else if (ret == -EOPNOTSUPP)
560 		ret = hclge_mac_update_stats_defective(hdev);
561 	else
562 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
563 
564 	return ret;
565 }
566 
567 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
568 {
569 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
570 	struct hclge_vport *vport = hclge_get_vport(handle);
571 	struct hclge_dev *hdev = vport->back;
572 	struct hnae3_queue *queue;
573 	struct hclge_desc desc[1];
574 	struct hclge_tqp *tqp;
575 	int ret, i;
576 
577 	for (i = 0; i < kinfo->num_tqps; i++) {
578 		queue = handle->kinfo.tqp[i];
579 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_RX_STATS */
581 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
582 					   true);
583 
584 		desc[0].data[0] = cpu_to_le32(tqp->index);
585 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
586 		if (ret) {
587 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
589 				ret, i);
590 			return ret;
591 		}
592 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
593 			le32_to_cpu(desc[0].data[1]);
594 	}
595 
596 	for (i = 0; i < kinfo->num_tqps; i++) {
597 		queue = handle->kinfo.tqp[i];
598 		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_TX_STATS */
600 		hclge_cmd_setup_basic_desc(&desc[0],
601 					   HCLGE_OPC_QUERY_TX_STATS,
602 					   true);
603 
604 		desc[0].data[0] = cpu_to_le32(tqp->index);
605 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
606 		if (ret) {
607 			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
609 				ret, i);
610 			return ret;
611 		}
612 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
613 			le32_to_cpu(desc[0].data[1]);
614 	}
615 
616 	return 0;
617 }
618 
619 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	struct hclge_tqp *tqp;
623 	u64 *buff = data;
624 	int i;
625 
626 	for (i = 0; i < kinfo->num_tqps; i++) {
627 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
628 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
629 	}
630 
631 	for (i = 0; i < kinfo->num_tqps; i++) {
632 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
633 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
634 	}
635 
636 	return buff;
637 }
638 
639 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
640 {
641 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
642 
	/* each tqp has one TX queue and one RX queue */
	return kinfo->num_tqps * 2;
645 }
646 
647 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
648 {
649 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
650 	u8 *buff = data;
651 	int i;
652 
653 	for (i = 0; i < kinfo->num_tqps; i++) {
654 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
655 			struct hclge_tqp, q);
656 		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
657 			 tqp->index);
658 		buff = buff + ETH_GSTRING_LEN;
659 	}
660 
661 	for (i = 0; i < kinfo->num_tqps; i++) {
662 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
663 			struct hclge_tqp, q);
664 		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
665 			 tqp->index);
666 		buff = buff + ETH_GSTRING_LEN;
667 	}
668 
669 	return buff;
670 }
671 
672 static u64 *hclge_comm_get_stats(const void *comm_stats,
673 				 const struct hclge_comm_stats_str strs[],
674 				 int size, u64 *data)
675 {
676 	u64 *buf = data;
677 	u32 i;
678 
679 	for (i = 0; i < size; i++)
680 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
681 
682 	return buf + size;
683 }
684 
685 static u8 *hclge_comm_get_strings(u32 stringset,
686 				  const struct hclge_comm_stats_str strs[],
687 				  int size, u8 *data)
688 {
689 	char *buff = (char *)data;
690 	u32 i;
691 
692 	if (stringset != ETH_SS_STATS)
693 		return buff;
694 
695 	for (i = 0; i < size; i++) {
696 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
697 		buff = buff + ETH_GSTRING_LEN;
698 	}
699 
700 	return (u8 *)buff;
701 }
702 
703 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
704 {
705 	struct hnae3_handle *handle;
706 	int status;
707 
708 	handle = &hdev->vport[0].nic;
709 	if (handle->client) {
710 		status = hclge_tqps_update_stats(handle);
711 		if (status) {
712 			dev_err(&hdev->pdev->dev,
713 				"Update TQPS stats fail, status = %d.\n",
714 				status);
715 		}
716 	}
717 
718 	status = hclge_mac_update_stats(hdev);
719 	if (status)
720 		dev_err(&hdev->pdev->dev,
721 			"Update MAC stats fail, status = %d.\n", status);
722 }
723 
724 static void hclge_update_stats(struct hnae3_handle *handle,
725 			       struct net_device_stats *net_stats)
726 {
727 	struct hclge_vport *vport = hclge_get_vport(handle);
728 	struct hclge_dev *hdev = vport->back;
729 	int status;
730 
731 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
732 		return;
733 
734 	status = hclge_mac_update_stats(hdev);
735 	if (status)
736 		dev_err(&hdev->pdev->dev,
737 			"Update MAC stats fail, status = %d.\n",
738 			status);
739 
740 	status = hclge_tqps_update_stats(handle);
741 	if (status)
742 		dev_err(&hdev->pdev->dev,
743 			"Update TQPS stats fail, status = %d.\n",
744 			status);
745 
746 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
747 }
748 
749 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
750 {
751 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
752 		HNAE3_SUPPORT_PHY_LOOPBACK |\
753 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
754 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
755 
756 	struct hclge_vport *vport = hclge_get_vport(handle);
757 	struct hclge_dev *hdev = vport->back;
758 	int count = 0;
759 
	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
765 	if (stringset == ETH_SS_TEST) {
766 		/* clear loopback bit flags at first */
767 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
768 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
769 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
770 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
771 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
772 			count += 1;
773 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
774 		}
775 
776 		count += 2;
777 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
778 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
779 
780 		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
781 		     hdev->hw.mac.phydev->drv->set_loopback) ||
782 		    hnae3_dev_phy_imp_supported(hdev)) {
783 			count += 1;
784 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
785 		}
786 	} else if (stringset == ETH_SS_STATS) {
787 		count = ARRAY_SIZE(g_mac_stats_string) +
788 			hclge_tqps_get_sset_count(handle, stringset);
789 	}
790 
791 	return count;
792 }
793 
794 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
795 			      u8 *data)
796 {
797 	u8 *p = (char *)data;
798 	int size;
799 
800 	if (stringset == ETH_SS_STATS) {
801 		size = ARRAY_SIZE(g_mac_stats_string);
802 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
803 					   size, p);
804 		p = hclge_tqps_get_strings(handle, p);
805 	} else if (stringset == ETH_SS_TEST) {
806 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
807 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
808 			       ETH_GSTRING_LEN);
809 			p += ETH_GSTRING_LEN;
810 		}
811 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
812 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
813 			       ETH_GSTRING_LEN);
814 			p += ETH_GSTRING_LEN;
815 		}
816 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
817 			memcpy(p,
818 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
819 			       ETH_GSTRING_LEN);
820 			p += ETH_GSTRING_LEN;
821 		}
822 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
823 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
824 			       ETH_GSTRING_LEN);
825 			p += ETH_GSTRING_LEN;
826 		}
827 	}
828 }
829 
830 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
831 {
832 	struct hclge_vport *vport = hclge_get_vport(handle);
833 	struct hclge_dev *hdev = vport->back;
834 	u64 *p;
835 
836 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
837 				 ARRAY_SIZE(g_mac_stats_string), data);
838 	p = hclge_tqps_get_stats(handle, p);
839 }
840 
841 static void hclge_get_mac_stat(struct hnae3_handle *handle,
842 			       struct hns3_mac_stats *mac_stats)
843 {
844 	struct hclge_vport *vport = hclge_get_vport(handle);
845 	struct hclge_dev *hdev = vport->back;
846 
847 	hclge_update_stats(handle, NULL);
848 
849 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
850 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
851 }
852 
853 static int hclge_parse_func_status(struct hclge_dev *hdev,
854 				   struct hclge_func_status_cmd *status)
855 {
856 #define HCLGE_MAC_ID_MASK	0xF
857 
858 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
859 		return -EINVAL;
860 
861 	/* Set the pf to main pf */
862 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
863 		hdev->flag |= HCLGE_FLAG_MAIN;
864 	else
865 		hdev->flag &= ~HCLGE_FLAG_MAIN;
866 
867 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
868 	return 0;
869 }
870 
871 static int hclge_query_function_status(struct hclge_dev *hdev)
872 {
873 #define HCLGE_QUERY_MAX_CNT	5
874 
875 	struct hclge_func_status_cmd *req;
876 	struct hclge_desc desc;
877 	int timeout = 0;
878 	int ret;
879 
880 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
881 	req = (struct hclge_func_status_cmd *)desc.data;
882 
883 	do {
884 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
885 		if (ret) {
886 			dev_err(&hdev->pdev->dev,
887 				"query function status failed %d.\n", ret);
888 			return ret;
889 		}
890 
		/* Check if PF reset is done */
892 		if (req->pf_state)
893 			break;
894 		usleep_range(1000, 2000);
895 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
896 
897 	return hclge_parse_func_status(hdev, req);
898 }
899 
900 static int hclge_query_pf_resource(struct hclge_dev *hdev)
901 {
902 	struct hclge_pf_res_cmd *req;
903 	struct hclge_desc desc;
904 	int ret;
905 
906 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
907 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
908 	if (ret) {
909 		dev_err(&hdev->pdev->dev,
910 			"query pf resource failed %d.\n", ret);
911 		return ret;
912 	}
913 
914 	req = (struct hclge_pf_res_cmd *)desc.data;
915 	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
916 			 le16_to_cpu(req->ext_tqp_num);
917 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
918 
919 	if (req->tx_buf_size)
920 		hdev->tx_buf_size =
921 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
922 	else
923 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
924 
925 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
926 
927 	if (req->dv_buf_size)
928 		hdev->dv_buf_size =
929 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
930 	else
931 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
932 
933 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
934 
935 	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
936 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
937 		dev_err(&hdev->pdev->dev,
938 			"only %u msi resources available, not enough for pf(min:2).\n",
939 			hdev->num_nic_msi);
940 		return -EINVAL;
941 	}
942 
943 	if (hnae3_dev_roce_supported(hdev)) {
944 		hdev->num_roce_msi =
945 			le16_to_cpu(req->pf_intr_vector_number_roce);
946 
		/* PF should have both NIC vectors and RoCE vectors;
		 * NIC vectors are queued before RoCE vectors.
		 */
950 		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
951 	} else {
952 		hdev->num_msi = hdev->num_nic_msi;
953 	}
954 
955 	return 0;
956 }
957 
958 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
959 {
960 	switch (speed_cmd) {
961 	case 6:
962 		*speed = HCLGE_MAC_SPEED_10M;
963 		break;
964 	case 7:
965 		*speed = HCLGE_MAC_SPEED_100M;
966 		break;
967 	case 0:
968 		*speed = HCLGE_MAC_SPEED_1G;
969 		break;
970 	case 1:
971 		*speed = HCLGE_MAC_SPEED_10G;
972 		break;
973 	case 2:
974 		*speed = HCLGE_MAC_SPEED_25G;
975 		break;
976 	case 3:
977 		*speed = HCLGE_MAC_SPEED_40G;
978 		break;
979 	case 4:
980 		*speed = HCLGE_MAC_SPEED_50G;
981 		break;
982 	case 5:
983 		*speed = HCLGE_MAC_SPEED_100G;
984 		break;
985 	case 8:
986 		*speed = HCLGE_MAC_SPEED_200G;
987 		break;
988 	default:
989 		return -EINVAL;
990 	}
991 
992 	return 0;
993 }
994 
995 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
996 {
997 	struct hclge_vport *vport = hclge_get_vport(handle);
998 	struct hclge_dev *hdev = vport->back;
999 	u32 speed_ability = hdev->hw.mac.speed_ability;
1000 	u32 speed_bit = 0;
1001 
1002 	switch (speed) {
1003 	case HCLGE_MAC_SPEED_10M:
1004 		speed_bit = HCLGE_SUPPORT_10M_BIT;
1005 		break;
1006 	case HCLGE_MAC_SPEED_100M:
1007 		speed_bit = HCLGE_SUPPORT_100M_BIT;
1008 		break;
1009 	case HCLGE_MAC_SPEED_1G:
1010 		speed_bit = HCLGE_SUPPORT_1G_BIT;
1011 		break;
1012 	case HCLGE_MAC_SPEED_10G:
1013 		speed_bit = HCLGE_SUPPORT_10G_BIT;
1014 		break;
1015 	case HCLGE_MAC_SPEED_25G:
1016 		speed_bit = HCLGE_SUPPORT_25G_BIT;
1017 		break;
1018 	case HCLGE_MAC_SPEED_40G:
1019 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1020 		break;
1021 	case HCLGE_MAC_SPEED_50G:
1022 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1023 		break;
1024 	case HCLGE_MAC_SPEED_100G:
1025 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1026 		break;
1027 	case HCLGE_MAC_SPEED_200G:
1028 		speed_bit = HCLGE_SUPPORT_200G_BIT;
1029 		break;
1030 	default:
1031 		return -EINVAL;
1032 	}
1033 
1034 	if (speed_bit & speed_ability)
1035 		return 0;
1036 
1037 	return -EINVAL;
1038 }
1039 
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050 				 mac->supported);
1051 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053 				 mac->supported);
1054 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056 				 mac->supported);
1057 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059 				 mac->supported);
1060 }
1061 
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069 				 mac->supported);
1070 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072 				 mac->supported);
1073 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075 				 mac->supported);
1076 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080 		linkmode_set_bit(
1081 			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082 			mac->supported);
1083 }
1084 
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089 				 mac->supported);
1090 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092 				 mac->supported);
1093 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095 				 mac->supported);
1096 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098 				 mac->supported);
1099 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101 				 mac->supported);
1102 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104 				 mac->supported);
1105 }
1106 
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111 				 mac->supported);
1112 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114 				 mac->supported);
1115 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117 				 mac->supported);
1118 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120 				 mac->supported);
1121 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123 				 mac->supported);
1124 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126 				 mac->supported);
1127 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129 				 mac->supported);
1130 }
1131 
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136 
1137 	switch (mac->speed) {
1138 	case HCLGE_MAC_SPEED_10G:
1139 	case HCLGE_MAC_SPEED_40G:
1140 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141 				 mac->supported);
1142 		mac->fec_ability =
1143 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144 		break;
1145 	case HCLGE_MAC_SPEED_25G:
1146 	case HCLGE_MAC_SPEED_50G:
1147 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148 				 mac->supported);
1149 		mac->fec_ability =
1150 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 			BIT(HNAE3_FEC_AUTO);
1152 		break;
1153 	case HCLGE_MAC_SPEED_100G:
1154 	case HCLGE_MAC_SPEED_200G:
1155 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157 		break;
1158 	default:
1159 		mac->fec_ability = 0;
1160 		break;
1161 	}
1162 }
1163 
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165 					u16 speed_ability)
1166 {
1167 	struct hclge_mac *mac = &hdev->hw.mac;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171 				 mac->supported);
1172 
1173 	hclge_convert_setting_sr(mac, speed_ability);
1174 	hclge_convert_setting_lr(mac, speed_ability);
1175 	hclge_convert_setting_cr(mac, speed_ability);
1176 	if (hnae3_dev_fec_supported(hdev))
1177 		hclge_convert_setting_fec(mac);
1178 
1179 	if (hnae3_dev_pause_supported(hdev))
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181 
1182 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185 
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187 					    u16 speed_ability)
1188 {
1189 	struct hclge_mac *mac = &hdev->hw.mac;
1190 
1191 	hclge_convert_setting_kr(mac, speed_ability);
1192 	if (hnae3_dev_fec_supported(hdev))
1193 		hclge_convert_setting_fec(mac);
1194 
1195 	if (hnae3_dev_pause_supported(hdev))
1196 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197 
1198 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201 
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203 					 u16 speed_ability)
1204 {
1205 	unsigned long *supported = hdev->hw.mac.supported;
1206 
	/* default to supporting all speeds for a GE port */
1208 	if (!speed_ability)
1209 		speed_ability = HCLGE_SUPPORT_GE;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213 				 supported);
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217 				 supported);
1218 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219 				 supported);
1220 	}
1221 
1222 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225 	}
1226 
1227 	if (hnae3_dev_pause_supported(hdev)) {
1228 		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230 	}
1231 
1232 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235 
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238 	u8 media_type = hdev->hw.mac.media_type;
1239 
1240 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 		hclge_parse_copper_link_mode(hdev, speed_ability);
1244 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247 
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250 	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 		return HCLGE_MAC_SPEED_200G;
1252 
1253 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 		return HCLGE_MAC_SPEED_100G;
1255 
1256 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 		return HCLGE_MAC_SPEED_50G;
1258 
1259 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 		return HCLGE_MAC_SPEED_40G;
1261 
1262 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 		return HCLGE_MAC_SPEED_25G;
1264 
1265 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 		return HCLGE_MAC_SPEED_10G;
1267 
1268 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 		return HCLGE_MAC_SPEED_1G;
1270 
1271 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 		return HCLGE_MAC_SPEED_100M;
1273 
1274 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 		return HCLGE_MAC_SPEED_10M;
1276 
1277 	return HCLGE_MAC_SPEED_1G;
1278 }
1279 
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT		4096
1283 #define SPEED_ABILITY_EXT_SHIFT			8
1284 
1285 	struct hclge_cfg_param_cmd *req;
1286 	u64 mac_addr_tmp_high;
1287 	u16 speed_ability_ext;
1288 	u64 mac_addr_tmp;
1289 	unsigned int i;
1290 
1291 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292 
1293 	/* get the configuration */
1294 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297 					    HCLGE_CFG_TQP_DESC_N_M,
1298 					    HCLGE_CFG_TQP_DESC_N_S);
1299 
1300 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 					HCLGE_CFG_PHY_ADDR_M,
1302 					HCLGE_CFG_PHY_ADDR_S);
1303 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 					  HCLGE_CFG_MEDIA_TP_M,
1305 					  HCLGE_CFG_MEDIA_TP_S);
1306 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 					  HCLGE_CFG_RX_BUF_LEN_M,
1308 					  HCLGE_CFG_RX_BUF_LEN_S);
1309 	/* get mac_address */
1310 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312 					    HCLGE_CFG_MAC_ADDR_H_M,
1313 					    HCLGE_CFG_MAC_ADDR_H_S);
1314 
1315 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
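	/* Editor's note (illustrative): param[2] carries the low 32 bits of
	 * the MAC address and param[3] the high bits. Since mac_addr_tmp_high
	 * is a u64, the two-step shift is equivalent to
	 *
	 *	mac_addr_tmp |= mac_addr_tmp_high << 32;
	 *
	 * the split into (x << 31) << 1 presumably guards against an
	 * undefined 32-bit shift should the variable's type ever narrow.
	 */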
1316 
1317 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318 					     HCLGE_CFG_DEFAULT_SPEED_M,
1319 					     HCLGE_CFG_DEFAULT_SPEED_S);
1320 	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321 					       HCLGE_CFG_RSS_SIZE_M,
1322 					       HCLGE_CFG_RSS_SIZE_S);
1323 
1324 	for (i = 0; i < ETH_ALEN; i++)
1325 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326 
1327 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329 
1330 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331 					     HCLGE_CFG_SPEED_ABILITY_M,
1332 					     HCLGE_CFG_SPEED_ABILITY_S);
1333 	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334 					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335 					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336 	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337 
1338 	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339 					       HCLGE_CFG_VLAN_FLTR_CAP_M,
1340 					       HCLGE_CFG_VLAN_FLTR_CAP_S);
1341 
1342 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 	if (!cfg->umv_space)
1346 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1347 
1348 	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349 					       HCLGE_CFG_PF_RSS_SIZE_M,
1350 					       HCLGE_CFG_PF_RSS_SIZE_S);
1351 
	/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
	 * exponent instead of the value itself, which is more flexible for
	 * future changes and expansions.
	 * The VF max rss size field is HCLGE_CFG_RSS_SIZE_S, and a PF field
	 * of 0 does not make sense. In that case, PF and VF share the same
	 * max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
1359 	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360 			       1U << cfg->pf_rss_size_max :
1361 			       cfg->vf_rss_size_max;
1362 
1363 	/* The unit of the tx spare buffer size queried from configuration
1364 	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1365 	 * needed here.
1366 	 */
1367 	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369 						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370 	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1371 }
1372 
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1378 {
1379 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380 	struct hclge_cfg_param_cmd *req;
1381 	unsigned int i;
1382 	int ret;
1383 
1384 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1385 		u32 offset = 0;
1386 
1387 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1389 					   true);
1390 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be in units of 4 bytes when sent to hardware */
1393 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395 		req->offset = cpu_to_le32(offset);
1396 	}
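	/* Editor's note (illustrative): each descriptor above requests one
	 * HCLGE_CFG_RD_LEN_BYTES chunk, so for i = 1 the offset field encodes
	 * a byte offset of 1 * HCLGE_CFG_RD_LEN_BYTES and a read length of
	 * HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT 4-byte units.
	 */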
1397 
1398 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1399 	if (ret) {
1400 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1401 		return ret;
1402 	}
1403 
1404 	hclge_parse_cfg(hcfg, desc);
1405 
1406 	return 0;
1407 }
1408 
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1410 {
1411 #define HCLGE_MAX_NON_TSO_BD_NUM			8U
1412 
1413 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414 
1415 	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416 	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417 	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418 	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419 	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420 	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421 	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1422 }
1423 
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425 				  struct hclge_desc *desc)
1426 {
1427 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428 	struct hclge_dev_specs_0_cmd *req0;
1429 	struct hclge_dev_specs_1_cmd *req1;
1430 
1431 	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432 	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1433 
1434 	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435 	ae_dev->dev_specs.rss_ind_tbl_size =
1436 		le16_to_cpu(req0->rss_ind_tbl_size);
1437 	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438 	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439 	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440 	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441 	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442 	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1443 }
1444 
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1446 {
1447 	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1448 
1449 	if (!dev_specs->max_non_tso_bd_num)
1450 		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451 	if (!dev_specs->rss_ind_tbl_size)
1452 		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453 	if (!dev_specs->rss_key_size)
1454 		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455 	if (!dev_specs->max_tm_rate)
1456 		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457 	if (!dev_specs->max_qset_num)
1458 		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459 	if (!dev_specs->max_int_gl)
1460 		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461 	if (!dev_specs->max_frm_size)
1462 		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1463 }
1464 
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1466 {
1467 	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1468 	int ret;
1469 	int i;
1470 
1471 	/* set default specifications as devices lower than version V3 do not
1472 	 * support querying specifications from firmware.
1473 	 */
1474 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475 		hclge_set_default_dev_specs(hdev);
1476 		return 0;
1477 	}
1478 
1479 	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1481 					   true);
1482 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1483 	}
1484 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1485 
1486 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1487 	if (ret)
1488 		return ret;
1489 
1490 	hclge_parse_dev_specs(hdev, desc);
1491 	hclge_check_dev_specs(hdev);
1492 
1493 	return 0;
1494 }
1495 
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1497 {
1498 	int ret;
1499 
1500 	ret = hclge_query_function_status(hdev);
1501 	if (ret) {
1502 		dev_err(&hdev->pdev->dev,
1503 			"query function status error %d.\n", ret);
1504 		return ret;
1505 	}
1506 
1507 	/* get pf resource */
1508 	return hclge_query_pf_resource(hdev);
1509 }
1510 
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1512 {
1513 #define HCLGE_MIN_TX_DESC	64
1514 #define HCLGE_MIN_RX_DESC	64
1515 
1516 	if (!is_kdump_kernel())
1517 		return;
1518 
1519 	dev_info(&hdev->pdev->dev,
1520 		 "Running kdump kernel. Using minimal resources\n");
1521 
	/* the minimal number of queue pairs equals the number of vports */
1523 	hdev->num_tqps = hdev->num_req_vfs + 1;
1524 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1526 }
1527 
1528 static int hclge_configure(struct hclge_dev *hdev)
1529 {
1530 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531 	struct hclge_cfg cfg;
1532 	unsigned int i;
1533 	int ret;
1534 
1535 	ret = hclge_get_cfg(hdev, &cfg);
1536 	if (ret)
1537 		return ret;
1538 
1539 	hdev->base_tqp_pid = 0;
1540 	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1541 	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1542 	hdev->rx_buf_len = cfg.rx_buf_len;
1543 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1544 	hdev->hw.mac.media_type = cfg.media_type;
1545 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1546 	hdev->num_tx_desc = cfg.tqp_desc_num;
1547 	hdev->num_rx_desc = cfg.tqp_desc_num;
1548 	hdev->tm_info.num_pg = 1;
1549 	hdev->tc_max = cfg.tc_num;
1550 	hdev->tm_info.hw_pfc_map = 0;
1551 	hdev->wanted_umv_size = cfg.umv_space;
1552 	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1553 	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1554 		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1555 
1556 	if (hnae3_dev_fd_supported(hdev)) {
1557 		hdev->fd_en = true;
1558 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1559 	}
1560 
1561 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1562 	if (ret) {
1563 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1564 			cfg.default_speed, ret);
1565 		return ret;
1566 	}
1567 
1568 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1569 
1570 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1571 
1572 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1573 	    (hdev->tc_max < 1)) {
1574 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1575 			 hdev->tc_max);
1576 		hdev->tc_max = 1;
1577 	}
1578 
1579 	/* Dev does not support DCB */
1580 	if (!hnae3_dev_dcb_supported(hdev)) {
1581 		hdev->tc_max = 1;
1582 		hdev->pfc_max = 0;
1583 	} else {
1584 		hdev->pfc_max = hdev->tc_max;
1585 	}
1586 
1587 	hdev->tm_info.num_tc = 1;
1588 
	/* Non-contiguous TCs are currently not supported */
1590 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1591 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1592 
1593 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1594 
1595 	hclge_init_kdump_kernel_config(hdev);
1596 
1597 	/* Set the init affinity based on pci func number */
1598 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1599 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1600 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1601 			&hdev->affinity_mask);
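	/* Editor's note (illustrative, hypothetical numbers): the lines above
	 * spread PFs across the CPUs of the device's NUMA node, e.g. PCI
	 * function 3 on a node with 8 local CPUs selects local CPU index
	 * 3 % 8 = 3 via cpumask_local_spread().
	 */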
1602 
1603 	return ret;
1604 }
1605 
1606 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1607 			    u16 tso_mss_max)
1608 {
1609 	struct hclge_cfg_tso_status_cmd *req;
1610 	struct hclge_desc desc;
1611 
1612 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1613 
1614 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1615 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1616 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1617 
1618 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1619 }
1620 
1621 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1622 {
1623 	struct hclge_cfg_gro_status_cmd *req;
1624 	struct hclge_desc desc;
1625 	int ret;
1626 
1627 	if (!hnae3_dev_gro_supported(hdev))
1628 		return 0;
1629 
1630 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1631 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1632 
1633 	req->gro_en = en ? 1 : 0;
1634 
1635 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1636 	if (ret)
1637 		dev_err(&hdev->pdev->dev,
1638 			"GRO hardware config cmd failed, ret = %d\n", ret);
1639 
1640 	return ret;
1641 }
1642 
1643 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1644 {
1645 	struct hclge_tqp *tqp;
1646 	int i;
1647 
1648 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1649 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1650 	if (!hdev->htqp)
1651 		return -ENOMEM;
1652 
1653 	tqp = hdev->htqp;
1654 
1655 	for (i = 0; i < hdev->num_tqps; i++) {
1656 		tqp->dev = &hdev->pdev->dev;
1657 		tqp->index = i;
1658 
1659 		tqp->q.ae_algo = &ae_algo;
1660 		tqp->q.buf_size = hdev->rx_buf_len;
1661 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1662 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1663 
1664 		/* need an extended offset to configure queues >=
1665 		 * HCLGE_TQP_MAX_SIZE_DEV_V2
1666 		 */
1667 		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1668 			tqp->q.io_base = hdev->hw.io_base +
1669 					 HCLGE_TQP_REG_OFFSET +
1670 					 i * HCLGE_TQP_REG_SIZE;
1671 		else
1672 			tqp->q.io_base = hdev->hw.io_base +
1673 					 HCLGE_TQP_REG_OFFSET +
1674 					 HCLGE_TQP_EXT_REG_OFFSET +
1675 					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1676 					 HCLGE_TQP_REG_SIZE;
1677 
1678 		tqp++;
1679 	}
1680 
1681 	return 0;
1682 }
1683 
1684 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1685 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1686 {
1687 	struct hclge_tqp_map_cmd *req;
1688 	struct hclge_desc desc;
1689 	int ret;
1690 
1691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1692 
1693 	req = (struct hclge_tqp_map_cmd *)desc.data;
1694 	req->tqp_id = cpu_to_le16(tqp_pid);
1695 	req->tqp_vf = func_id;
1696 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
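	/* the map-type bit marks this queue as a VF queue; PF queues leave it clear */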
1697 	if (!is_pf)
1698 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1699 	req->tqp_vid = cpu_to_le16(tqp_vid);
1700 
1701 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1702 	if (ret)
1703 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1704 
1705 	return ret;
1706 }
1707 
1708 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1709 {
1710 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1711 	struct hclge_dev *hdev = vport->back;
1712 	int i, alloced;
1713 
1714 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1715 	     alloced < num_tqps; i++) {
1716 		if (!hdev->htqp[i].alloced) {
1717 			hdev->htqp[i].q.handle = &vport->nic;
1718 			hdev->htqp[i].q.tqp_index = alloced;
1719 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1720 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1721 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1722 			hdev->htqp[i].alloced = true;
1723 			alloced++;
1724 		}
1725 	}
1726 	vport->alloc_tqps = alloced;
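	/* rss_size is limited by the PF RSS size and by the number of TQPs
	 * that can be assigned to each TC
	 */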
1727 	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1728 				vport->alloc_tqps / hdev->tm_info.num_tc);
1729 
1730 	/* ensure a one-to-one mapping between irq vector and queue by default */
1731 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1732 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1733 
1734 	return 0;
1735 }
1736 
1737 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1738 			    u16 num_tx_desc, u16 num_rx_desc)
1740 {
1741 	struct hnae3_handle *nic = &vport->nic;
1742 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1743 	struct hclge_dev *hdev = vport->back;
1744 	int ret;
1745 
1746 	kinfo->num_tx_desc = num_tx_desc;
1747 	kinfo->num_rx_desc = num_rx_desc;
1748 
1749 	kinfo->rx_buf_len = hdev->rx_buf_len;
1750 	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1751 
1752 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1753 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1754 	if (!kinfo->tqp)
1755 		return -ENOMEM;
1756 
1757 	ret = hclge_assign_tqp(vport, num_tqps);
1758 	if (ret)
1759 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1760 
1761 	return ret;
1762 }
1763 
1764 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1765 				  struct hclge_vport *vport)
1766 {
1767 	struct hnae3_handle *nic = &vport->nic;
1768 	struct hnae3_knic_private_info *kinfo;
1769 	u16 i;
1770 
1771 	kinfo = &nic->kinfo;
1772 	for (i = 0; i < vport->alloc_tqps; i++) {
1773 		struct hclge_tqp *q =
1774 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1775 		bool is_pf;
1776 		int ret;
1777 
1778 		is_pf = !(vport->vport_id);
1779 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1780 					     i, is_pf);
1781 		if (ret)
1782 			return ret;
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 static int hclge_map_tqp(struct hclge_dev *hdev)
1789 {
1790 	struct hclge_vport *vport = hdev->vport;
1791 	u16 i, num_vport;
1792 
1793 	num_vport = hdev->num_req_vfs + 1;
1794 	for (i = 0; i < num_vport; i++)	{
1795 		int ret;
1796 
1797 		ret = hclge_map_tqp_to_vport(hdev, vport);
1798 		if (ret)
1799 			return ret;
1800 
1801 		vport++;
1802 	}
1803 
1804 	return 0;
1805 }
1806 
1807 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1808 {
1809 	struct hnae3_handle *nic = &vport->nic;
1810 	struct hclge_dev *hdev = vport->back;
1811 	int ret;
1812 
1813 	nic->pdev = hdev->pdev;
1814 	nic->ae_algo = &ae_algo;
1815 	nic->numa_node_mask = hdev->numa_node_mask;
1816 
1817 	ret = hclge_knic_setup(vport, num_tqps,
1818 			       hdev->num_tx_desc, hdev->num_rx_desc);
1819 	if (ret)
1820 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1821 
1822 	return ret;
1823 }
1824 
1825 static int hclge_alloc_vport(struct hclge_dev *hdev)
1826 {
1827 	struct pci_dev *pdev = hdev->pdev;
1828 	struct hclge_vport *vport;
1829 	u32 tqp_main_vport;
1830 	u32 tqp_per_vport;
1831 	int num_vport, i;
1832 	int ret;
1833 
1834 	/* We need to alloc a vport for the main NIC of the PF */
1835 	num_vport = hdev->num_req_vfs + 1;
1836 
1837 	if (hdev->num_tqps < num_vport) {
1838 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1839 			hdev->num_tqps, num_vport);
1840 		return -EINVAL;
1841 	}
1842 
1843 	/* Alloc the same number of TQPs for every vport; the main vport gets the remainder */
1844 	tqp_per_vport = hdev->num_tqps / num_vport;
1845 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1846 
1847 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1848 			     GFP_KERNEL);
1849 	if (!vport)
1850 		return -ENOMEM;
1851 
1852 	hdev->vport = vport;
1853 	hdev->num_alloc_vport = num_vport;
1854 
1855 	if (IS_ENABLED(CONFIG_PCI_IOV))
1856 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1857 
1858 	for (i = 0; i < num_vport; i++) {
1859 		vport->back = hdev;
1860 		vport->vport_id = i;
1861 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1862 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1863 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1864 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1865 		vport->req_vlan_fltr_en = true;
1866 		INIT_LIST_HEAD(&vport->vlan_list);
1867 		INIT_LIST_HEAD(&vport->uc_mac_list);
1868 		INIT_LIST_HEAD(&vport->mc_mac_list);
1869 		spin_lock_init(&vport->mac_list_lock);
1870 
1871 		if (i == 0)
1872 			ret = hclge_vport_setup(vport, tqp_main_vport);
1873 		else
1874 			ret = hclge_vport_setup(vport, tqp_per_vport);
1875 		if (ret) {
1876 			dev_err(&pdev->dev,
1877 				"vport setup failed for vport %d, %d\n",
1878 				i, ret);
1879 			return ret;
1880 		}
1881 
1882 		vport++;
1883 	}
1884 
1885 	return 0;
1886 }
1887 
1888 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1889 				    struct hclge_pkt_buf_alloc *buf_alloc)
1890 {
1891 /* TX buffer size is allocated in units of 128 bytes */
1892 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1893 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1894 	struct hclge_tx_buff_alloc_cmd *req;
1895 	struct hclge_desc desc;
1896 	int ret;
1897 	u8 i;
1898 
1899 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1900 
1901 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, false);
1902 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1903 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1904 
1905 		req->tx_pkt_buff[i] =
1906 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1907 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1908 	}
1909 
1910 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1911 	if (ret)
1912 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1913 			ret);
1914 
1915 	return ret;
1916 }
1917 
1918 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1919 				 struct hclge_pkt_buf_alloc *buf_alloc)
1920 {
1921 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1922 
1923 	if (ret)
1924 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1925 
1926 	return ret;
1927 }
1928 
1929 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1930 {
1931 	unsigned int i;
1932 	u32 cnt = 0;
1933 
1934 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1935 		if (hdev->hw_tc_map & BIT(i))
1936 			cnt++;
1937 	return cnt;
1938 }
1939 
1940 /* Get the number of PFC-enabled TCs that have a private buffer */
1941 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1942 				  struct hclge_pkt_buf_alloc *buf_alloc)
1943 {
1944 	struct hclge_priv_buf *priv;
1945 	unsigned int i;
1946 	int cnt = 0;
1947 
1948 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1949 		priv = &buf_alloc->priv_buf[i];
1950 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1951 		    priv->enable)
1952 			cnt++;
1953 	}
1954 
1955 	return cnt;
1956 }
1957 
1958 /* Get the number of PFC-disabled TCs that have a private buffer */
1959 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1960 				     struct hclge_pkt_buf_alloc *buf_alloc)
1961 {
1962 	struct hclge_priv_buf *priv;
1963 	unsigned int i;
1964 	int cnt = 0;
1965 
1966 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1967 		priv = &buf_alloc->priv_buf[i];
1968 		if (hdev->hw_tc_map & BIT(i) &&
1969 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1970 		    priv->enable)
1971 			cnt++;
1972 	}
1973 
1974 	return cnt;
1975 }
1976 
1977 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1978 {
1979 	struct hclge_priv_buf *priv;
1980 	u32 rx_priv = 0;
1981 	int i;
1982 
1983 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1984 		priv = &buf_alloc->priv_buf[i];
1985 		if (priv->enable)
1986 			rx_priv += priv->buf_size;
1987 	}
1988 	return rx_priv;
1989 }
1990 
1991 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1992 {
1993 	u32 i, total_tx_size = 0;
1994 
1995 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1996 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1997 
1998 	return total_tx_size;
1999 }
2000 
2001 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2002 				struct hclge_pkt_buf_alloc *buf_alloc,
2003 				u32 rx_all)
2004 {
2005 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2006 	u32 tc_num = hclge_get_tc_num(hdev);
2007 	u32 shared_buf, aligned_mps;
2008 	u32 rx_priv;
2009 	int i;
2010 
2011 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2012 
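	/* the shared buffer must cover the larger of the device minimum
	 * (MPS multiples plus the dv buffer) and one MPS per enabled TC plus
	 * one extra MPS
	 */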
2013 	if (hnae3_dev_dcb_supported(hdev))
2014 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2015 					hdev->dv_buf_size;
2016 	else
2017 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2018 					+ hdev->dv_buf_size;
2019 
2020 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2021 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2022 			     HCLGE_BUF_SIZE_UNIT);
2023 
2024 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2025 	if (rx_all < rx_priv + shared_std)
2026 		return false;
2027 
2028 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2029 	buf_alloc->s_buf.buf_size = shared_buf;
2030 	if (hnae3_dev_dcb_supported(hdev)) {
2031 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2032 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2033 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2034 				  HCLGE_BUF_SIZE_UNIT);
2035 	} else {
2036 		buf_alloc->s_buf.self.high = aligned_mps +
2037 						HCLGE_NON_DCB_ADDITIONAL_BUF;
2038 		buf_alloc->s_buf.self.low = aligned_mps;
2039 	}
2040 
2041 	if (hnae3_dev_dcb_supported(hdev)) {
2042 		hi_thrd = shared_buf - hdev->dv_buf_size;
2043 
2044 		if (tc_num <= NEED_RESERVE_TC_NUM)
2045 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2046 					/ BUF_MAX_PERCENT;
2047 
2048 		if (tc_num)
2049 			hi_thrd = hi_thrd / tc_num;
2050 
2051 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2052 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2053 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2054 	} else {
2055 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2056 		lo_thrd = aligned_mps;
2057 	}
2058 
2059 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2060 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2061 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2062 	}
2063 
2064 	return true;
2065 }
2066 
2067 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2068 				struct hclge_pkt_buf_alloc *buf_alloc)
2069 {
2070 	u32 i, total_size;
2071 
2072 	total_size = hdev->pkt_buf_size;
2073 
2074 	/* alloc tx buffer for each enabled TC */
2075 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2076 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2077 
2078 		if (hdev->hw_tc_map & BIT(i)) {
2079 			if (total_size < hdev->tx_buf_size)
2080 				return -ENOMEM;
2081 
2082 			priv->tx_buf_size = hdev->tx_buf_size;
2083 		} else {
2084 			priv->tx_buf_size = 0;
2085 		}
2086 
2087 		total_size -= priv->tx_buf_size;
2088 	}
2089 
2090 	return 0;
2091 }
2092 
2093 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2094 				  struct hclge_pkt_buf_alloc *buf_alloc)
2095 {
2096 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2097 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2098 	unsigned int i;
2099 
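	/* assign private buffer and waterlines per enabled TC: PFC TCs get a
	 * non-zero low waterline, non-PFC TCs only a high waterline; "max"
	 * selects the larger candidate waterlines
	 */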
2100 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2101 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2102 
2103 		priv->enable = 0;
2104 		priv->wl.low = 0;
2105 		priv->wl.high = 0;
2106 		priv->buf_size = 0;
2107 
2108 		if (!(hdev->hw_tc_map & BIT(i)))
2109 			continue;
2110 
2111 		priv->enable = 1;
2112 
2113 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2114 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2115 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
2116 						HCLGE_BUF_SIZE_UNIT);
2117 		} else {
2118 			priv->wl.low = 0;
2119 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2120 					aligned_mps;
2121 		}
2122 
2123 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2124 	}
2125 
2126 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2127 }
2128 
2129 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2130 					  struct hclge_pkt_buf_alloc *buf_alloc)
2131 {
2132 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2133 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2134 	int i;
2135 
2136 	/* clear private buffers starting from the last TC */
2137 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2138 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2139 		unsigned int mask = BIT((unsigned int)i);
2140 
2141 		if (hdev->hw_tc_map & mask &&
2142 		    !(hdev->tm_info.hw_pfc_map & mask)) {
2143 			/* Clear the private buffer of this non-PFC TC */
2144 			priv->wl.low = 0;
2145 			priv->wl.high = 0;
2146 			priv->buf_size = 0;
2147 			priv->enable = 0;
2148 			no_pfc_priv_num--;
2149 		}
2150 
2151 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2152 		    no_pfc_priv_num == 0)
2153 			break;
2154 	}
2155 
2156 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2157 }
2158 
2159 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2160 					struct hclge_pkt_buf_alloc *buf_alloc)
2161 {
2162 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2163 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2164 	int i;
2165 
2166 	/* clear private buffers starting from the last TC */
2167 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2168 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2169 		unsigned int mask = BIT((unsigned int)i);
2170 
2171 		if (hdev->hw_tc_map & mask &&
2172 		    hdev->tm_info.hw_pfc_map & mask) {
2173 			/* Reduce the number of PFC TCs with a private buffer */
2174 			priv->wl.low = 0;
2175 			priv->enable = 0;
2176 			priv->wl.high = 0;
2177 			priv->buf_size = 0;
2178 			pfc_priv_num--;
2179 		}
2180 
2181 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2182 		    pfc_priv_num == 0)
2183 			break;
2184 	}
2185 
2186 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2187 }
2188 
2189 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2190 				      struct hclge_pkt_buf_alloc *buf_alloc)
2191 {
2192 #define COMPENSATE_BUFFER	0x3C00
2193 #define COMPENSATE_HALF_MPS_NUM	5
2194 #define PRIV_WL_GAP		0x1800
2195 
2196 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2197 	u32 tc_num = hclge_get_tc_num(hdev);
2198 	u32 half_mps = hdev->mps >> 1;
2199 	u32 min_rx_priv;
2200 	unsigned int i;
2201 
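	/* split the remaining rx buffer evenly among the enabled TCs, keep
	 * some headroom when only a few TCs are in use, and give up if the
	 * per-TC share cannot cover the minimum private buffer
	 */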
2202 	if (tc_num)
2203 		rx_priv = rx_priv / tc_num;
2204 
2205 	if (tc_num <= NEED_RESERVE_TC_NUM)
2206 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2207 
2208 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2209 			COMPENSATE_HALF_MPS_NUM * half_mps;
2210 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2211 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2212 	if (rx_priv < min_rx_priv)
2213 		return false;
2214 
2215 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2216 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2217 
2218 		priv->enable = 0;
2219 		priv->wl.low = 0;
2220 		priv->wl.high = 0;
2221 		priv->buf_size = 0;
2222 
2223 		if (!(hdev->hw_tc_map & BIT(i)))
2224 			continue;
2225 
2226 		priv->enable = 1;
2227 		priv->buf_size = rx_priv;
2228 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2229 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2230 	}
2231 
2232 	buf_alloc->s_buf.buf_size = 0;
2233 
2234 	return true;
2235 }
2236 
2237 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2238  * @hdev: pointer to struct hclge_dev
2239  * @buf_alloc: pointer to buffer calculation data
2240  * @return: 0 if the calculation succeeds, negative errno otherwise
2241  */
2242 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2243 				struct hclge_pkt_buf_alloc *buf_alloc)
2244 {
2245 	/* When DCB is not supported, rx private buffer is not allocated. */
2246 	if (!hnae3_dev_dcb_supported(hdev)) {
2247 		u32 rx_all = hdev->pkt_buf_size;
2248 
2249 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2250 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2251 			return -ENOMEM;
2252 
2253 		return 0;
2254 	}
2255 
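	/* try the allocation strategies from most to least generous: private
	 * buffers only, private plus shared with large then small waterlines,
	 * then drop private buffers for non-PFC and finally PFC TCs
	 */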
2256 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2257 		return 0;
2258 
2259 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2260 		return 0;
2261 
2262 	/* try to decrease the buffer size */
2263 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2264 		return 0;
2265 
2266 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2267 		return 0;
2268 
2269 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2270 		return 0;
2271 
2272 	return -ENOMEM;
2273 }
2274 
2275 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2276 				   struct hclge_pkt_buf_alloc *buf_alloc)
2277 {
2278 	struct hclge_rx_priv_buff_cmd *req;
2279 	struct hclge_desc desc;
2280 	int ret;
2281 	int i;
2282 
2283 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2284 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2285 
2286 	/* Alloc private buffer for each TC */
2287 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2288 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2289 
2290 		req->buf_num[i] =
2291 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2292 		req->buf_num[i] |=
2293 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2294 	}
2295 
2296 	req->shared_buf =
2297 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2298 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2299 
2300 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2301 	if (ret)
2302 		dev_err(&hdev->pdev->dev,
2303 			"rx private buffer alloc cmd failed %d\n", ret);
2304 
2305 	return ret;
2306 }
2307 
2308 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2309 				   struct hclge_pkt_buf_alloc *buf_alloc)
2310 {
2311 	struct hclge_rx_priv_wl_buf *req;
2312 	struct hclge_priv_buf *priv;
2313 	struct hclge_desc desc[2];
2314 	int i, j;
2315 	int ret;
2316 
2317 	for (i = 0; i < 2; i++) {
2318 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2319 					   false);
2320 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2321 
2322 		/* The first descriptor sets the NEXT bit to 1 */
2323 		if (i == 0)
2324 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2325 		else
2326 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2327 
2328 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2329 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2330 
2331 			priv = &buf_alloc->priv_buf[idx];
2332 			req->tc_wl[j].high =
2333 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2334 			req->tc_wl[j].high |=
2335 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2336 			req->tc_wl[j].low =
2337 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2338 			req->tc_wl[j].low |=
2339 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340 		}
2341 	}
2342 
2343 	/* Send 2 descriptors at one time */
2344 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2345 	if (ret)
2346 		dev_err(&hdev->pdev->dev,
2347 			"rx private waterline config cmd failed %d\n",
2348 			ret);
2349 	return ret;
2350 }
2351 
2352 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2353 				    struct hclge_pkt_buf_alloc *buf_alloc)
2354 {
2355 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2356 	struct hclge_rx_com_thrd *req;
2357 	struct hclge_desc desc[2];
2358 	struct hclge_tc_thrd *tc;
2359 	int i, j;
2360 	int ret;
2361 
2362 	for (i = 0; i < 2; i++) {
2363 		hclge_cmd_setup_basic_desc(&desc[i],
2364 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2365 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2366 
2367 		/* The first descriptor sets the NEXT bit to 1 */
2368 		if (i == 0)
2369 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2370 		else
2371 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2372 
2373 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2374 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2375 
2376 			req->com_thrd[j].high =
2377 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2378 			req->com_thrd[j].high |=
2379 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2380 			req->com_thrd[j].low =
2381 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2382 			req->com_thrd[j].low |=
2383 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2384 		}
2385 	}
2386 
2387 	/* Send 2 descriptors at one time */
2388 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2389 	if (ret)
2390 		dev_err(&hdev->pdev->dev,
2391 			"common threshold config cmd failed %d\n", ret);
2392 	return ret;
2393 }
2394 
2395 static int hclge_common_wl_config(struct hclge_dev *hdev,
2396 				  struct hclge_pkt_buf_alloc *buf_alloc)
2397 {
2398 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2399 	struct hclge_rx_com_wl *req;
2400 	struct hclge_desc desc;
2401 	int ret;
2402 
2403 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2404 
2405 	req = (struct hclge_rx_com_wl *)desc.data;
2406 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2407 	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2408 
2409 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2410 	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2411 
2412 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2413 	if (ret)
2414 		dev_err(&hdev->pdev->dev,
2415 			"common waterline config cmd failed %d\n", ret);
2416 
2417 	return ret;
2418 }
2419 
2420 int hclge_buffer_alloc(struct hclge_dev *hdev)
2421 {
2422 	struct hclge_pkt_buf_alloc *pkt_buf;
2423 	int ret;
2424 
2425 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2426 	if (!pkt_buf)
2427 		return -ENOMEM;
2428 
2429 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2430 	if (ret) {
2431 		dev_err(&hdev->pdev->dev,
2432 			"could not calc tx buffer size for all TCs %d\n", ret);
2433 		goto out;
2434 	}
2435 
2436 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2437 	if (ret) {
2438 		dev_err(&hdev->pdev->dev,
2439 			"could not alloc tx buffers %d\n", ret);
2440 		goto out;
2441 	}
2442 
2443 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2444 	if (ret) {
2445 		dev_err(&hdev->pdev->dev,
2446 			"could not calc rx priv buffer size for all TCs %d\n",
2447 			ret);
2448 		goto out;
2449 	}
2450 
2451 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2452 	if (ret) {
2453 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2454 			ret);
2455 		goto out;
2456 	}
2457 
2458 	if (hnae3_dev_dcb_supported(hdev)) {
2459 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2460 		if (ret) {
2461 			dev_err(&hdev->pdev->dev,
2462 				"could not configure rx private waterline %d\n",
2463 				ret);
2464 			goto out;
2465 		}
2466 
2467 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2468 		if (ret) {
2469 			dev_err(&hdev->pdev->dev,
2470 				"could not configure common threshold %d\n",
2471 				ret);
2472 			goto out;
2473 		}
2474 	}
2475 
2476 	ret = hclge_common_wl_config(hdev, pkt_buf);
2477 	if (ret)
2478 		dev_err(&hdev->pdev->dev,
2479 			"could not configure common waterline %d\n", ret);
2480 
2481 out:
2482 	kfree(pkt_buf);
2483 	return ret;
2484 }
2485 
2486 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2487 {
2488 	struct hnae3_handle *roce = &vport->roce;
2489 	struct hnae3_handle *nic = &vport->nic;
2490 	struct hclge_dev *hdev = vport->back;
2491 
2492 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2493 
2494 	if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2495 		return -EINVAL;
2496 
2497 	roce->rinfo.base_vector = hdev->roce_base_vector;
2498 
2499 	roce->rinfo.netdev = nic->kinfo.netdev;
2500 	roce->rinfo.roce_io_base = hdev->hw.io_base;
2501 	roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2502 
2503 	roce->pdev = nic->pdev;
2504 	roce->ae_algo = nic->ae_algo;
2505 	roce->numa_node_mask = nic->numa_node_mask;
2506 
2507 	return 0;
2508 }
2509 
2510 static int hclge_init_msi(struct hclge_dev *hdev)
2511 {
2512 	struct pci_dev *pdev = hdev->pdev;
2513 	int vectors;
2514 	int i;
2515 
2516 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2517 					hdev->num_msi,
2518 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2519 	if (vectors < 0) {
2520 		dev_err(&pdev->dev,
2521 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2522 			vectors);
2523 		return vectors;
2524 	}
2525 	if (vectors < hdev->num_msi)
2526 		dev_warn(&hdev->pdev->dev,
2527 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2528 			 hdev->num_msi, vectors);
2529 
2530 	hdev->num_msi = vectors;
2531 	hdev->num_msi_left = vectors;
2532 
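	/* RoCE vectors start right after the NIC vectors */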
2533 	hdev->base_msi_vector = pdev->irq;
2534 	hdev->roce_base_vector = hdev->base_msi_vector +
2535 				hdev->num_nic_msi;
2536 
2537 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2538 					   sizeof(u16), GFP_KERNEL);
2539 	if (!hdev->vector_status) {
2540 		pci_free_irq_vectors(pdev);
2541 		return -ENOMEM;
2542 	}
2543 
2544 	for (i = 0; i < hdev->num_msi; i++)
2545 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2546 
2547 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2548 					sizeof(int), GFP_KERNEL);
2549 	if (!hdev->vector_irq) {
2550 		pci_free_irq_vectors(pdev);
2551 		return -ENOMEM;
2552 	}
2553 
2554 	return 0;
2555 }
2556 
2557 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2558 {
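	/* only 10M and 100M support half duplex; force full duplex otherwise */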
2559 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2560 		duplex = HCLGE_MAC_FULL;
2561 
2562 	return duplex;
2563 }
2564 
2565 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2566 				      u8 duplex)
2567 {
2568 	struct hclge_config_mac_speed_dup_cmd *req;
2569 	struct hclge_desc desc;
2570 	int ret;
2571 
2572 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2573 
2574 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2575 
2576 	if (duplex)
2577 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2578 
2579 	switch (speed) {
2580 	case HCLGE_MAC_SPEED_10M:
2581 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2582 				HCLGE_CFG_SPEED_S, 6);
2583 		break;
2584 	case HCLGE_MAC_SPEED_100M:
2585 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 				HCLGE_CFG_SPEED_S, 7);
2587 		break;
2588 	case HCLGE_MAC_SPEED_1G:
2589 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 				HCLGE_CFG_SPEED_S, 0);
2591 		break;
2592 	case HCLGE_MAC_SPEED_10G:
2593 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 				HCLGE_CFG_SPEED_S, 1);
2595 		break;
2596 	case HCLGE_MAC_SPEED_25G:
2597 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 				HCLGE_CFG_SPEED_S, 2);
2599 		break;
2600 	case HCLGE_MAC_SPEED_40G:
2601 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 				HCLGE_CFG_SPEED_S, 3);
2603 		break;
2604 	case HCLGE_MAC_SPEED_50G:
2605 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2606 				HCLGE_CFG_SPEED_S, 4);
2607 		break;
2608 	case HCLGE_MAC_SPEED_100G:
2609 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2610 				HCLGE_CFG_SPEED_S, 5);
2611 		break;
2612 	case HCLGE_MAC_SPEED_200G:
2613 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2614 				HCLGE_CFG_SPEED_S, 8);
2615 		break;
2616 	default:
2617 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2618 		return -EINVAL;
2619 	}
2620 
2621 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2622 		      1);
2623 
2624 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2625 	if (ret) {
2626 		dev_err(&hdev->pdev->dev,
2627 			"mac speed/duplex config cmd failed %d.\n", ret);
2628 		return ret;
2629 	}
2630 
2631 	return 0;
2632 }
2633 
2634 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2635 {
2636 	struct hclge_mac *mac = &hdev->hw.mac;
2637 	int ret;
2638 
2639 	duplex = hclge_check_speed_dup(duplex, speed);
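	/* without autoneg support, skip the hw config if speed and duplex are unchanged */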
2640 	if (!mac->support_autoneg && mac->speed == speed &&
2641 	    mac->duplex == duplex)
2642 		return 0;
2643 
2644 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2645 	if (ret)
2646 		return ret;
2647 
2648 	hdev->hw.mac.speed = speed;
2649 	hdev->hw.mac.duplex = duplex;
2650 
2651 	return 0;
2652 }
2653 
2654 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2655 				     u8 duplex)
2656 {
2657 	struct hclge_vport *vport = hclge_get_vport(handle);
2658 	struct hclge_dev *hdev = vport->back;
2659 
2660 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2661 }
2662 
2663 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2664 {
2665 	struct hclge_config_auto_neg_cmd *req;
2666 	struct hclge_desc desc;
2667 	u32 flag = 0;
2668 	int ret;
2669 
2670 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2671 
2672 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2673 	if (enable)
2674 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2675 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2676 
2677 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2678 	if (ret)
2679 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2680 			ret);
2681 
2682 	return ret;
2683 }
2684 
2685 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2686 {
2687 	struct hclge_vport *vport = hclge_get_vport(handle);
2688 	struct hclge_dev *hdev = vport->back;
2689 
2690 	if (!hdev->hw.mac.support_autoneg) {
2691 		if (enable) {
2692 			dev_err(&hdev->pdev->dev,
2693 				"autoneg is not supported by current port\n");
2694 			return -EOPNOTSUPP;
2695 		} else {
2696 			return 0;
2697 		}
2698 	}
2699 
2700 	return hclge_set_autoneg_en(hdev, enable);
2701 }
2702 
2703 static int hclge_get_autoneg(struct hnae3_handle *handle)
2704 {
2705 	struct hclge_vport *vport = hclge_get_vport(handle);
2706 	struct hclge_dev *hdev = vport->back;
2707 	struct phy_device *phydev = hdev->hw.mac.phydev;
2708 
2709 	if (phydev)
2710 		return phydev->autoneg;
2711 
2712 	return hdev->hw.mac.autoneg;
2713 }
2714 
2715 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2716 {
2717 	struct hclge_vport *vport = hclge_get_vport(handle);
2718 	struct hclge_dev *hdev = vport->back;
2719 	int ret;
2720 
2721 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2722 
2723 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2724 	if (ret)
2725 		return ret;
2726 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2727 }
2728 
2729 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2730 {
2731 	struct hclge_vport *vport = hclge_get_vport(handle);
2732 	struct hclge_dev *hdev = vport->back;
2733 
2734 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2735 		return hclge_set_autoneg_en(hdev, !halt);
2736 
2737 	return 0;
2738 }
2739 
2740 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2741 {
2742 	struct hclge_config_fec_cmd *req;
2743 	struct hclge_desc desc;
2744 	int ret;
2745 
2746 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2747 
2748 	req = (struct hclge_config_fec_cmd *)desc.data;
2749 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2750 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2751 	if (fec_mode & BIT(HNAE3_FEC_RS))
2752 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2753 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2754 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2755 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2756 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2757 
2758 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2759 	if (ret)
2760 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2761 
2762 	return ret;
2763 }
2764 
2765 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2766 {
2767 	struct hclge_vport *vport = hclge_get_vport(handle);
2768 	struct hclge_dev *hdev = vport->back;
2769 	struct hclge_mac *mac = &hdev->hw.mac;
2770 	int ret;
2771 
2772 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2773 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2774 		return -EINVAL;
2775 	}
2776 
2777 	ret = hclge_set_fec_hw(hdev, fec_mode);
2778 	if (ret)
2779 		return ret;
2780 
2781 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2782 	return 0;
2783 }
2784 
2785 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2786 			  u8 *fec_mode)
2787 {
2788 	struct hclge_vport *vport = hclge_get_vport(handle);
2789 	struct hclge_dev *hdev = vport->back;
2790 	struct hclge_mac *mac = &hdev->hw.mac;
2791 
2792 	if (fec_ability)
2793 		*fec_ability = mac->fec_ability;
2794 	if (fec_mode)
2795 		*fec_mode = mac->fec_mode;
2796 }
2797 
2798 static int hclge_mac_init(struct hclge_dev *hdev)
2799 {
2800 	struct hclge_mac *mac = &hdev->hw.mac;
2801 	int ret;
2802 
2803 	hdev->support_sfp_query = true;
2804 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2805 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2806 					 hdev->hw.mac.duplex);
2807 	if (ret)
2808 		return ret;
2809 
2810 	if (hdev->hw.mac.support_autoneg) {
2811 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2812 		if (ret)
2813 			return ret;
2814 	}
2815 
2816 	mac->link = 0;
2817 
2818 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2819 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2820 		if (ret)
2821 			return ret;
2822 	}
2823 
2824 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2825 	if (ret) {
2826 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2827 		return ret;
2828 	}
2829 
2830 	ret = hclge_set_default_loopback(hdev);
2831 	if (ret)
2832 		return ret;
2833 
2834 	ret = hclge_buffer_alloc(hdev);
2835 	if (ret)
2836 		dev_err(&hdev->pdev->dev,
2837 			"allocate buffer fail, ret=%d\n", ret);
2838 
2839 	return ret;
2840 }
2841 
2842 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2843 {
2844 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2845 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2846 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2847 				    hclge_wq, &hdev->service_task, 0);
2848 }
2849 
2850 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2851 {
2852 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2853 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2854 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2855 				    hclge_wq, &hdev->service_task, 0);
2856 }
2857 
2858 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2859 {
2860 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2861 	    !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2862 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2863 				    hclge_wq, &hdev->service_task, 0);
2864 }
2865 
2866 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2867 {
2868 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2869 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2870 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2871 				    hclge_wq, &hdev->service_task,
2872 				    delay_time);
2873 }
2874 
2875 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2876 {
2877 	struct hclge_link_status_cmd *req;
2878 	struct hclge_desc desc;
2879 	int ret;
2880 
2881 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2882 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2883 	if (ret) {
2884 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2885 			ret);
2886 		return ret;
2887 	}
2888 
2889 	req = (struct hclge_link_status_cmd *)desc.data;
2890 	*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2891 		HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2892 
2893 	return 0;
2894 }
2895 
2896 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2897 {
2898 	struct phy_device *phydev = hdev->hw.mac.phydev;
2899 
2900 	*link_status = HCLGE_LINK_STATUS_DOWN;
2901 
2902 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2903 		return 0;
2904 
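	/* when an external PHY is attached, report link down unless the PHY
	 * is up and running
	 */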
2905 	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2906 		return 0;
2907 
2908 	return hclge_get_mac_link_status(hdev, link_status);
2909 }
2910 
2911 static void hclge_push_link_status(struct hclge_dev *hdev)
2912 {
2913 	struct hclge_vport *vport;
2914 	int ret;
2915 	u16 i;
2916 
2917 	for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2918 		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2919 
2920 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2921 		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2922 			continue;
2923 
2924 		ret = hclge_push_vf_link_status(vport);
2925 		if (ret) {
2926 			dev_err(&hdev->pdev->dev,
2927 				"failed to push link status to vf%u, ret = %d\n",
2928 				i, ret);
2929 		}
2930 	}
2931 }
2932 
2933 static void hclge_update_link_status(struct hclge_dev *hdev)
2934 {
2935 	struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2936 	struct hnae3_handle *handle = &hdev->vport[0].nic;
2937 	struct hnae3_client *rclient = hdev->roce_client;
2938 	struct hnae3_client *client = hdev->nic_client;
2939 	int state;
2940 	int ret;
2941 
2942 	if (!client)
2943 		return;
2944 
2945 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2946 		return;
2947 
2948 	ret = hclge_get_mac_phy_link(hdev, &state);
2949 	if (ret) {
2950 		clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2951 		return;
2952 	}
2953 
2954 	if (state != hdev->hw.mac.link) {
2955 		client->ops->link_status_change(handle, state);
2956 		hclge_config_mac_tnl_int(hdev, state);
2957 		if (rclient && rclient->ops->link_status_change)
2958 			rclient->ops->link_status_change(rhandle, state);
2959 
2960 		hdev->hw.mac.link = state;
2961 		hclge_push_link_status(hdev);
2962 	}
2963 
2964 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2965 }
2966 
2967 static void hclge_update_port_capability(struct hclge_dev *hdev,
2968 					 struct hclge_mac *mac)
2969 {
2970 	if (hnae3_dev_fec_supported(hdev))
2971 		/* update fec ability by speed */
2972 		hclge_convert_setting_fec(mac);
2973 
2974 	/* firmware cannot identify the backplane type; the media type
2975 	 * read from the configuration helps to handle it
2976 	 */
2977 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2978 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2979 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2980 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2981 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2982 
2983 	if (mac->support_autoneg) {
2984 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2985 		linkmode_copy(mac->advertising, mac->supported);
2986 	} else {
2987 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2988 				   mac->supported);
2989 		linkmode_zero(mac->advertising);
2990 	}
2991 }
2992 
2993 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2994 {
2995 	struct hclge_sfp_info_cmd *resp;
2996 	struct hclge_desc desc;
2997 	int ret;
2998 
2999 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3000 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3001 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3002 	if (ret == -EOPNOTSUPP) {
3003 		dev_warn(&hdev->pdev->dev,
3004 			 "IMP do not support get SFP speed %d\n", ret);
3005 		return ret;
3006 	} else if (ret) {
3007 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3008 		return ret;
3009 	}
3010 
3011 	*speed = le32_to_cpu(resp->speed);
3012 
3013 	return 0;
3014 }
3015 
3016 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3017 {
3018 	struct hclge_sfp_info_cmd *resp;
3019 	struct hclge_desc desc;
3020 	int ret;
3021 
3022 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3023 	resp = (struct hclge_sfp_info_cmd *)desc.data;
3024 
3025 	resp->query_type = QUERY_ACTIVE_SPEED;
3026 
3027 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3028 	if (ret == -EOPNOTSUPP) {
3029 		dev_warn(&hdev->pdev->dev,
3030 			 "IMP does not support get SFP info %d\n", ret);
3031 		return ret;
3032 	} else if (ret) {
3033 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3034 		return ret;
3035 	}
3036 
3037 	/* In some cases the mac speed reported by the IMP may be 0; such a
3038 	 * value must not be copied to mac->speed.
3039 	 */
3040 	if (!le32_to_cpu(resp->speed))
3041 		return 0;
3042 
3043 	mac->speed = le32_to_cpu(resp->speed);
3044 	/* if resp->speed_ability is 0, it means the firmware is an old
3045 	 * version, so do not update these params
3046 	 */
3047 	if (resp->speed_ability) {
3048 		mac->module_type = le32_to_cpu(resp->module_type);
3049 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
3050 		mac->autoneg = resp->autoneg;
3051 		mac->support_autoneg = resp->autoneg_ability;
3052 		mac->speed_type = QUERY_ACTIVE_SPEED;
3053 		if (!resp->active_fec)
3054 			mac->fec_mode = 0;
3055 		else
3056 			mac->fec_mode = BIT(resp->active_fec);
3057 	} else {
3058 		mac->speed_type = QUERY_SFP_SPEED;
3059 	}
3060 
3061 	return 0;
3062 }
3063 
3064 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3065 					struct ethtool_link_ksettings *cmd)
3066 {
3067 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3068 	struct hclge_vport *vport = hclge_get_vport(handle);
3069 	struct hclge_phy_link_ksetting_0_cmd *req0;
3070 	struct hclge_phy_link_ksetting_1_cmd *req1;
3071 	u32 supported, advertising, lp_advertising;
3072 	struct hclge_dev *hdev = vport->back;
3073 	int ret;
3074 
3075 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3076 				   true);
3077 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3078 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3079 				   true);
3080 
3081 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3082 	if (ret) {
3083 		dev_err(&hdev->pdev->dev,
3084 			"failed to get phy link ksetting, ret = %d.\n", ret);
3085 		return ret;
3086 	}
3087 
3088 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3089 	cmd->base.autoneg = req0->autoneg;
3090 	cmd->base.speed = le32_to_cpu(req0->speed);
3091 	cmd->base.duplex = req0->duplex;
3092 	cmd->base.port = req0->port;
3093 	cmd->base.transceiver = req0->transceiver;
3094 	cmd->base.phy_address = req0->phy_address;
3095 	cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3096 	cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3097 	supported = le32_to_cpu(req0->supported);
3098 	advertising = le32_to_cpu(req0->advertising);
3099 	lp_advertising = le32_to_cpu(req0->lp_advertising);
3100 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3101 						supported);
3102 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3103 						advertising);
3104 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3105 						lp_advertising);
3106 
3107 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3108 	cmd->base.master_slave_cfg = req1->master_slave_cfg;
3109 	cmd->base.master_slave_state = req1->master_slave_state;
3110 
3111 	return 0;
3112 }
3113 
3114 static int
3115 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3116 			     const struct ethtool_link_ksettings *cmd)
3117 {
3118 	struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3119 	struct hclge_vport *vport = hclge_get_vport(handle);
3120 	struct hclge_phy_link_ksetting_0_cmd *req0;
3121 	struct hclge_phy_link_ksetting_1_cmd *req1;
3122 	struct hclge_dev *hdev = vport->back;
3123 	u32 advertising;
3124 	int ret;
3125 
3126 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
3127 	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3128 	     (cmd->base.duplex != DUPLEX_HALF &&
3129 	      cmd->base.duplex != DUPLEX_FULL)))
3130 		return -EINVAL;
3131 
3132 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3133 				   false);
3134 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3135 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3136 				   false);
3137 
3138 	req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3139 	req0->autoneg = cmd->base.autoneg;
3140 	req0->speed = cpu_to_le32(cmd->base.speed);
3141 	req0->duplex = cmd->base.duplex;
3142 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
3143 						cmd->link_modes.advertising);
3144 	req0->advertising = cpu_to_le32(advertising);
3145 	req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3146 
3147 	req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3148 	req1->master_slave_cfg = cmd->base.master_slave_cfg;
3149 
3150 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3151 	if (ret) {
3152 		dev_err(&hdev->pdev->dev,
3153 			"failed to set phy link ksettings, ret = %d.\n", ret);
3154 		return ret;
3155 	}
3156 
3157 	hdev->hw.mac.autoneg = cmd->base.autoneg;
3158 	hdev->hw.mac.speed = cmd->base.speed;
3159 	hdev->hw.mac.duplex = cmd->base.duplex;
3160 	linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3161 
3162 	return 0;
3163 }
3164 
3165 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3166 {
3167 	struct ethtool_link_ksettings cmd;
3168 	int ret;
3169 
3170 	if (!hnae3_dev_phy_imp_supported(hdev))
3171 		return 0;
3172 
3173 	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3174 	if (ret)
3175 		return ret;
3176 
3177 	hdev->hw.mac.autoneg = cmd.base.autoneg;
3178 	hdev->hw.mac.speed = cmd.base.speed;
3179 	hdev->hw.mac.duplex = cmd.base.duplex;
3180 
3181 	return 0;
3182 }
3183 
3184 static int hclge_tp_port_init(struct hclge_dev *hdev)
3185 {
3186 	struct ethtool_link_ksettings cmd;
3187 
3188 	if (!hnae3_dev_phy_imp_supported(hdev))
3189 		return 0;
3190 
3191 	cmd.base.autoneg = hdev->hw.mac.autoneg;
3192 	cmd.base.speed = hdev->hw.mac.speed;
3193 	cmd.base.duplex = hdev->hw.mac.duplex;
3194 	linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3195 
3196 	return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3197 }
3198 
3199 static int hclge_update_port_info(struct hclge_dev *hdev)
3200 {
3201 	struct hclge_mac *mac = &hdev->hw.mac;
3202 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
3203 	int ret;
3204 
3205 	/* get the port info from SFP cmd if not copper port */
3206 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3207 		return hclge_update_tp_port_info(hdev);
3208 
3209 	/* if IMP does not support get SFP/qSFP info, return directly */
3210 	if (!hdev->support_sfp_query)
3211 		return 0;
3212 
3213 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3214 		ret = hclge_get_sfp_info(hdev, mac);
3215 	else
3216 		ret = hclge_get_sfp_speed(hdev, &speed);
3217 
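	/* if the firmware cannot report SFP info, remember that and skip
	 * later queries
	 */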
3218 	if (ret == -EOPNOTSUPP) {
3219 		hdev->support_sfp_query = false;
3220 		return ret;
3221 	} else if (ret) {
3222 		return ret;
3223 	}
3224 
3225 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3226 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3227 			hclge_update_port_capability(hdev, mac);
3228 			return 0;
3229 		}
3230 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3231 					       HCLGE_MAC_FULL);
3232 	} else {
3233 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3234 			return 0; /* do nothing if no SFP */
3235 
3236 		/* must config full duplex for SFP */
3237 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3238 	}
3239 }
3240 
3241 static int hclge_get_status(struct hnae3_handle *handle)
3242 {
3243 	struct hclge_vport *vport = hclge_get_vport(handle);
3244 	struct hclge_dev *hdev = vport->back;
3245 
3246 	hclge_update_link_status(hdev);
3247 
3248 	return hdev->hw.mac.link;
3249 }
3250 
3251 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3252 {
3253 	if (!pci_num_vf(hdev->pdev)) {
3254 		dev_err(&hdev->pdev->dev,
3255 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
3256 		return NULL;
3257 	}
3258 
3259 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3260 		dev_err(&hdev->pdev->dev,
3261 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
3262 			vf, pci_num_vf(hdev->pdev));
3263 		return NULL;
3264 	}
3265 
3266 	/* VFs start from vport 1; vport 0 is the PF itself */
3267 	vf += HCLGE_VF_VPORT_START_NUM;
3268 	return &hdev->vport[vf];
3269 }
3270 
3271 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3272 			       struct ifla_vf_info *ivf)
3273 {
3274 	struct hclge_vport *vport = hclge_get_vport(handle);
3275 	struct hclge_dev *hdev = vport->back;
3276 
3277 	vport = hclge_get_vf_vport(hdev, vf);
3278 	if (!vport)
3279 		return -EINVAL;
3280 
3281 	ivf->vf = vf;
3282 	ivf->linkstate = vport->vf_info.link_state;
3283 	ivf->spoofchk = vport->vf_info.spoofchk;
3284 	ivf->trusted = vport->vf_info.trusted;
3285 	ivf->min_tx_rate = 0;
3286 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3287 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3288 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3289 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3290 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
3291 
3292 	return 0;
3293 }
3294 
3295 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3296 				   int link_state)
3297 {
3298 	struct hclge_vport *vport = hclge_get_vport(handle);
3299 	struct hclge_dev *hdev = vport->back;
3300 	int link_state_old;
3301 	int ret;
3302 
3303 	vport = hclge_get_vf_vport(hdev, vf);
3304 	if (!vport)
3305 		return -EINVAL;
3306 
3307 	link_state_old = vport->vf_info.link_state;
3308 	vport->vf_info.link_state = link_state;
3309 
3310 	ret = hclge_push_vf_link_status(vport);
3311 	if (ret) {
3312 		vport->vf_info.link_state = link_state_old;
3313 		dev_err(&hdev->pdev->dev,
3314 			"failed to push vf%d link status, ret = %d\n", vf, ret);
3315 	}
3316 
3317 	return ret;
3318 }
3319 
3320 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3321 {
3322 	u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3323 
3324 	/* fetch the events from their corresponding regs */
3325 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3326 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3327 	hw_err_src_reg = hclge_read_dev(&hdev->hw,
3328 					HCLGE_RAS_PF_OTHER_INT_STS_REG);
3329 
3330 	/* Assumption: if by any chance reset and mailbox events are reported
3331 	 * together, only the reset event is processed in this pass and the
3332 	 * mailbox events are deferred. Since the RX CMDQ event has not been
3333 	 * cleared this time, the hardware will raise another interrupt just
3334 	 * for the mailbox.
3335 	 *
3336 	 * check for vector0 reset event sources
3337 	 */
3338 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3339 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3340 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3341 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3342 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3343 		hdev->rst_stats.imp_rst_cnt++;
3344 		return HCLGE_VECTOR0_EVENT_RST;
3345 	}
3346 
3347 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3348 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3349 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3350 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3351 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3352 		hdev->rst_stats.global_rst_cnt++;
3353 		return HCLGE_VECTOR0_EVENT_RST;
3354 	}
3355 
3356 	/* check for vector0 msix event and hardware error event source */
3357 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3358 	    hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3359 		return HCLGE_VECTOR0_EVENT_ERR;
3360 
3361 	/* check for vector0 ptp event source */
3362 	if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3363 		*clearval = msix_src_reg;
3364 		return HCLGE_VECTOR0_EVENT_PTP;
3365 	}
3366 
3367 	/* check for vector0 mailbox(=CMDQ RX) event source */
3368 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3369 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3370 		*clearval = cmdq_src_reg;
3371 		return HCLGE_VECTOR0_EVENT_MBX;
3372 	}
3373 
3374 	/* print other vector0 event source */
3375 	dev_info(&hdev->pdev->dev,
3376 		 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3377 		 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3378 
3379 	return HCLGE_VECTOR0_EVENT_OTHER;
3380 }
3381 
3382 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3383 				    u32 regclr)
3384 {
3385 	switch (event_type) {
3386 	case HCLGE_VECTOR0_EVENT_PTP:
3387 	case HCLGE_VECTOR0_EVENT_RST:
3388 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3389 		break;
3390 	case HCLGE_VECTOR0_EVENT_MBX:
3391 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3392 		break;
3393 	default:
3394 		break;
3395 	}
3396 }
3397 
3398 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3399 {
3400 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3401 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3402 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3403 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3404 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3405 }
3406 
3407 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3408 {
3409 	writel(enable ? 1 : 0, vector->addr);
3410 }
3411 
3412 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3413 {
3414 	struct hclge_dev *hdev = data;
3415 	unsigned long flags;
3416 	u32 clearval = 0;
3417 	u32 event_cause;
3418 
3419 	hclge_enable_vector(&hdev->misc_vector, false);
3420 	event_cause = hclge_check_event_cause(hdev, &clearval);
3421 
3422 	/* vector 0 interrupt is shared by reset, mailbox, error and PTP events */
3423 	switch (event_cause) {
3424 	case HCLGE_VECTOR0_EVENT_ERR:
3425 		hclge_errhand_task_schedule(hdev);
3426 		break;
3427 	case HCLGE_VECTOR0_EVENT_RST:
3428 		hclge_reset_task_schedule(hdev);
3429 		break;
3430 	case HCLGE_VECTOR0_EVENT_PTP:
3431 		spin_lock_irqsave(&hdev->ptp->lock, flags);
3432 		hclge_ptp_clean_tx_hwts(hdev);
3433 		spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3434 		break;
3435 	case HCLGE_VECTOR0_EVENT_MBX:
3436 		/* If we are here then either:
3437 		 * 1. we are not handling any mbx task and no mbx task is
3438 		 *    scheduled,
3439 		 *                        OR
3440 		 * 2. we are handling an mbx task but nothing more is
3441 		 *    scheduled.
3442 		 * In both cases we should schedule the mbx task, since this
3443 		 * interrupt indicates there are more mbx messages to handle.
3444 		 */
3445 		hclge_mbx_task_schedule(hdev);
3446 		break;
3447 	default:
3448 		dev_warn(&hdev->pdev->dev,
3449 			 "received unknown or unhandled event of vector0\n");
3450 		break;
3451 	}
3452 
3453 	hclge_clear_event_cause(hdev, event_cause, clearval);
3454 
3455 	/* Enable interrupt if it is not caused by reset event or error event */
3456 	if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3457 	    event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3458 	    event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3459 		hclge_enable_vector(&hdev->misc_vector, true);
3460 
3461 	return IRQ_HANDLED;
3462 }
3463 
3464 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3465 {
3466 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3467 		dev_warn(&hdev->pdev->dev,
3468 			 "vector(vector_id %d) has been freed.\n", vector_id);
3469 		return;
3470 	}
3471 
3472 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3473 	hdev->num_msi_left += 1;
3474 	hdev->num_msi_used -= 1;
3475 }
3476 
3477 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3478 {
3479 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3480 
3481 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3482 
3483 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3484 	hdev->vector_status[0] = 0;
3485 
3486 	hdev->num_msi_left -= 1;
3487 	hdev->num_msi_used += 1;
3488 }
3489 
3490 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3491 				      const cpumask_t *mask)
3492 {
3493 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3494 					      affinity_notify);
3495 
3496 	cpumask_copy(&hdev->affinity_mask, mask);
3497 }
3498 
3499 static void hclge_irq_affinity_release(struct kref *ref)
3500 {
3501 }
3502 
3503 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3504 {
3505 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3506 			      &hdev->affinity_mask);
3507 
3508 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3509 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3510 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3511 				  &hdev->affinity_notify);
3512 }
3513 
3514 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3515 {
3516 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3517 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3518 }
3519 
3520 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3521 {
3522 	int ret;
3523 
3524 	hclge_get_misc_vector(hdev);
3525 
3526 	/* the misc IRQ is freed explicitly in hclge_misc_irq_uninit() */
3527 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3528 		 HCLGE_NAME, pci_name(hdev->pdev));
3529 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3530 			  0, hdev->misc_vector.name, hdev);
3531 	if (ret) {
3532 		hclge_free_vector(hdev, 0);
3533 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3534 			hdev->misc_vector.vector_irq);
3535 	}
3536 
3537 	return ret;
3538 }
3539 
3540 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3541 {
3542 	free_irq(hdev->misc_vector.vector_irq, hdev);
3543 	hclge_free_vector(hdev, 0);
3544 }
3545 
3546 int hclge_notify_client(struct hclge_dev *hdev,
3547 			enum hnae3_reset_notify_type type)
3548 {
3549 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3550 	struct hnae3_client *client = hdev->nic_client;
3551 	int ret;
3552 
3553 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3554 		return 0;
3555 
3556 	if (!client->ops->reset_notify)
3557 		return -EOPNOTSUPP;
3558 
3559 	ret = client->ops->reset_notify(handle, type);
3560 	if (ret)
3561 		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3562 			type, ret);
3563 
3564 	return ret;
3565 }
3566 
3567 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3568 				    enum hnae3_reset_notify_type type)
3569 {
3570 	struct hnae3_handle *handle = &hdev->vport[0].roce;
3571 	struct hnae3_client *client = hdev->roce_client;
3572 	int ret;
3573 
3574 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3575 		return 0;
3576 
3577 	if (!client->ops->reset_notify)
3578 		return -EOPNOTSUPP;
3579 
3580 	ret = client->ops->reset_notify(handle, type);
3581 	if (ret)
3582 		dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3583 			type, ret);
3584 
3585 	return ret;
3586 }
3587 
3588 static int hclge_reset_wait(struct hclge_dev *hdev)
3589 {
3590 #define HCLGE_RESET_WATI_MS	100
3591 #define HCLGE_RESET_WAIT_CNT	350
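/* the reset status bit is polled every HCLGE_RESET_WATI_MS (100 ms), for at
 * most HCLGE_RESET_WAIT_CNT (350) iterations, i.e. about 35 seconds in total
 */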
3592 
3593 	u32 val, reg, reg_bit;
3594 	u32 cnt = 0;
3595 
3596 	switch (hdev->reset_type) {
3597 	case HNAE3_IMP_RESET:
3598 		reg = HCLGE_GLOBAL_RESET_REG;
3599 		reg_bit = HCLGE_IMP_RESET_BIT;
3600 		break;
3601 	case HNAE3_GLOBAL_RESET:
3602 		reg = HCLGE_GLOBAL_RESET_REG;
3603 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3604 		break;
3605 	case HNAE3_FUNC_RESET:
3606 		reg = HCLGE_FUN_RST_ING;
3607 		reg_bit = HCLGE_FUN_RST_ING_B;
3608 		break;
3609 	default:
3610 		dev_err(&hdev->pdev->dev,
3611 			"Wait for unsupported reset type: %d\n",
3612 			hdev->reset_type);
3613 		return -EINVAL;
3614 	}
3615 
3616 	val = hclge_read_dev(&hdev->hw, reg);
3617 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3618 		msleep(HCLGE_RESET_WATI_MS);
3619 		val = hclge_read_dev(&hdev->hw, reg);
3620 		cnt++;
3621 	}
3622 
3623 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3624 		dev_warn(&hdev->pdev->dev,
3625 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3626 		return -EBUSY;
3627 	}
3628 
3629 	return 0;
3630 }
3631 
3632 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3633 {
3634 	struct hclge_vf_rst_cmd *req;
3635 	struct hclge_desc desc;
3636 
3637 	req = (struct hclge_vf_rst_cmd *)desc.data;
3638 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3639 	req->dest_vfid = func_id;
3640 
3641 	if (reset)
3642 		req->vf_rst = 0x1;
3643 
3644 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3645 }
3646 
3647 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3648 {
3649 	int i;
3650 
3651 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3652 		struct hclge_vport *vport = &hdev->vport[i];
3653 		int ret;
3654 
3655 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3656 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3657 		if (ret) {
3658 			dev_err(&hdev->pdev->dev,
3659 				"set vf(%u) rst failed %d!\n",
3660 				vport->vport_id, ret);
3661 			return ret;
3662 		}
3663 
3664 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3665 			continue;
3666 
3667 		/* Inform VF to process the reset.
3668 		 * hclge_inform_reset_assert_to_vf may fail if VF
3669 		 * driver is not loaded.
3670 		 */
3671 		ret = hclge_inform_reset_assert_to_vf(vport);
3672 		if (ret)
3673 			dev_warn(&hdev->pdev->dev,
3674 				 "inform reset to vf(%u) failed %d!\n",
3675 				 vport->vport_id, ret);
3676 	}
3677 
3678 	return 0;
3679 }
3680 
3681 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3682 {
3683 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3684 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3685 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3686 		return;
3687 
3688 	hclge_mbx_handler(hdev);
3689 
3690 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3691 }
3692 
3693 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3694 {
3695 	struct hclge_pf_rst_sync_cmd *req;
3696 	struct hclge_desc desc;
3697 	int cnt = 0;
3698 	int ret;
3699 
3700 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3701 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3702 
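	/* poll the firmware until all VFs report ready, for at most
	 * HCLGE_PF_RESET_SYNC_CNT * HCLGE_PF_RESET_SYNC_TIME ms
	 * (1500 * 20 ms = 30 seconds)
	 */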
3703 	do {
3704 		/* VF needs to down its netdev via mbx during PF or FLR reset */
3705 		hclge_mailbox_service_task(hdev);
3706 
3707 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3708 		/* to be compatible with old firmware, wait
3709 		 * 100 ms for the VF to stop IO
3710 		 */
3711 		if (ret == -EOPNOTSUPP) {
3712 			msleep(HCLGE_RESET_SYNC_TIME);
3713 			return;
3714 		} else if (ret) {
3715 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3716 				 ret);
3717 			return;
3718 		} else if (req->all_vf_ready) {
3719 			return;
3720 		}
3721 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3722 		hclge_cmd_reuse_desc(&desc, true);
3723 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3724 
3725 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3726 }
3727 
3728 void hclge_report_hw_error(struct hclge_dev *hdev,
3729 			   enum hnae3_hw_error_type type)
3730 {
3731 	struct hnae3_client *client = hdev->nic_client;
3732 
3733 	if (!client || !client->ops->process_hw_error ||
3734 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3735 		return;
3736 
3737 	client->ops->process_hw_error(&hdev->vport[0].nic, type);
3738 }
3739 
3740 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3741 {
3742 	u32 reg_val;
3743 
3744 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3745 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3746 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3747 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3748 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3749 	}
3750 
3751 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3752 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3753 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3754 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3755 	}
3756 }
3757 
3758 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3759 {
3760 	struct hclge_desc desc;
3761 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3762 	int ret;
3763 
3764 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3765 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3766 	req->fun_reset_vfid = func_id;
3767 
3768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3769 	if (ret)
3770 		dev_err(&hdev->pdev->dev,
3771 			"send function reset cmd fail, status =%d\n", ret);
3772 
3773 	return ret;
3774 }
3775 
3776 static void hclge_do_reset(struct hclge_dev *hdev)
3777 {
3778 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3779 	struct pci_dev *pdev = hdev->pdev;
3780 	u32 val;
3781 
3782 	if (hclge_get_hw_reset_stat(handle)) {
3783 		dev_info(&pdev->dev, "hardware reset not finish\n");
3784 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3785 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3786 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3787 		return;
3788 	}
3789 
3790 	switch (hdev->reset_type) {
3791 	case HNAE3_GLOBAL_RESET:
3792 		dev_info(&pdev->dev, "global reset requested\n");
3793 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3794 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3795 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3796 		break;
3797 	case HNAE3_FUNC_RESET:
3798 		dev_info(&pdev->dev, "PF reset requested\n");
3799 		/* schedule again to check later */
3800 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3801 		hclge_reset_task_schedule(hdev);
3802 		break;
3803 	default:
3804 		dev_warn(&pdev->dev,
3805 			 "unsupported reset type: %d\n", hdev->reset_type);
3806 		break;
3807 	}
3808 }
3809 
3810 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3811 						   unsigned long *addr)
3812 {
3813 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3814 	struct hclge_dev *hdev = ae_dev->priv;
3815 
3816 	/* return the highest priority reset level amongst all */
3817 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3818 		rst_level = HNAE3_IMP_RESET;
3819 		clear_bit(HNAE3_IMP_RESET, addr);
3820 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3821 		clear_bit(HNAE3_FUNC_RESET, addr);
3822 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3823 		rst_level = HNAE3_GLOBAL_RESET;
3824 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3825 		clear_bit(HNAE3_FUNC_RESET, addr);
3826 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3827 		rst_level = HNAE3_FUNC_RESET;
3828 		clear_bit(HNAE3_FUNC_RESET, addr);
3829 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3830 		rst_level = HNAE3_FLR_RESET;
3831 		clear_bit(HNAE3_FLR_RESET, addr);
3832 	}
3833 
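	/* if a higher level reset is already being handled, do not override
	 * it with a lower level request: report no new reset instead
	 */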
3834 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3835 	    rst_level < hdev->reset_type)
3836 		return HNAE3_NONE_RESET;
3837 
3838 	return rst_level;
3839 }
3840 
3841 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3842 {
3843 	u32 clearval = 0;
3844 
3845 	switch (hdev->reset_type) {
3846 	case HNAE3_IMP_RESET:
3847 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3848 		break;
3849 	case HNAE3_GLOBAL_RESET:
3850 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3851 		break;
3852 	default:
3853 		break;
3854 	}
3855 
3856 	if (!clearval)
3857 		return;
3858 
3859 	/* For revision 0x20, the reset interrupt source
3860 	 * can only be cleared after the hardware reset is done
3861 	 */
3862 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3863 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3864 				clearval);
3865 
3866 	hclge_enable_vector(&hdev->misc_vector, true);
3867 }
3868 
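/* Set or clear the NIC software reset ready bit in the CSQ depth register.
 * Setting it tells the hardware that the driver's reset preparations are
 * done; it is cleared again once re-initialization has completed.
 */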
3869 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3870 {
3871 	u32 reg_val;
3872 
3873 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3874 	if (enable)
3875 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3876 	else
3877 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3878 
3879 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3880 }
3881 
3882 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3883 {
3884 	int ret;
3885 
3886 	ret = hclge_set_all_vf_rst(hdev, true);
3887 	if (ret)
3888 		return ret;
3889 
3890 	hclge_func_reset_sync_vf(hdev);
3891 
3892 	return 0;
3893 }
3894 
3895 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3896 {
3897 	u32 reg_val;
3898 	int ret = 0;
3899 
3900 	switch (hdev->reset_type) {
3901 	case HNAE3_FUNC_RESET:
3902 		ret = hclge_func_reset_notify_vf(hdev);
3903 		if (ret)
3904 			return ret;
3905 
3906 		ret = hclge_func_reset_cmd(hdev, 0);
3907 		if (ret) {
3908 			dev_err(&hdev->pdev->dev,
3909 				"asserting function reset fail %d!\n", ret);
3910 			return ret;
3911 		}
3912 
3913 		/* After performing PF reset, it is not necessary to do any
3914 		 * mailbox handling or send any command to firmware, because
3915 		 * any mailbox handling or command to firmware is only valid
3916 		 * after hclge_cmd_init is called.
3917 		 */
3918 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3919 		hdev->rst_stats.pf_rst_cnt++;
3920 		break;
3921 	case HNAE3_FLR_RESET:
3922 		ret = hclge_func_reset_notify_vf(hdev);
3923 		if (ret)
3924 			return ret;
3925 		break;
3926 	case HNAE3_IMP_RESET:
3927 		hclge_handle_imp_error(hdev);
3928 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3929 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3930 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3931 		break;
3932 	default:
3933 		break;
3934 	}
3935 
3936 	/* inform hardware that preparatory work is done */
3937 	msleep(HCLGE_RESET_SYNC_TIME);
3938 	hclge_reset_handshake(hdev, true);
3939 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3940 
3941 	return ret;
3942 }
3943 
3944 static void hclge_show_rst_info(struct hclge_dev *hdev)
3945 {
3946 	char *buf;
3947 
3948 	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3949 	if (!buf)
3950 		return;
3951 
3952 	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3953 
3954 	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3955 
3956 	kfree(buf);
3957 }
3958 
3959 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3960 {
3961 #define MAX_RESET_FAIL_CNT 5
3962 
3963 	if (hdev->reset_pending) {
3964 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3965 			 hdev->reset_pending);
3966 		return true;
3967 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3968 		   HCLGE_RESET_INT_M) {
3969 		dev_info(&hdev->pdev->dev,
3970 			 "reset failed because new reset interrupt\n");
3971 		hclge_clear_reset_cause(hdev);
3972 		return false;
3973 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3974 		hdev->rst_stats.reset_fail_cnt++;
3975 		set_bit(hdev->reset_type, &hdev->reset_pending);
3976 		dev_info(&hdev->pdev->dev,
3977 			 "re-schedule reset task(%u)\n",
3978 			 hdev->rst_stats.reset_fail_cnt);
3979 		return true;
3980 	}
3981 
3982 	hclge_clear_reset_cause(hdev);
3983 
3984 	/* recover the handshake status when the reset fails */
3985 	hclge_reset_handshake(hdev, true);
3986 
3987 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3988 
3989 	hclge_show_rst_info(hdev);
3990 
3991 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3992 
3993 	return false;
3994 }
3995 
3996 static void hclge_update_reset_level(struct hclge_dev *hdev)
3997 {
3998 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3999 	enum hnae3_reset_type reset_level;
4000 
4001 	/* reset requests will not be set during reset, so clear any
4002 	 * pending reset request to avoid an unnecessary extra reset
4003 	 * caused by the same reason.
4004 	 */
4005 	hclge_get_reset_level(ae_dev, &hdev->reset_request);
4006 
4007 	/* if default_reset_request has a higher level reset request,
4008 	 * it should be handled as soon as possible, since some errors
4009 	 * need this kind of reset to be fixed.
4010 	 */
4011 	reset_level = hclge_get_reset_level(ae_dev,
4012 					    &hdev->default_reset_request);
4013 	if (reset_level != HNAE3_NONE_RESET)
4014 		set_bit(reset_level, &hdev->reset_request);
4015 }
4016 
4017 static int hclge_set_rst_done(struct hclge_dev *hdev)
4018 {
4019 	struct hclge_pf_rst_done_cmd *req;
4020 	struct hclge_desc desc;
4021 	int ret;
4022 
4023 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
4024 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4025 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4026 
4027 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4028 	/* To be compatible with the old firmware, which does not support
4029 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4030 	 * return success
4031 	 */
4032 	if (ret == -EOPNOTSUPP) {
4033 		dev_warn(&hdev->pdev->dev,
4034 			 "current firmware does not support command(0x%x)!\n",
4035 			 HCLGE_OPC_PF_RST_DONE);
4036 		return 0;
4037 	} else if (ret) {
4038 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4039 			ret);
4040 	}
4041 
4042 	return ret;
4043 }
4044 
4045 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4046 {
4047 	int ret = 0;
4048 
4049 	switch (hdev->reset_type) {
4050 	case HNAE3_FUNC_RESET:
4051 	case HNAE3_FLR_RESET:
4052 		ret = hclge_set_all_vf_rst(hdev, false);
4053 		break;
4054 	case HNAE3_GLOBAL_RESET:
4055 	case HNAE3_IMP_RESET:
4056 		ret = hclge_set_rst_done(hdev);
4057 		break;
4058 	default:
4059 		break;
4060 	}
4061 
4062 	/* clear the handshake status after re-initialization is done */
4063 	hclge_reset_handshake(hdev, false);
4064 
4065 	return ret;
4066 }
4067 
4068 static int hclge_reset_stack(struct hclge_dev *hdev)
4069 {
4070 	int ret;
4071 
4072 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4073 	if (ret)
4074 		return ret;
4075 
4076 	ret = hclge_reset_ae_dev(hdev->ae_dev);
4077 	if (ret)
4078 		return ret;
4079 
4080 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4081 }
4082 
4083 static int hclge_reset_prepare(struct hclge_dev *hdev)
4084 {
4085 	int ret;
4086 
4087 	hdev->rst_stats.reset_cnt++;
4088 	/* perform reset of the stack & ae device for a client */
4089 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4090 	if (ret)
4091 		return ret;
4092 
4093 	rtnl_lock();
4094 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4095 	rtnl_unlock();
4096 	if (ret)
4097 		return ret;
4098 
4099 	return hclge_reset_prepare_wait(hdev);
4100 }
4101 
4102 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4103 {
4104 	int ret;
4105 
4106 	hdev->rst_stats.hw_reset_done_cnt++;
4107 
4108 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4109 	if (ret)
4110 		return ret;
4111 
4112 	rtnl_lock();
4113 	ret = hclge_reset_stack(hdev);
4114 	rtnl_unlock();
4115 	if (ret)
4116 		return ret;
4117 
4118 	hclge_clear_reset_cause(hdev);
4119 
4120 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4121 	/* ignore the RoCE notify error only if the reset has already failed
4122 	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4123 	 */
4124 	if (ret &&
4125 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4126 		return ret;
4127 
4128 	ret = hclge_reset_prepare_up(hdev);
4129 	if (ret)
4130 		return ret;
4131 
4132 	rtnl_lock();
4133 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4134 	rtnl_unlock();
4135 	if (ret)
4136 		return ret;
4137 
4138 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4139 	if (ret)
4140 		return ret;
4141 
4142 	hdev->last_reset_time = jiffies;
4143 	hdev->rst_stats.reset_fail_cnt = 0;
4144 	hdev->rst_stats.reset_done_cnt++;
4145 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4146 
4147 	hclge_update_reset_level(hdev);
4148 
4149 	return 0;
4150 }
4151 
4152 static void hclge_reset(struct hclge_dev *hdev)
4153 {
4154 	if (hclge_reset_prepare(hdev))
4155 		goto err_reset;
4156 
4157 	if (hclge_reset_wait(hdev))
4158 		goto err_reset;
4159 
4160 	if (hclge_reset_rebuild(hdev))
4161 		goto err_reset;
4162 
4163 	return;
4164 
4165 err_reset:
4166 	if (hclge_reset_err_handle(hdev))
4167 		hclge_reset_task_schedule(hdev);
4168 }
4169 
4170 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4171 {
4172 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4173 	struct hclge_dev *hdev = ae_dev->priv;
4174 
4175 	/* We might end up getting called broadly because of the 2 cases below:
4176 	 * 1. A recoverable error was conveyed through APEI and the only way
4177 	 *    to bring back normalcy is to reset.
4178 	 * 2. A new reset request from the stack due to timeout.
4179 	 *
4180 	 * Check if this is a new reset request and we are not here just
4181 	 * because the last reset attempt did not succeed and the watchdog
4182 	 * hit us again. We know it is a new request if the last request did
4183 	 * not occur very recently (watchdog timer = 5*HZ, so check after a
4184 	 * sufficiently large time, say 4*5*HZ). For a new request we reset
4185 	 * the "reset level" to PF reset. If it is a repeat of the most
4186 	 * recent reset request, we want to make sure we throttle it, so it
4187 	 * is not allowed again within HCLGE_RESET_INTERVAL.
4188 	 */
4189 
4190 	if (time_before(jiffies, (hdev->last_reset_time +
4191 				  HCLGE_RESET_INTERVAL))) {
4192 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4193 		return;
4194 	}
4195 
4196 	if (hdev->default_reset_request) {
4197 		hdev->reset_level =
4198 			hclge_get_reset_level(ae_dev,
4199 					      &hdev->default_reset_request);
4200 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4201 		hdev->reset_level = HNAE3_FUNC_RESET;
4202 	}
4203 
4204 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4205 		 hdev->reset_level);
4206 
4207 	/* request reset & schedule reset task */
4208 	set_bit(hdev->reset_level, &hdev->reset_request);
4209 	hclge_reset_task_schedule(hdev);
4210 
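	/* escalate the level for the next reset request, so that a repeated
	 * reset event (e.g. when this reset does not recover the device)
	 * triggers a deeper reset
	 */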
4211 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4212 		hdev->reset_level++;
4213 }
4214 
4215 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4216 					enum hnae3_reset_type rst_type)
4217 {
4218 	struct hclge_dev *hdev = ae_dev->priv;
4219 
4220 	set_bit(rst_type, &hdev->default_reset_request);
4221 }
4222 
4223 static void hclge_reset_timer(struct timer_list *t)
4224 {
4225 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4226 
4227 	/* if default_reset_request has no value, it means that this reset
4228 	 * request has already been handled, so just return here
4229 	 */
4230 	if (!hdev->default_reset_request)
4231 		return;
4232 
4233 	dev_info(&hdev->pdev->dev,
4234 		 "triggering reset in reset timer\n");
4235 	hclge_reset_event(hdev->pdev, NULL);
4236 }
4237 
4238 static void hclge_reset_subtask(struct hclge_dev *hdev)
4239 {
4240 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4241 
4242 	/* check if there is any ongoing reset in the hardware. This status can
4243 	 * be checked from reset_pending. If there is, we need to wait for the
4244 	 * hardware to complete the reset.
4245 	 *    a. If we are able to figure out in reasonable time that the
4246 	 *       hardware has fully completed the reset, we can proceed with
4247 	 *       the driver and client reset.
4248 	 *    b. else, we can come back later to check this status, so
4249 	 *       re-schedule now.
4250 	 */
4251 	hdev->last_reset_time = jiffies;
4252 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4253 	if (hdev->reset_type != HNAE3_NONE_RESET)
4254 		hclge_reset(hdev);
4255 
4256 	/* check if we got any *new* reset requests to be honored */
4257 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4258 	if (hdev->reset_type != HNAE3_NONE_RESET)
4259 		hclge_do_reset(hdev);
4260 
4261 	hdev->reset_type = HNAE3_NONE_RESET;
4262 }
4263 
4264 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4265 {
4266 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4267 	enum hnae3_reset_type reset_type;
4268 
4269 	if (ae_dev->hw_err_reset_req) {
4270 		reset_type = hclge_get_reset_level(ae_dev,
4271 						   &ae_dev->hw_err_reset_req);
4272 		hclge_set_def_reset_request(ae_dev, reset_type);
4273 	}
4274 
4275 	if (hdev->default_reset_request && ae_dev->ops->reset_event)
4276 		ae_dev->ops->reset_event(hdev->pdev, NULL);
4277 
4278 	/* enable interrupt after error handling is complete */
4279 	hclge_enable_vector(&hdev->misc_vector, true);
4280 }
4281 
4282 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4283 {
4284 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4285 
4286 	ae_dev->hw_err_reset_req = 0;
4287 
4288 	if (hclge_find_error_source(hdev)) {
4289 		hclge_handle_error_info_log(ae_dev);
4290 		hclge_handle_mac_tnl(hdev);
4291 	}
4292 
4293 	hclge_handle_err_reset_request(hdev);
4294 }
4295 
4296 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4297 {
4298 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4299 	struct device *dev = &hdev->pdev->dev;
4300 	u32 msix_sts_reg;
4301 
4302 	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4303 	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4304 		if (hclge_handle_hw_msix_error
4305 				(hdev, &hdev->default_reset_request))
4306 			dev_info(dev, "received msix interrupt 0x%x\n",
4307 				 msix_sts_reg);
4308 	}
4309 
4310 	hclge_handle_hw_ras_error(ae_dev);
4311 
4312 	hclge_handle_err_reset_request(hdev);
4313 }
4314 
4315 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4316 {
4317 	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4318 		return;
4319 
4320 	if (hnae3_dev_ras_imp_supported(hdev))
4321 		hclge_handle_err_recovery(hdev);
4322 	else
4323 		hclge_misc_err_recovery(hdev);
4324 }
4325 
4326 static void hclge_reset_service_task(struct hclge_dev *hdev)
4327 {
4328 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4329 		return;
4330 
4331 	down(&hdev->reset_sem);
4332 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4333 
4334 	hclge_reset_subtask(hdev);
4335 
4336 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4337 	up(&hdev->reset_sem);
4338 }
4339 
4340 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4341 {
4342 	int i;
4343 
4344 	/* start from vport 1, since vport 0 (the PF) is always alive */
4345 	for (i = 1; i < hdev->num_alloc_vport; i++) {
4346 		struct hclge_vport *vport = &hdev->vport[i];
4347 
4348 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4349 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4350 
4351 		/* If the VF is not alive, reset its MPS to the default value */
4352 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4353 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4354 	}
4355 }
4356 
4357 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4358 {
4359 	unsigned long delta = round_jiffies_relative(HZ);
4360 
4361 	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4362 		return;
4363 
4364 	/* Always handle the link updating to make sure link state is
4365 	 * updated when it is triggered by mbx.
4366 	 */
4367 	hclge_update_link_status(hdev);
4368 	hclge_sync_mac_table(hdev);
4369 	hclge_sync_promisc_mode(hdev);
4370 	hclge_sync_fd_table(hdev);
4371 
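	/* the remaining work is rate-limited to roughly once per second: if
	 * the last round was processed less than HZ jiffies ago, re-schedule
	 * for the remaining time and return
	 */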
4372 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4373 		delta = jiffies - hdev->last_serv_processed;
4374 
4375 		if (delta < round_jiffies_relative(HZ)) {
4376 			delta = round_jiffies_relative(HZ) - delta;
4377 			goto out;
4378 		}
4379 	}
4380 
4381 	hdev->serv_processed_cnt++;
4382 	hclge_update_vport_alive(hdev);
4383 
4384 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4385 		hdev->last_serv_processed = jiffies;
4386 		goto out;
4387 	}
4388 
4389 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4390 		hclge_update_stats_for_all(hdev);
4391 
4392 	hclge_update_port_info(hdev);
4393 	hclge_sync_vlan_filter(hdev);
4394 
4395 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4396 		hclge_rfs_filter_expire(hdev);
4397 
4398 	hdev->last_serv_processed = jiffies;
4399 
4400 out:
4401 	hclge_task_schedule(hdev, delta);
4402 }
4403 
4404 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4405 {
4406 	unsigned long flags;
4407 
4408 	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4409 	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4410 	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4411 		return;
4412 
4413 	/* to avoid racing with the irq handler */
4414 	spin_lock_irqsave(&hdev->ptp->lock, flags);
4415 
4416 	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4417 	 * handler may handle it just before spin_lock_irqsave().
4418 	 */
4419 	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4420 		hclge_ptp_clean_tx_hwts(hdev);
4421 
4422 	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4423 }
4424 
4425 static void hclge_service_task(struct work_struct *work)
4426 {
4427 	struct hclge_dev *hdev =
4428 		container_of(work, struct hclge_dev, service_task.work);
4429 
4430 	hclge_errhand_service_task(hdev);
4431 	hclge_reset_service_task(hdev);
4432 	hclge_ptp_service_task(hdev);
4433 	hclge_mailbox_service_task(hdev);
4434 	hclge_periodic_service_task(hdev);
4435 
4436 	/* Handle error recovery, reset and mbx again in case periodical task
4437 	 * delays the handling by calling hclge_task_schedule() in
4438 	 * hclge_periodic_service_task().
4439 	 */
4440 	hclge_errhand_service_task(hdev);
4441 	hclge_reset_service_task(hdev);
4442 	hclge_mailbox_service_task(hdev);
4443 }
4444 
4445 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4446 {
4447 	/* VF handle has no client */
4448 	if (!handle->client)
4449 		return container_of(handle, struct hclge_vport, nic);
4450 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4451 		return container_of(handle, struct hclge_vport, roce);
4452 	else
4453 		return container_of(handle, struct hclge_vport, nic);
4454 }
4455 
4456 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4457 				  struct hnae3_vector_info *vector_info)
4458 {
4459 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64
4460 
4461 	vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4462 
4463 	/* an extended offset is needed to configure vectors >= 64 */
4464 	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4465 		vector_info->io_addr = hdev->hw.io_base +
4466 				HCLGE_VECTOR_REG_BASE +
4467 				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4468 	else
4469 		vector_info->io_addr = hdev->hw.io_base +
4470 				HCLGE_VECTOR_EXT_REG_BASE +
4471 				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4472 				HCLGE_VECTOR_REG_OFFSET_H +
4473 				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4474 				HCLGE_VECTOR_REG_OFFSET;
4475 
4476 	hdev->vector_status[idx] = hdev->vport[0].vport_id;
4477 	hdev->vector_irq[idx] = vector_info->vector;
4478 }
4479 
4480 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4481 			    struct hnae3_vector_info *vector_info)
4482 {
4483 	struct hclge_vport *vport = hclge_get_vport(handle);
4484 	struct hnae3_vector_info *vector = vector_info;
4485 	struct hclge_dev *hdev = vport->back;
4486 	int alloc = 0;
4487 	u16 i = 0;
4488 	u16 j;
4489 
4490 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4491 	vector_num = min(hdev->num_msi_left, vector_num);
4492 
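	/* scan vector_status for unused entries and hand out up to vector_num
	 * of them; the return value is the number actually allocated
	 */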
4493 	for (j = 0; j < vector_num; j++) {
4494 		while (++i < hdev->num_nic_msi) {
4495 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4496 				hclge_get_vector_info(hdev, i, vector);
4497 				vector++;
4498 				alloc++;
4499 
4500 				break;
4501 			}
4502 		}
4503 	}
4504 	hdev->num_msi_left -= alloc;
4505 	hdev->num_msi_used += alloc;
4506 
4507 	return alloc;
4508 }
4509 
4510 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4511 {
4512 	int i;
4513 
4514 	for (i = 0; i < hdev->num_msi; i++)
4515 		if (vector == hdev->vector_irq[i])
4516 			return i;
4517 
4518 	return -EINVAL;
4519 }
4520 
4521 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4522 {
4523 	struct hclge_vport *vport = hclge_get_vport(handle);
4524 	struct hclge_dev *hdev = vport->back;
4525 	int vector_id;
4526 
4527 	vector_id = hclge_get_vector_index(hdev, vector);
4528 	if (vector_id < 0) {
4529 		dev_err(&hdev->pdev->dev,
4530 			"Get vector index fail. vector = %d\n", vector);
4531 		return vector_id;
4532 	}
4533 
4534 	hclge_free_vector(hdev, vector_id);
4535 
4536 	return 0;
4537 }
4538 
4539 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4540 {
4541 	return HCLGE_RSS_KEY_SIZE;
4542 }
4543 
4544 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4545 				  const u8 hfunc, const u8 *key)
4546 {
4547 	struct hclge_rss_config_cmd *req;
4548 	unsigned int key_offset = 0;
4549 	struct hclge_desc desc;
4550 	int key_counts;
4551 	int key_size;
4552 	int ret;
4553 
4554 	key_counts = HCLGE_RSS_KEY_SIZE;
4555 	req = (struct hclge_rss_config_cmd *)desc.data;
4556 
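	/* the RSS key is written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes,
	 * one command per chunk, with the chunk index carried in hash_config
	 */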
4557 	while (key_counts) {
4558 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4559 					   false);
4560 
4561 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4562 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4563 
4564 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4565 		memcpy(req->hash_key,
4566 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4567 
4568 		key_counts -= key_size;
4569 		key_offset++;
4570 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4571 		if (ret) {
4572 			dev_err(&hdev->pdev->dev,
4573 				"Configure RSS config fail, status = %d\n",
4574 				ret);
4575 			return ret;
4576 		}
4577 	}
4578 	return 0;
4579 }
4580 
4581 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4582 {
4583 	struct hclge_rss_indirection_table_cmd *req;
4584 	struct hclge_desc desc;
4585 	int rss_cfg_tbl_num;
4586 	u8 rss_msb_oft;
4587 	u8 rss_msb_val;
4588 	int ret;
4589 	u16 qid;
4590 	int i;
4591 	u32 j;
4592 
4593 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4594 	rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4595 			  HCLGE_RSS_CFG_TBL_SIZE;
4596 
4597 	for (i = 0; i < rss_cfg_tbl_num; i++) {
4598 		hclge_cmd_setup_basic_desc
4599 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4600 
4601 		req->start_table_index =
4602 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4603 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
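		/* each queue id is split: the low 8 bits go into rss_qid_l[],
		 * and bit HCLGE_RSS_CFG_TBL_BW_L is packed, one bit per
		 * entry, into the rss_qid_h[] bitmap
		 */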
4604 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4605 			qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4606 			req->rss_qid_l[j] = qid & 0xff;
4607 			rss_msb_oft =
4608 				j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4609 			rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4610 				(j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4611 			req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4612 		}
4613 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4614 		if (ret) {
4615 			dev_err(&hdev->pdev->dev,
4616 				"Configure rss indir table fail,status = %d\n",
4617 				ret);
4618 			return ret;
4619 		}
4620 	}
4621 	return 0;
4622 }
4623 
4624 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4625 				 u16 *tc_size, u16 *tc_offset)
4626 {
4627 	struct hclge_rss_tc_mode_cmd *req;
4628 	struct hclge_desc desc;
4629 	int ret;
4630 	int i;
4631 
4632 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4633 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4634 
4635 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4636 		u16 mode = 0;
4637 
4638 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4639 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4640 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4641 		hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4642 			      tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4643 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4644 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4645 
4646 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4647 	}
4648 
4649 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4650 	if (ret)
4651 		dev_err(&hdev->pdev->dev,
4652 			"Configure rss tc mode fail, status = %d\n", ret);
4653 
4654 	return ret;
4655 }
4656 
4657 static void hclge_get_rss_type(struct hclge_vport *vport)
4658 {
4659 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4660 	    vport->rss_tuple_sets.ipv4_udp_en ||
4661 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4662 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4663 	    vport->rss_tuple_sets.ipv6_udp_en ||
4664 	    vport->rss_tuple_sets.ipv6_sctp_en)
4665 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4666 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4667 		 vport->rss_tuple_sets.ipv6_fragment_en)
4668 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4669 	else
4670 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4671 }
4672 
4673 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4674 {
4675 	struct hclge_rss_input_tuple_cmd *req;
4676 	struct hclge_desc desc;
4677 	int ret;
4678 
4679 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4680 
4681 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4682 
4683 	/* Get the tuple cfg from pf */
4684 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4685 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4686 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4687 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4688 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4689 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4690 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4691 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4692 	hclge_get_rss_type(&hdev->vport[0]);
4693 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4694 	if (ret)
4695 		dev_err(&hdev->pdev->dev,
4696 			"Configure rss input fail, status = %d\n", ret);
4697 	return ret;
4698 }
4699 
4700 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4701 			 u8 *key, u8 *hfunc)
4702 {
4703 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4704 	struct hclge_vport *vport = hclge_get_vport(handle);
4705 	int i;
4706 
4707 	/* Get hash algorithm */
4708 	if (hfunc) {
4709 		switch (vport->rss_algo) {
4710 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4711 			*hfunc = ETH_RSS_HASH_TOP;
4712 			break;
4713 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4714 			*hfunc = ETH_RSS_HASH_XOR;
4715 			break;
4716 		default:
4717 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4718 			break;
4719 		}
4720 	}
4721 
4722 	/* Get the RSS Key required by the user */
4723 	if (key)
4724 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4725 
4726 	/* Get indirect table */
4727 	if (indir)
4728 		for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4729 			indir[i] =  vport->rss_indirection_tbl[i];
4730 
4731 	return 0;
4732 }
4733 
4734 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4735 			 const  u8 *key, const  u8 hfunc)
4736 {
4737 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4738 	struct hclge_vport *vport = hclge_get_vport(handle);
4739 	struct hclge_dev *hdev = vport->back;
4740 	u8 hash_algo;
4741 	int ret, i;
4742 
4743 	/* Set the RSS Hash Key if specified by the user */
4744 	if (key) {
4745 		switch (hfunc) {
4746 		case ETH_RSS_HASH_TOP:
4747 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4748 			break;
4749 		case ETH_RSS_HASH_XOR:
4750 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4751 			break;
4752 		case ETH_RSS_HASH_NO_CHANGE:
4753 			hash_algo = vport->rss_algo;
4754 			break;
4755 		default:
4756 			return -EINVAL;
4757 		}
4758 
4759 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4760 		if (ret)
4761 			return ret;
4762 
4763 		/* Update the shadow RSS key with the user specified key */
4764 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4765 		vport->rss_algo = hash_algo;
4766 	}
4767 
4768 	/* Update the shadow RSS table with user specified qids */
4769 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4770 		vport->rss_indirection_tbl[i] = indir[i];
4771 
4772 	/* Update the hardware */
4773 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4774 }
4775 
4776 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4777 {
4778 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4779 
4780 	if (nfc->data & RXH_L4_B_2_3)
4781 		hash_sets |= HCLGE_D_PORT_BIT;
4782 	else
4783 		hash_sets &= ~HCLGE_D_PORT_BIT;
4784 
4785 	if (nfc->data & RXH_IP_SRC)
4786 		hash_sets |= HCLGE_S_IP_BIT;
4787 	else
4788 		hash_sets &= ~HCLGE_S_IP_BIT;
4789 
4790 	if (nfc->data & RXH_IP_DST)
4791 		hash_sets |= HCLGE_D_IP_BIT;
4792 	else
4793 		hash_sets &= ~HCLGE_D_IP_BIT;
4794 
4795 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4796 		hash_sets |= HCLGE_V_TAG_BIT;
4797 
4798 	return hash_sets;
4799 }
4800 
4801 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4802 				    struct ethtool_rxnfc *nfc,
4803 				    struct hclge_rss_input_tuple_cmd *req)
4804 {
4805 	struct hclge_dev *hdev = vport->back;
4806 	u8 tuple_sets;
4807 
4808 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4809 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4810 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4811 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4812 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4813 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4814 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4815 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4816 
4817 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4818 	switch (nfc->flow_type) {
4819 	case TCP_V4_FLOW:
4820 		req->ipv4_tcp_en = tuple_sets;
4821 		break;
4822 	case TCP_V6_FLOW:
4823 		req->ipv6_tcp_en = tuple_sets;
4824 		break;
4825 	case UDP_V4_FLOW:
4826 		req->ipv4_udp_en = tuple_sets;
4827 		break;
4828 	case UDP_V6_FLOW:
4829 		req->ipv6_udp_en = tuple_sets;
4830 		break;
4831 	case SCTP_V4_FLOW:
4832 		req->ipv4_sctp_en = tuple_sets;
4833 		break;
4834 	case SCTP_V6_FLOW:
4835 		if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4836 		    (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4837 			return -EINVAL;
4838 
4839 		req->ipv6_sctp_en = tuple_sets;
4840 		break;
4841 	case IPV4_FLOW:
4842 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4843 		break;
4844 	case IPV6_FLOW:
4845 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4846 		break;
4847 	default:
4848 		return -EINVAL;
4849 	}
4850 
4851 	return 0;
4852 }
4853 
4854 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4855 			       struct ethtool_rxnfc *nfc)
4856 {
4857 	struct hclge_vport *vport = hclge_get_vport(handle);
4858 	struct hclge_dev *hdev = vport->back;
4859 	struct hclge_rss_input_tuple_cmd *req;
4860 	struct hclge_desc desc;
4861 	int ret;
4862 
4863 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4864 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4865 		return -EINVAL;
4866 
4867 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4868 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4869 
4870 	ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4871 	if (ret) {
4872 		dev_err(&hdev->pdev->dev,
4873 			"failed to init rss tuple cmd, ret = %d\n", ret);
4874 		return ret;
4875 	}
4876 
4877 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4878 	if (ret) {
4879 		dev_err(&hdev->pdev->dev,
4880 			"Set rss tuple fail, status = %d\n", ret);
4881 		return ret;
4882 	}
4883 
4884 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4885 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4886 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4887 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4888 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4889 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4890 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4891 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4892 	hclge_get_rss_type(vport);
4893 	return 0;
4894 }
4895 
4896 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4897 				     u8 *tuple_sets)
4898 {
4899 	switch (flow_type) {
4900 	case TCP_V4_FLOW:
4901 		*tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4902 		break;
4903 	case UDP_V4_FLOW:
4904 		*tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4905 		break;
4906 	case TCP_V6_FLOW:
4907 		*tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4908 		break;
4909 	case UDP_V6_FLOW:
4910 		*tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4911 		break;
4912 	case SCTP_V4_FLOW:
4913 		*tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4914 		break;
4915 	case SCTP_V6_FLOW:
4916 		*tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4917 		break;
4918 	case IPV4_FLOW:
4919 	case IPV6_FLOW:
4920 		*tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4921 		break;
4922 	default:
4923 		return -EINVAL;
4924 	}
4925 
4926 	return 0;
4927 }
4928 
4929 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4930 {
4931 	u64 tuple_data = 0;
4932 
4933 	if (tuple_sets & HCLGE_D_PORT_BIT)
4934 		tuple_data |= RXH_L4_B_2_3;
4935 	if (tuple_sets & HCLGE_S_PORT_BIT)
4936 		tuple_data |= RXH_L4_B_0_1;
4937 	if (tuple_sets & HCLGE_D_IP_BIT)
4938 		tuple_data |= RXH_IP_DST;
4939 	if (tuple_sets & HCLGE_S_IP_BIT)
4940 		tuple_data |= RXH_IP_SRC;
4941 
4942 	return tuple_data;
4943 }
4944 
4945 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4946 			       struct ethtool_rxnfc *nfc)
4947 {
4948 	struct hclge_vport *vport = hclge_get_vport(handle);
4949 	u8 tuple_sets;
4950 	int ret;
4951 
4952 	nfc->data = 0;
4953 
4954 	ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4955 	if (ret || !tuple_sets)
4956 		return ret;
4957 
4958 	nfc->data = hclge_convert_rss_tuple(tuple_sets);
4959 
4960 	return 0;
4961 }
4962 
4963 static int hclge_get_tc_size(struct hnae3_handle *handle)
4964 {
4965 	struct hclge_vport *vport = hclge_get_vport(handle);
4966 	struct hclge_dev *hdev = vport->back;
4967 
4968 	return hdev->pf_rss_size_max;
4969 }
4970 
4971 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4972 {
4973 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4974 	struct hclge_vport *vport = hdev->vport;
4975 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4976 	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4977 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4978 	struct hnae3_tc_info *tc_info;
4979 	u16 roundup_size;
4980 	u16 rss_size;
4981 	int i;
4982 
4983 	tc_info = &vport->nic.kinfo.tc_info;
4984 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4985 		rss_size = tc_info->tqp_count[i];
4986 		tc_valid[i] = 0;
4987 
4988 		if (!(hdev->hw_tc_map & BIT(i)))
4989 			continue;
4990 
4991 		/* the tc_size set to hardware is the log2 of rss_size rounded
4992 		 * up to a power of two; the actual queue size is limited by
4993 		 * the indirection table.
4994 		 */
4995 		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4996 		    rss_size == 0) {
4997 			dev_err(&hdev->pdev->dev,
4998 				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4999 				rss_size);
5000 			return -EINVAL;
5001 		}
5002 
5003 		roundup_size = roundup_pow_of_two(rss_size);
5004 		roundup_size = ilog2(roundup_size);
5005 
5006 		tc_valid[i] = 1;
5007 		tc_size[i] = roundup_size;
5008 		tc_offset[i] = tc_info->tqp_offset[i];
5009 	}
5010 
5011 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5012 }
5013 
5014 int hclge_rss_init_hw(struct hclge_dev *hdev)
5015 {
5016 	struct hclge_vport *vport = hdev->vport;
5017 	u16 *rss_indir = vport[0].rss_indirection_tbl;
5018 	u8 *key = vport[0].rss_hash_key;
5019 	u8 hfunc = vport[0].rss_algo;
5020 	int ret;
5021 
5022 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
5023 	if (ret)
5024 		return ret;
5025 
5026 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5027 	if (ret)
5028 		return ret;
5029 
5030 	ret = hclge_set_rss_input_tuple(hdev);
5031 	if (ret)
5032 		return ret;
5033 
5034 	return hclge_init_rss_tc_mode(hdev);
5035 }
5036 
5037 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5038 {
5039 	struct hclge_vport *vport = &hdev->vport[0];
5040 	int i;
5041 
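	/* spread the indirection table entries round-robin over the allocated
	 * RSS queues
	 */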
5042 	for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5043 		vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5044 }
5045 
5046 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5047 {
5048 	u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5049 	int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5050 	struct hclge_vport *vport = &hdev->vport[0];
5051 	u16 *rss_ind_tbl;
5052 
5053 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5054 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5055 
5056 	vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5057 	vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5058 	vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5059 	vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5060 	vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5061 	vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5062 	vport->rss_tuple_sets.ipv6_sctp_en =
5063 		hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5064 		HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5065 		HCLGE_RSS_INPUT_TUPLE_SCTP;
5066 	vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5067 
5068 	vport->rss_algo = rss_algo;
5069 
5070 	rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5071 				   sizeof(*rss_ind_tbl), GFP_KERNEL);
5072 	if (!rss_ind_tbl)
5073 		return -ENOMEM;
5074 
5075 	vport->rss_indirection_tbl = rss_ind_tbl;
5076 	memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5077 
5078 	hclge_rss_indir_init_cfg(hdev);
5079 
5080 	return 0;
5081 }
5082 
5083 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5084 				int vector_id, bool en,
5085 				struct hnae3_ring_chain_node *ring_chain)
5086 {
5087 	struct hclge_dev *hdev = vport->back;
5088 	struct hnae3_ring_chain_node *node;
5089 	struct hclge_desc desc;
5090 	struct hclge_ctrl_vector_chain_cmd *req =
5091 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
5092 	enum hclge_cmd_status status;
5093 	enum hclge_opcode_type op;
5094 	u16 tqp_type_and_id;
5095 	int i;
5096 
5097 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5098 	hclge_cmd_setup_basic_desc(&desc, op, false);
5099 	req->int_vector_id_l = hnae3_get_field(vector_id,
5100 					       HCLGE_VECTOR_ID_L_M,
5101 					       HCLGE_VECTOR_ID_L_S);
5102 	req->int_vector_id_h = hnae3_get_field(vector_id,
5103 					       HCLGE_VECTOR_ID_H_M,
5104 					       HCLGE_VECTOR_ID_H_S);
5105 
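	/* ring-to-vector mappings are batched: each descriptor holds up to
	 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries, and a command is sent each
	 * time the descriptor fills up, plus once more for any remainder
	 */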
5106 	i = 0;
5107 	for (node = ring_chain; node; node = node->next) {
5108 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5109 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5110 				HCLGE_INT_TYPE_S,
5111 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5112 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5113 				HCLGE_TQP_ID_S, node->tqp_index);
5114 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5115 				HCLGE_INT_GL_IDX_S,
5116 				hnae3_get_field(node->int_gl_idx,
5117 						HNAE3_RING_GL_IDX_M,
5118 						HNAE3_RING_GL_IDX_S));
5119 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5120 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5121 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5122 			req->vfid = vport->vport_id;
5123 
5124 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
5125 			if (status) {
5126 				dev_err(&hdev->pdev->dev,
5127 					"Map TQP fail, status is %d.\n",
5128 					status);
5129 				return -EIO;
5130 			}
5131 			i = 0;
5132 
5133 			hclge_cmd_setup_basic_desc(&desc,
5134 						   op,
5135 						   false);
5136 			req->int_vector_id_l =
5137 				hnae3_get_field(vector_id,
5138 						HCLGE_VECTOR_ID_L_M,
5139 						HCLGE_VECTOR_ID_L_S);
5140 			req->int_vector_id_h =
5141 				hnae3_get_field(vector_id,
5142 						HCLGE_VECTOR_ID_H_M,
5143 						HCLGE_VECTOR_ID_H_S);
5144 		}
5145 	}
5146 
5147 	if (i > 0) {
5148 		req->int_cause_num = i;
5149 		req->vfid = vport->vport_id;
5150 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
5151 		if (status) {
5152 			dev_err(&hdev->pdev->dev,
5153 				"Map TQP fail, status is %d.\n", status);
5154 			return -EIO;
5155 		}
5156 	}
5157 
5158 	return 0;
5159 }
5160 
5161 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5162 				    struct hnae3_ring_chain_node *ring_chain)
5163 {
5164 	struct hclge_vport *vport = hclge_get_vport(handle);
5165 	struct hclge_dev *hdev = vport->back;
5166 	int vector_id;
5167 
5168 	vector_id = hclge_get_vector_index(hdev, vector);
5169 	if (vector_id < 0) {
5170 		dev_err(&hdev->pdev->dev,
5171 			"failed to get vector index. vector=%d\n", vector);
5172 		return vector_id;
5173 	}
5174 
5175 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5176 }
5177 
5178 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5179 				       struct hnae3_ring_chain_node *ring_chain)
5180 {
5181 	struct hclge_vport *vport = hclge_get_vport(handle);
5182 	struct hclge_dev *hdev = vport->back;
5183 	int vector_id, ret;
5184 
5185 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5186 		return 0;
5187 
5188 	vector_id = hclge_get_vector_index(hdev, vector);
5189 	if (vector_id < 0) {
5190 		dev_err(&handle->pdev->dev,
5191 			"Get vector index fail. ret =%d\n", vector_id);
5192 		return vector_id;
5193 	}
5194 
5195 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5196 	if (ret)
5197 		dev_err(&handle->pdev->dev,
5198 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5199 			vector_id, ret);
5200 
5201 	return ret;
5202 }
5203 
5204 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5205 				      bool en_uc, bool en_mc, bool en_bc)
5206 {
5207 	struct hclge_vport *vport = &hdev->vport[vf_id];
5208 	struct hnae3_handle *handle = &vport->nic;
5209 	struct hclge_promisc_cfg_cmd *req;
5210 	struct hclge_desc desc;
5211 	bool uc_tx_en = en_uc;
5212 	u8 promisc_cfg = 0;
5213 	int ret;
5214 
5215 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5216 
5217 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
5218 	req->vf_id = vf_id;
5219 
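	/* when the limit-promisc private flag is set, unicast promisc is only
	 * applied on the RX direction, while unicast TX promisc stays off
	 */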
5220 	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5221 		uc_tx_en = false;
5222 
5223 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5224 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5225 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5226 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5227 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5228 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5229 	req->extend_promisc = promisc_cfg;
5230 
5231 	/* to be compatible with DEVICE_VERSION_V1/2 */
5232 	promisc_cfg = 0;
5233 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5234 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5235 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5236 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5237 	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5238 	req->promisc = promisc_cfg;
5239 
5240 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5241 	if (ret)
5242 		dev_err(&hdev->pdev->dev,
5243 			"failed to set vport %u promisc mode, ret = %d.\n",
5244 			vf_id, ret);
5245 
5246 	return ret;
5247 }
5248 
5249 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5250 				 bool en_mc_pmc, bool en_bc_pmc)
5251 {
5252 	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5253 					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
5254 }
5255 
5256 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5257 				  bool en_mc_pmc)
5258 {
5259 	struct hclge_vport *vport = hclge_get_vport(handle);
5260 	struct hclge_dev *hdev = vport->back;
5261 	bool en_bc_pmc = true;
5262 
	/* For devices whose version is below V2, if broadcast promisc is
	 * enabled, the vlan filter is always bypassed. So broadcast promisc
	 * should be disabled until the user enables promisc mode.
	 */
5267 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5268 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5269 
5270 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5271 					    en_bc_pmc);
5272 }
5273 
5274 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5275 {
5276 	struct hclge_vport *vport = hclge_get_vport(handle);
5277 
5278 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5279 }
5280 
5281 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5282 {
5283 	if (hlist_empty(&hdev->fd_rule_list))
5284 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5285 }
5286 
5287 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5288 {
5289 	if (!test_bit(location, hdev->fd_bmap)) {
5290 		set_bit(location, hdev->fd_bmap);
5291 		hdev->hclge_fd_rule_num++;
5292 	}
5293 }
5294 
5295 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5296 {
5297 	if (test_bit(location, hdev->fd_bmap)) {
5298 		clear_bit(location, hdev->fd_bmap);
5299 		hdev->hclge_fd_rule_num--;
5300 	}
5301 }
5302 
5303 static void hclge_fd_free_node(struct hclge_dev *hdev,
5304 			       struct hclge_fd_rule *rule)
5305 {
5306 	hlist_del(&rule->rule_node);
5307 	kfree(rule);
5308 	hclge_sync_fd_state(hdev);
5309 }
5310 
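/* Update an existing node in fd_rule_list according to the requested state:
 * TO_ADD and ACTIVE replace the old node content in place, DELETED frees the
 * node immediately, and TO_DEL either frees a rule that was never written to
 * hardware or marks it for deletion by the periodic task.
 */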
5311 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5312 				      struct hclge_fd_rule *old_rule,
5313 				      struct hclge_fd_rule *new_rule,
5314 				      enum HCLGE_FD_NODE_STATE state)
5315 {
5316 	switch (state) {
5317 	case HCLGE_FD_TO_ADD:
5318 	case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * at the same location, no matter what its state is, because
		 * the new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule has
		 * already been configured to the hardware, so just replace
		 * the old rule node at the same location.
		 * 3) in both cases no new node is added to the list, so the
		 * rule number and fd_bmap don't need to be updated.
		 */
5328 		new_rule->rule_node.next = old_rule->rule_node.next;
5329 		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5330 		memcpy(old_rule, new_rule, sizeof(*old_rule));
5331 		kfree(new_rule);
5332 		break;
5333 	case HCLGE_FD_DELETED:
5334 		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5335 		hclge_fd_free_node(hdev, old_rule);
5336 		break;
5337 	case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL and the old rule exists:
		 * 1) if the old rule's state is TO_DEL, do nothing, because
		 * rules are deleted by location and the rest of the rule
		 * content doesn't matter.
		 * 2) if the old rule's state is ACTIVE, change it to TO_DEL,
		 * so the rule will be deleted when the periodic task is
		 * scheduled.
		 * 3) if the old rule's state is TO_ADD, the rule hasn't been
		 * added to hardware yet, so just delete the rule node from
		 * fd_rule_list directly.
		 */
5349 		if (old_rule->state == HCLGE_FD_TO_ADD) {
5350 			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5351 			hclge_fd_free_node(hdev, old_rule);
5352 			return;
5353 		}
5354 		old_rule->state = HCLGE_FD_TO_DEL;
5355 		break;
5356 	}
5357 }
5358 
5359 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5360 						u16 location,
5361 						struct hclge_fd_rule **parent)
5362 {
5363 	struct hclge_fd_rule *rule;
5364 	struct hlist_node *node;
5365 
5366 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5367 		if (rule->location == location)
5368 			return rule;
5369 		else if (rule->location > location)
5370 			return NULL;
		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order.
		 */
5374 		*parent = rule;
5375 	}
5376 
5377 	return NULL;
5378 }
5379 
/* insert fd rule node in ascending order according to rule->location */
5381 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5382 				      struct hclge_fd_rule *rule,
5383 				      struct hclge_fd_rule *parent)
5384 {
5385 	INIT_HLIST_NODE(&rule->rule_node);
5386 
5387 	if (parent)
5388 		hlist_add_behind(&rule->rule_node, &parent->rule_node);
5389 	else
5390 		hlist_add_head(&rule->rule_node, hlist);
5391 }
5392 
5393 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5394 				     struct hclge_fd_user_def_cfg *cfg)
5395 {
5396 	struct hclge_fd_user_def_cfg_cmd *req;
5397 	struct hclge_desc desc;
5398 	u16 data = 0;
5399 	int ret;
5400 
5401 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5402 
5403 	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5404 
5405 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5406 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5407 			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5408 	req->ol2_cfg = cpu_to_le16(data);
5409 
5410 	data = 0;
5411 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5412 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5413 			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5414 	req->ol3_cfg = cpu_to_le16(data);
5415 
5416 	data = 0;
5417 	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5418 	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5419 			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5420 	req->ol4_cfg = cpu_to_le16(data);
5421 
5422 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5423 	if (ret)
5424 		dev_err(&hdev->pdev->dev,
5425 			"failed to set fd user def data, ret= %d\n", ret);
5426 	return ret;
5427 }
5428 
5429 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5430 {
5431 	int ret;
5432 
5433 	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5434 		return;
5435 
5436 	if (!locked)
5437 		spin_lock_bh(&hdev->fd_rule_lock);
5438 
5439 	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5440 	if (ret)
5441 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5442 
5443 	if (!locked)
5444 		spin_unlock_bh(&hdev->fd_rule_lock);
5445 }
5446 
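/* Each layer (L2/L3/L4) supports only one user-def offset at a time, tracked
 * by a reference count. Reject a new rule whose user-def offset conflicts
 * with the offset already referenced by other rules on the same layer.
 */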
5447 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5448 					  struct hclge_fd_rule *rule)
5449 {
5450 	struct hlist_head *hlist = &hdev->fd_rule_list;
5451 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5452 	struct hclge_fd_user_def_info *info, *old_info;
5453 	struct hclge_fd_user_def_cfg *cfg;
5454 
5455 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5456 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5457 		return 0;
5458 
	/* valid layers start from 1, so subtract 1 to index the cfg array */
5460 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5461 	info = &rule->ep.user_def;
5462 
5463 	if (!cfg->ref_cnt || cfg->offset == info->offset)
5464 		return 0;
5465 
5466 	if (cfg->ref_cnt > 1)
5467 		goto error;
5468 
5469 	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5470 	if (fd_rule) {
5471 		old_info = &fd_rule->ep.user_def;
5472 		if (info->layer == old_info->layer)
5473 			return 0;
5474 	}
5475 
5476 error:
5477 	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user-def offset.\n",
5479 		info->layer + 1);
5480 	return -ENOSPC;
5481 }
5482 
5483 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5484 					 struct hclge_fd_rule *rule)
5485 {
5486 	struct hclge_fd_user_def_cfg *cfg;
5487 
5488 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5489 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5490 		return;
5491 
5492 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5493 	if (!cfg->ref_cnt) {
5494 		cfg->offset = rule->ep.user_def.offset;
5495 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5496 	}
5497 	cfg->ref_cnt++;
5498 }
5499 
5500 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5501 					 struct hclge_fd_rule *rule)
5502 {
5503 	struct hclge_fd_user_def_cfg *cfg;
5504 
5505 	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5506 	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5507 		return;
5508 
5509 	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5510 	if (!cfg->ref_cnt)
5511 		return;
5512 
5513 	cfg->ref_cnt--;
5514 	if (!cfg->ref_cnt) {
5515 		cfg->offset = 0;
5516 		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5517 	}
5518 }
5519 
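/* Central helper for maintaining fd_rule_list: replace, mark or remove an
 * existing node, or insert a new one in ascending location order. The
 * user-def reference count is adjusted accordingly, and a TO_ADD request
 * schedules the periodic task to push the new rule to hardware.
 */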
5520 static void hclge_update_fd_list(struct hclge_dev *hdev,
5521 				 enum HCLGE_FD_NODE_STATE state, u16 location,
5522 				 struct hclge_fd_rule *new_rule)
5523 {
5524 	struct hlist_head *hlist = &hdev->fd_rule_list;
5525 	struct hclge_fd_rule *fd_rule, *parent = NULL;
5526 
5527 	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5528 	if (fd_rule) {
5529 		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5530 		if (state == HCLGE_FD_ACTIVE)
5531 			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5532 		hclge_sync_fd_user_def_cfg(hdev, true);
5533 
5534 		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5535 		return;
5536 	}
5537 
	/* it's unlikely to fail here, because the rule's existence has
	 * already been checked.
	 */
5541 	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5542 		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it doesn't exist\n",
5544 			 location);
5545 		return;
5546 	}
5547 
5548 	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5549 	hclge_sync_fd_user_def_cfg(hdev, true);
5550 
5551 	hclge_fd_insert_rule_node(hlist, new_rule, parent);
5552 	hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5553 
5554 	if (state == HCLGE_FD_TO_ADD) {
5555 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5556 		hclge_task_schedule(hdev, 0);
5557 	}
5558 }
5559 
5560 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5561 {
5562 	struct hclge_get_fd_mode_cmd *req;
5563 	struct hclge_desc desc;
5564 	int ret;
5565 
5566 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5567 
5568 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
5569 
5570 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5571 	if (ret) {
5572 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5573 		return ret;
5574 	}
5575 
5576 	*fd_mode = req->mode;
5577 
5578 	return ret;
5579 }
5580 
5581 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5582 				   u32 *stage1_entry_num,
5583 				   u32 *stage2_entry_num,
5584 				   u16 *stage1_counter_num,
5585 				   u16 *stage2_counter_num)
5586 {
5587 	struct hclge_get_fd_allocation_cmd *req;
5588 	struct hclge_desc desc;
5589 	int ret;
5590 
5591 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5592 
5593 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5594 
5595 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5596 	if (ret) {
5597 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5598 			ret);
5599 		return ret;
5600 	}
5601 
5602 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5603 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5604 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5605 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5606 
5607 	return ret;
5608 }
5609 
5610 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5611 				   enum HCLGE_FD_STAGE stage_num)
5612 {
5613 	struct hclge_set_fd_key_config_cmd *req;
5614 	struct hclge_fd_key_cfg *stage;
5615 	struct hclge_desc desc;
5616 	int ret;
5617 
5618 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5619 
5620 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5621 	stage = &hdev->fd_cfg.key_cfg[stage_num];
5622 	req->stage = stage_num;
5623 	req->key_select = stage->key_sel;
5624 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5625 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5626 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5627 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5628 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5629 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5630 
5631 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5632 	if (ret)
5633 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5634 
5635 	return ret;
5636 }
5637 
5638 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5639 {
5640 	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5641 
5642 	spin_lock_bh(&hdev->fd_rule_lock);
5643 	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5644 	spin_unlock_bh(&hdev->fd_rule_lock);
5645 
5646 	hclge_fd_set_user_def_cmd(hdev, cfg);
5647 }
5648 
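/* Initialize the flow director: query the TCAM mode to derive the max key
 * length, select the stage 1 tuple and meta data fields, query the rule and
 * counter allocation, and program the stage 1 key config to hardware.
 */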
5649 static int hclge_init_fd_config(struct hclge_dev *hdev)
5650 {
5651 #define LOW_2_WORDS		0x03
5652 	struct hclge_fd_key_cfg *key_cfg;
5653 	int ret;
5654 
5655 	if (!hnae3_dev_fd_supported(hdev))
5656 		return 0;
5657 
5658 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5659 	if (ret)
5660 		return ret;
5661 
5662 	switch (hdev->fd_cfg.fd_mode) {
5663 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5664 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5665 		break;
5666 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5667 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5668 		break;
5669 	default:
5670 		dev_err(&hdev->pdev->dev,
5671 			"Unsupported flow director mode %u\n",
5672 			hdev->fd_cfg.fd_mode);
5673 		return -EOPNOTSUPP;
5674 	}
5675 
5676 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5677 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5678 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5679 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5680 	key_cfg->outer_sipv6_word_en = 0;
5681 	key_cfg->outer_dipv6_word_en = 0;
5682 
5683 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5684 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5685 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5686 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5687 
	/* If using the max 400-bit key, tuples for ether type are also supported */
5689 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5690 		key_cfg->tuple_active |=
5691 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5692 		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5693 			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5694 	}
5695 
	/* roce_type is used to filter RoCE frames
	 * dst_vport is used to specify which vport the rule applies to
	 */
5699 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5700 
5701 	ret = hclge_get_fd_allocation(hdev,
5702 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5703 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5704 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5705 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5706 	if (ret)
5707 		return ret;
5708 
5709 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5710 }
5711 
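/* Write one half (x or y, selected by sel_x) of a TCAM entry at @loc. The
 * key is larger than a single descriptor, so it is split across three
 * descriptors chained with HCLGE_CMD_FLAG_NEXT; a NULL key only updates the
 * valid bit, which is how entries are removed.
 */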
5712 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5713 				int loc, u8 *key, bool is_add)
5714 {
5715 	struct hclge_fd_tcam_config_1_cmd *req1;
5716 	struct hclge_fd_tcam_config_2_cmd *req2;
5717 	struct hclge_fd_tcam_config_3_cmd *req3;
5718 	struct hclge_desc desc[3];
5719 	int ret;
5720 
5721 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5722 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5723 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5724 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5725 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5726 
5727 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5728 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5729 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5730 
5731 	req1->stage = stage;
5732 	req1->xy_sel = sel_x ? 1 : 0;
5733 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5734 	req1->index = cpu_to_le32(loc);
5735 	req1->entry_vld = sel_x ? is_add : 0;
5736 
5737 	if (key) {
5738 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5739 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5740 		       sizeof(req2->tcam_data));
5741 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5742 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5743 	}
5744 
5745 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
5746 	if (ret)
5747 		dev_err(&hdev->pdev->dev,
5748 			"config tcam key fail, ret=%d\n",
5749 			ret);
5750 
5751 	return ret;
5752 }
5753 
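/* Program the action data (AD) for the TCAM entry at @loc: drop the packet,
 * forward it to a direct queue or to a TC (if the hardware supports TC
 * forwarding), optionally bind a counter, and write the matched rule id
 * back to the BD.
 */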
5754 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5755 			      struct hclge_fd_ad_data *action)
5756 {
5757 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5758 	struct hclge_fd_ad_config_cmd *req;
5759 	struct hclge_desc desc;
5760 	u64 ad_data = 0;
5761 	int ret;
5762 
5763 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5764 
5765 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
5766 	req->index = cpu_to_le32(loc);
5767 	req->stage = stage;
5768 
5769 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5770 		      action->write_rule_id_to_bd);
5771 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5772 			action->rule_id);
5773 	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5774 		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5775 			      action->override_tc);
5776 		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5777 				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5778 	}
5779 	ad_data <<= 32;
5780 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5781 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5782 		      action->forward_to_direct_queue);
5783 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5784 			action->queue_id);
5785 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5786 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5787 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5788 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5789 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5790 			action->counter_id);
5791 
5792 	req->ad_data = cpu_to_le64(ad_data);
5793 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5794 	if (ret)
5795 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5796 
5797 	return ret;
5798 }
5799 
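/* Convert one tuple of @rule into its TCAM x/y key representation, using the
 * field offsets described in tuple_key_info[]. Returns false for an unknown
 * key option; otherwise returns true, with tuples marked unused simply
 * keeping their pre-zeroed key bytes.
 */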
5800 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5801 				   struct hclge_fd_rule *rule)
5802 {
5803 	int offset, moffset, ip_offset;
5804 	enum HCLGE_FD_KEY_OPT key_opt;
5805 	u16 tmp_x_s, tmp_y_s;
5806 	u32 tmp_x_l, tmp_y_l;
5807 	u8 *p = (u8 *)rule;
5808 	int i;
5809 
5810 	if (rule->unused_tuple & BIT(tuple_bit))
5811 		return true;
5812 
5813 	key_opt = tuple_key_info[tuple_bit].key_opt;
5814 	offset = tuple_key_info[tuple_bit].offset;
5815 	moffset = tuple_key_info[tuple_bit].moffset;
5816 
5817 	switch (key_opt) {
5818 	case KEY_OPT_U8:
5819 		calc_x(*key_x, p[offset], p[moffset]);
5820 		calc_y(*key_y, p[offset], p[moffset]);
5821 
5822 		return true;
5823 	case KEY_OPT_LE16:
5824 		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5825 		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5826 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5827 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5828 
5829 		return true;
5830 	case KEY_OPT_LE32:
5831 		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5832 		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5833 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5834 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5835 
5836 		return true;
5837 	case KEY_OPT_MAC:
5838 		for (i = 0; i < ETH_ALEN; i++) {
5839 			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5840 			       p[moffset + i]);
5841 			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5842 			       p[moffset + i]);
5843 		}
5844 
5845 		return true;
5846 	case KEY_OPT_IP:
5847 		ip_offset = IPV4_INDEX * sizeof(u32);
5848 		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5849 		       *(u32 *)(&p[moffset + ip_offset]));
5850 		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5851 		       *(u32 *)(&p[moffset + ip_offset]));
5852 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5853 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5854 
5855 		return true;
5856 	default:
5857 		return false;
5858 	}
5859 }
5860 
5861 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5862 				 u8 vf_id, u8 network_port_id)
5863 {
5864 	u32 port_number = 0;
5865 
5866 	if (port_type == HOST_PORT) {
5867 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5868 				pf_id);
5869 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5870 				vf_id);
5871 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5872 	} else {
5873 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5874 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5875 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5876 	}
5877 
5878 	return port_number;
5879 }
5880 
5881 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5882 				       __le32 *key_x, __le32 *key_y,
5883 				       struct hclge_fd_rule *rule)
5884 {
5885 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5886 	u8 cur_pos = 0, tuple_size, shift_bits;
5887 	unsigned int i;
5888 
5889 	for (i = 0; i < MAX_META_DATA; i++) {
5890 		tuple_size = meta_data_key_info[i].key_length;
5891 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5892 
5893 		switch (tuple_bit) {
5894 		case BIT(ROCE_TYPE):
5895 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5896 			cur_pos += tuple_size;
5897 			break;
5898 		case BIT(DST_VPORT):
5899 			port_number = hclge_get_port_number(HOST_PORT, 0,
5900 							    rule->vf_id, 0);
5901 			hnae3_set_field(meta_data,
5902 					GENMASK(cur_pos + tuple_size, cur_pos),
5903 					cur_pos, port_number);
5904 			cur_pos += tuple_size;
5905 			break;
5906 		default:
5907 			break;
5908 		}
5909 	}
5910 
5911 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5912 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5913 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5914 
5915 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5916 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5917 }
5918 
/* A complete key is a combination of the meta data key and the tuple key.
 * The meta data key is stored in the MSB region, the tuple key in the LSB
 * region, and unused bits are filled with 0.
 */
5923 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5924 			    struct hclge_fd_rule *rule)
5925 {
5926 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5927 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5928 	u8 *cur_key_x, *cur_key_y;
5929 	u8 meta_data_region;
5930 	u8 tuple_size;
5931 	int ret;
5932 	u32 i;
5933 
5934 	memset(key_x, 0, sizeof(key_x));
5935 	memset(key_y, 0, sizeof(key_y));
5936 	cur_key_x = key_x;
5937 	cur_key_y = key_y;
5938 
	for (i = 0; i < MAX_TUPLE; i++) {
5940 		bool tuple_valid;
5941 
5942 		tuple_size = tuple_key_info[i].key_length / 8;
5943 		if (!(key_cfg->tuple_active & BIT(i)))
5944 			continue;
5945 
5946 		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5947 						     cur_key_y, rule);
5948 		if (tuple_valid) {
5949 			cur_key_x += tuple_size;
5950 			cur_key_y += tuple_size;
5951 		}
5952 	}
5953 
5954 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5955 			MAX_META_DATA_LENGTH / 8;
5956 
5957 	hclge_fd_convert_meta_data(key_cfg,
5958 				   (__le32 *)(key_x + meta_data_region),
5959 				   (__le32 *)(key_y + meta_data_region),
5960 				   rule);
5961 
5962 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5963 				   true);
5964 	if (ret) {
5965 		dev_err(&hdev->pdev->dev,
5966 			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5968 		return ret;
5969 	}
5970 
5971 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5972 				   true);
5973 	if (ret)
5974 		dev_err(&hdev->pdev->dev,
5975 			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5977 	return ret;
5978 }
5979 
5980 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5981 			       struct hclge_fd_rule *rule)
5982 {
5983 	struct hclge_vport *vport = hdev->vport;
5984 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5985 	struct hclge_fd_ad_data ad_data;
5986 
5987 	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5988 	ad_data.ad_id = rule->location;
5989 
5990 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5991 		ad_data.drop_packet = true;
5992 	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5993 		ad_data.override_tc = true;
5994 		ad_data.queue_id =
5995 			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5996 		ad_data.tc_size =
5997 			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5998 	} else {
5999 		ad_data.forward_to_direct_queue = true;
6000 		ad_data.queue_id = rule->queue_id;
6001 	}
6002 
6003 	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6004 		ad_data.use_counter = true;
6005 		ad_data.counter_id = rule->vf_id %
6006 				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6007 	} else {
6008 		ad_data.use_counter = false;
6009 		ad_data.counter_id = 0;
6010 	}
6011 
6012 	ad_data.use_next_stage = false;
6013 	ad_data.next_input_key = 0;
6014 
6015 	ad_data.write_rule_id_to_bd = true;
6016 	ad_data.rule_id = rule->location;
6017 
6018 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6019 }
6020 
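/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and set the corresponding bits in @unused_tuple for fields the user left
 * unspecified, so those tuples are skipped when the TCAM key is built.
 */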
6021 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6022 				       u32 *unused_tuple)
6023 {
6024 	if (!spec || !unused_tuple)
6025 		return -EINVAL;
6026 
6027 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6028 
6029 	if (!spec->ip4src)
6030 		*unused_tuple |= BIT(INNER_SRC_IP);
6031 
6032 	if (!spec->ip4dst)
6033 		*unused_tuple |= BIT(INNER_DST_IP);
6034 
6035 	if (!spec->psrc)
6036 		*unused_tuple |= BIT(INNER_SRC_PORT);
6037 
6038 	if (!spec->pdst)
6039 		*unused_tuple |= BIT(INNER_DST_PORT);
6040 
6041 	if (!spec->tos)
6042 		*unused_tuple |= BIT(INNER_IP_TOS);
6043 
6044 	return 0;
6045 }
6046 
6047 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6048 				    u32 *unused_tuple)
6049 {
6050 	if (!spec || !unused_tuple)
6051 		return -EINVAL;
6052 
6053 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6054 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6055 
6056 	if (!spec->ip4src)
6057 		*unused_tuple |= BIT(INNER_SRC_IP);
6058 
6059 	if (!spec->ip4dst)
6060 		*unused_tuple |= BIT(INNER_DST_IP);
6061 
6062 	if (!spec->tos)
6063 		*unused_tuple |= BIT(INNER_IP_TOS);
6064 
6065 	if (!spec->proto)
6066 		*unused_tuple |= BIT(INNER_IP_PROTO);
6067 
6068 	if (spec->l4_4_bytes)
6069 		return -EOPNOTSUPP;
6070 
6071 	if (spec->ip_ver != ETH_RX_NFC_IP4)
6072 		return -EOPNOTSUPP;
6073 
6074 	return 0;
6075 }
6076 
6077 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6078 				       u32 *unused_tuple)
6079 {
6080 	if (!spec || !unused_tuple)
6081 		return -EINVAL;
6082 
6083 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6084 
	/* check whether the src/dst ip address is used */
6086 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6087 		*unused_tuple |= BIT(INNER_SRC_IP);
6088 
6089 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6090 		*unused_tuple |= BIT(INNER_DST_IP);
6091 
6092 	if (!spec->psrc)
6093 		*unused_tuple |= BIT(INNER_SRC_PORT);
6094 
6095 	if (!spec->pdst)
6096 		*unused_tuple |= BIT(INNER_DST_PORT);
6097 
6098 	if (!spec->tclass)
6099 		*unused_tuple |= BIT(INNER_IP_TOS);
6100 
6101 	return 0;
6102 }
6103 
6104 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6105 				    u32 *unused_tuple)
6106 {
6107 	if (!spec || !unused_tuple)
6108 		return -EINVAL;
6109 
6110 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6111 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6112 
	/* check whether the src/dst ip address is used */
6114 	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6115 		*unused_tuple |= BIT(INNER_SRC_IP);
6116 
6117 	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6118 		*unused_tuple |= BIT(INNER_DST_IP);
6119 
6120 	if (!spec->l4_proto)
6121 		*unused_tuple |= BIT(INNER_IP_PROTO);
6122 
6123 	if (!spec->tclass)
6124 		*unused_tuple |= BIT(INNER_IP_TOS);
6125 
6126 	if (spec->l4_4_bytes)
6127 		return -EOPNOTSUPP;
6128 
6129 	return 0;
6130 }
6131 
6132 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6133 {
6134 	if (!spec || !unused_tuple)
6135 		return -EINVAL;
6136 
6137 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6138 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6139 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6140 
6141 	if (is_zero_ether_addr(spec->h_source))
6142 		*unused_tuple |= BIT(INNER_SRC_MAC);
6143 
6144 	if (is_zero_ether_addr(spec->h_dest))
6145 		*unused_tuple |= BIT(INNER_DST_MAC);
6146 
6147 	if (!spec->h_proto)
6148 		*unused_tuple |= BIT(INNER_ETH_TYPE);
6149 
6150 	return 0;
6151 }
6152 
6153 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6154 				    struct ethtool_rx_flow_spec *fs,
6155 				    u32 *unused_tuple)
6156 {
6157 	if (fs->flow_type & FLOW_EXT) {
6158 		if (fs->h_ext.vlan_etype) {
6159 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6160 			return -EOPNOTSUPP;
6161 		}
6162 
6163 		if (!fs->h_ext.vlan_tci)
6164 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6165 
6166 		if (fs->m_ext.vlan_tci &&
6167 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6168 			dev_err(&hdev->pdev->dev,
6169 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6170 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6171 			return -EINVAL;
6172 		}
6173 	} else {
6174 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6175 	}
6176 
6177 	if (fs->flow_type & FLOW_MAC_EXT) {
6178 		if (hdev->fd_cfg.fd_mode !=
6179 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6180 			dev_err(&hdev->pdev->dev,
6181 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
6182 			return -EOPNOTSUPP;
6183 		}
6184 
6185 		if (is_zero_ether_addr(fs->h_ext.h_dest))
6186 			*unused_tuple |= BIT(INNER_DST_MAC);
6187 		else
6188 			*unused_tuple &= ~BIT(INNER_DST_MAC);
6189 	}
6190 
6191 	return 0;
6192 }
6193 
6194 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6195 				       struct hclge_fd_user_def_info *info)
6196 {
6197 	switch (flow_type) {
6198 	case ETHER_FLOW:
6199 		info->layer = HCLGE_FD_USER_DEF_L2;
6200 		*unused_tuple &= ~BIT(INNER_L2_RSV);
6201 		break;
6202 	case IP_USER_FLOW:
6203 	case IPV6_USER_FLOW:
6204 		info->layer = HCLGE_FD_USER_DEF_L3;
6205 		*unused_tuple &= ~BIT(INNER_L3_RSV);
6206 		break;
6207 	case TCP_V4_FLOW:
6208 	case UDP_V4_FLOW:
6209 	case TCP_V6_FLOW:
6210 	case UDP_V6_FLOW:
6211 		info->layer = HCLGE_FD_USER_DEF_L4;
6212 		*unused_tuple &= ~BIT(INNER_L4_RSV);
6213 		break;
6214 	default:
6215 		return -EOPNOTSUPP;
6216 	}
6217 
6218 	return 0;
6219 }
6220 
6221 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6222 {
6223 	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6224 }
6225 
6226 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6227 					 struct ethtool_rx_flow_spec *fs,
6228 					 u32 *unused_tuple,
6229 					 struct hclge_fd_user_def_info *info)
6230 {
6231 	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6232 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6233 	u16 data, offset, data_mask, offset_mask;
6234 	int ret;
6235 
6236 	info->layer = HCLGE_FD_USER_DEF_NONE;
6237 	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6238 
6239 	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6240 		return 0;
6241 
	/* The user-def field from ethtool is a 64-bit value; bits 0~15 carry
	 * the data to match and bits 32~47 carry the offset.
	 */
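	/* For example (illustrative values only): a 64-bit user-def of
	 * 0x0000000400001234 requests matching data 0x1234 at offset 4
	 * within the layer selected by the flow type, with the mask
	 * supplied in the same layout.
	 */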
6245 	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6246 	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6247 	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6248 	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6249 
6250 	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6251 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6252 		return -EOPNOTSUPP;
6253 	}
6254 
6255 	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6256 		dev_err(&hdev->pdev->dev,
6257 			"user-def offset[%u] should be no more than %u\n",
6258 			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6259 		return -EINVAL;
6260 	}
6261 
6262 	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6263 		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6264 		return -EINVAL;
6265 	}
6266 
6267 	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6268 	if (ret) {
6269 		dev_err(&hdev->pdev->dev,
6270 			"unsupported flow type for user-def bytes, ret = %d\n",
6271 			ret);
6272 		return ret;
6273 	}
6274 
6275 	info->data = data;
6276 	info->data_mask = data_mask;
6277 	info->offset = offset;
6278 
6279 	return 0;
6280 }
6281 
6282 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6283 			       struct ethtool_rx_flow_spec *fs,
6284 			       u32 *unused_tuple,
6285 			       struct hclge_fd_user_def_info *info)
6286 {
6287 	u32 flow_type;
6288 	int ret;
6289 
6290 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6291 		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
6293 			fs->location,
6294 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6295 		return -EINVAL;
6296 	}
6297 
6298 	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6299 	if (ret)
6300 		return ret;
6301 
6302 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6303 	switch (flow_type) {
6304 	case SCTP_V4_FLOW:
6305 	case TCP_V4_FLOW:
6306 	case UDP_V4_FLOW:
6307 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6308 						  unused_tuple);
6309 		break;
6310 	case IP_USER_FLOW:
6311 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6312 					       unused_tuple);
6313 		break;
6314 	case SCTP_V6_FLOW:
6315 	case TCP_V6_FLOW:
6316 	case UDP_V6_FLOW:
6317 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6318 						  unused_tuple);
6319 		break;
6320 	case IPV6_USER_FLOW:
6321 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6322 					       unused_tuple);
6323 		break;
6324 	case ETHER_FLOW:
6325 		if (hdev->fd_cfg.fd_mode !=
6326 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6327 			dev_err(&hdev->pdev->dev,
6328 				"ETHER_FLOW is not supported in current fd mode!\n");
6329 			return -EOPNOTSUPP;
6330 		}
6331 
6332 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6333 						 unused_tuple);
6334 		break;
6335 	default:
6336 		dev_err(&hdev->pdev->dev,
6337 			"unsupported protocol type, protocol type = %#x\n",
6338 			flow_type);
6339 		return -EOPNOTSUPP;
6340 	}
6341 
6342 	if (ret) {
6343 		dev_err(&hdev->pdev->dev,
6344 			"failed to check flow union tuple, ret = %d\n",
6345 			ret);
6346 		return ret;
6347 	}
6348 
6349 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6350 }
6351 
6352 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6353 				      struct ethtool_rx_flow_spec *fs,
6354 				      struct hclge_fd_rule *rule, u8 ip_proto)
6355 {
6356 	rule->tuples.src_ip[IPV4_INDEX] =
6357 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6358 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6359 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6360 
6361 	rule->tuples.dst_ip[IPV4_INDEX] =
6362 			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6363 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6364 			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6365 
6366 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6367 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6368 
6369 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6370 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6371 
6372 	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6373 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6374 
6375 	rule->tuples.ether_proto = ETH_P_IP;
6376 	rule->tuples_mask.ether_proto = 0xFFFF;
6377 
6378 	rule->tuples.ip_proto = ip_proto;
6379 	rule->tuples_mask.ip_proto = 0xFF;
6380 }
6381 
6382 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6383 				   struct ethtool_rx_flow_spec *fs,
6384 				   struct hclge_fd_rule *rule)
6385 {
6386 	rule->tuples.src_ip[IPV4_INDEX] =
6387 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6388 	rule->tuples_mask.src_ip[IPV4_INDEX] =
6389 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6390 
6391 	rule->tuples.dst_ip[IPV4_INDEX] =
6392 			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6393 	rule->tuples_mask.dst_ip[IPV4_INDEX] =
6394 			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6395 
6396 	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6397 	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6398 
6399 	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6400 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6401 
6402 	rule->tuples.ether_proto = ETH_P_IP;
6403 	rule->tuples_mask.ether_proto = 0xFFFF;
6404 }
6405 
6406 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6407 				      struct ethtool_rx_flow_spec *fs,
6408 				      struct hclge_fd_rule *rule, u8 ip_proto)
6409 {
6410 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6411 			  IPV6_SIZE);
6412 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6413 			  IPV6_SIZE);
6414 
6415 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6416 			  IPV6_SIZE);
6417 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6418 			  IPV6_SIZE);
6419 
6420 	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6421 	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6422 
6423 	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6424 	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6425 
6426 	rule->tuples.ether_proto = ETH_P_IPV6;
6427 	rule->tuples_mask.ether_proto = 0xFFFF;
6428 
6429 	rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6430 	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6431 
6432 	rule->tuples.ip_proto = ip_proto;
6433 	rule->tuples_mask.ip_proto = 0xFF;
6434 }
6435 
6436 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6437 				   struct ethtool_rx_flow_spec *fs,
6438 				   struct hclge_fd_rule *rule)
6439 {
6440 	be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6441 			  IPV6_SIZE);
6442 	be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6443 			  IPV6_SIZE);
6444 
6445 	be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6446 			  IPV6_SIZE);
6447 	be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6448 			  IPV6_SIZE);
6449 
6450 	rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6451 	rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6452 
	rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6455 
6456 	rule->tuples.ether_proto = ETH_P_IPV6;
6457 	rule->tuples_mask.ether_proto = 0xFFFF;
6458 }
6459 
6460 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6461 				     struct ethtool_rx_flow_spec *fs,
6462 				     struct hclge_fd_rule *rule)
6463 {
6464 	ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6465 	ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6466 
6467 	ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6468 	ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6469 
6470 	rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6471 	rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6472 }
6473 
6474 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6475 					struct hclge_fd_rule *rule)
6476 {
6477 	switch (info->layer) {
6478 	case HCLGE_FD_USER_DEF_L2:
6479 		rule->tuples.l2_user_def = info->data;
6480 		rule->tuples_mask.l2_user_def = info->data_mask;
6481 		break;
6482 	case HCLGE_FD_USER_DEF_L3:
6483 		rule->tuples.l3_user_def = info->data;
6484 		rule->tuples_mask.l3_user_def = info->data_mask;
6485 		break;
6486 	case HCLGE_FD_USER_DEF_L4:
6487 		rule->tuples.l4_user_def = (u32)info->data << 16;
6488 		rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6489 		break;
6490 	default:
6491 		break;
6492 	}
6493 
6494 	rule->ep.user_def = *info;
6495 }
6496 
6497 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6498 			      struct ethtool_rx_flow_spec *fs,
6499 			      struct hclge_fd_rule *rule,
6500 			      struct hclge_fd_user_def_info *info)
6501 {
6502 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6503 
6504 	switch (flow_type) {
6505 	case SCTP_V4_FLOW:
6506 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6507 		break;
6508 	case TCP_V4_FLOW:
6509 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6510 		break;
6511 	case UDP_V4_FLOW:
6512 		hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6513 		break;
6514 	case IP_USER_FLOW:
6515 		hclge_fd_get_ip4_tuple(hdev, fs, rule);
6516 		break;
6517 	case SCTP_V6_FLOW:
6518 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6519 		break;
6520 	case TCP_V6_FLOW:
6521 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6522 		break;
6523 	case UDP_V6_FLOW:
6524 		hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6525 		break;
6526 	case IPV6_USER_FLOW:
6527 		hclge_fd_get_ip6_tuple(hdev, fs, rule);
6528 		break;
6529 	case ETHER_FLOW:
6530 		hclge_fd_get_ether_tuple(hdev, fs, rule);
6531 		break;
6532 	default:
6533 		return -EOPNOTSUPP;
6534 	}
6535 
6536 	if (fs->flow_type & FLOW_EXT) {
6537 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6538 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6539 		hclge_fd_get_user_def_tuple(info, rule);
6540 	}
6541 
6542 	if (fs->flow_type & FLOW_MAC_EXT) {
6543 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6544 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6545 	}
6546 
6547 	return 0;
6548 }
6549 
6550 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6551 				struct hclge_fd_rule *rule)
6552 {
6553 	int ret;
6554 
6555 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6556 	if (ret)
6557 		return ret;
6558 
6559 	return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6560 }
6561 
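/* Common path for installing a flow director rule. Rules created via
 * ethtool (HCLGE_FD_EP_ACTIVE), tc flower and aRFS are mutually exclusive,
 * so conflicting active types are rejected and any aRFS rules are cleared
 * before the new rule is written to hardware and tracked in fd_rule_list.
 */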
6562 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6563 				     struct hclge_fd_rule *rule)
6564 {
6565 	int ret;
6566 
6567 	spin_lock_bh(&hdev->fd_rule_lock);
6568 
6569 	if (hdev->fd_active_type != rule->rule_type &&
6570 	    (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6571 	     hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6572 		dev_err(&hdev->pdev->dev,
			"mode conflict (new type %d, active type %d), please delete the existing rules first\n",
6574 			rule->rule_type, hdev->fd_active_type);
6575 		spin_unlock_bh(&hdev->fd_rule_lock);
6576 		return -EINVAL;
6577 	}
6578 
6579 	ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6580 	if (ret)
6581 		goto out;
6582 
6583 	ret = hclge_clear_arfs_rules(hdev);
6584 	if (ret)
6585 		goto out;
6586 
6587 	ret = hclge_fd_config_rule(hdev, rule);
6588 	if (ret)
6589 		goto out;
6590 
6591 	rule->state = HCLGE_FD_ACTIVE;
6592 	hdev->fd_active_type = rule->rule_type;
6593 	hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6594 
6595 out:
6596 	spin_unlock_bh(&hdev->fd_rule_lock);
6597 	return ret;
6598 }
6599 
6600 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6601 {
6602 	struct hclge_vport *vport = hclge_get_vport(handle);
6603 	struct hclge_dev *hdev = vport->back;
6604 
6605 	return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6606 }
6607 
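/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC means drop the packet,
 * otherwise the cookie encodes a VF id and a ring id, which are validated
 * and translated into the destination vport id and queue index.
 */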
6608 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6609 				      u16 *vport_id, u8 *action, u16 *queue_id)
6610 {
6611 	struct hclge_vport *vport = hdev->vport;
6612 
6613 	if (ring_cookie == RX_CLS_FLOW_DISC) {
6614 		*action = HCLGE_FD_ACTION_DROP_PACKET;
6615 	} else {
6616 		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6617 		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6618 		u16 tqps;
6619 
6620 		if (vf > hdev->num_req_vfs) {
6621 			dev_err(&hdev->pdev->dev,
6622 				"Error: vf id (%u) > max vf num (%u)\n",
6623 				vf, hdev->num_req_vfs);
6624 			return -EINVAL;
6625 		}
6626 
6627 		*vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6628 		tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6629 
6630 		if (ring >= tqps) {
6631 			dev_err(&hdev->pdev->dev,
6632 				"Error: queue id (%u) > max tqp num (%u)\n",
6633 				ring, tqps - 1);
6634 			return -EINVAL;
6635 		}
6636 
6637 		*action = HCLGE_FD_ACTION_SELECT_QUEUE;
6638 		*queue_id = ring;
6639 	}
6640 
6641 	return 0;
6642 }
6643 
6644 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6645 			      struct ethtool_rxnfc *cmd)
6646 {
6647 	struct hclge_vport *vport = hclge_get_vport(handle);
6648 	struct hclge_dev *hdev = vport->back;
6649 	struct hclge_fd_user_def_info info;
6650 	u16 dst_vport_id = 0, q_index = 0;
6651 	struct ethtool_rx_flow_spec *fs;
6652 	struct hclge_fd_rule *rule;
6653 	u32 unused = 0;
6654 	u8 action;
6655 	int ret;
6656 
6657 	if (!hnae3_dev_fd_supported(hdev)) {
6658 		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
6660 		return -EOPNOTSUPP;
6661 	}
6662 
6663 	if (!hdev->fd_en) {
6664 		dev_err(&hdev->pdev->dev,
6665 			"please enable flow director first\n");
6666 		return -EOPNOTSUPP;
6667 	}
6668 
6669 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6670 
6671 	ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6672 	if (ret)
6673 		return ret;
6674 
6675 	ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6676 					 &action, &q_index);
6677 	if (ret)
6678 		return ret;
6679 
6680 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6681 	if (!rule)
6682 		return -ENOMEM;
6683 
6684 	ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6685 	if (ret) {
6686 		kfree(rule);
6687 		return ret;
6688 	}
6689 
6690 	rule->flow_type = fs->flow_type;
6691 	rule->location = fs->location;
6692 	rule->unused_tuple = unused;
6693 	rule->vf_id = dst_vport_id;
6694 	rule->queue_id = q_index;
6695 	rule->action = action;
6696 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
6697 
6698 	ret = hclge_add_fd_entry_common(hdev, rule);
6699 	if (ret)
6700 		kfree(rule);
6701 
6702 	return ret;
6703 }
6704 
6705 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6706 			      struct ethtool_rxnfc *cmd)
6707 {
6708 	struct hclge_vport *vport = hclge_get_vport(handle);
6709 	struct hclge_dev *hdev = vport->back;
6710 	struct ethtool_rx_flow_spec *fs;
6711 	int ret;
6712 
6713 	if (!hnae3_dev_fd_supported(hdev))
6714 		return -EOPNOTSUPP;
6715 
6716 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6717 
6718 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6719 		return -EINVAL;
6720 
6721 	spin_lock_bh(&hdev->fd_rule_lock);
6722 	if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6723 	    !test_bit(fs->location, hdev->fd_bmap)) {
6724 		dev_err(&hdev->pdev->dev,
			"Delete fail, rule %u doesn't exist\n", fs->location);
6726 		spin_unlock_bh(&hdev->fd_rule_lock);
6727 		return -ENOENT;
6728 	}
6729 
6730 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6731 				   NULL, false);
6732 	if (ret)
6733 		goto out;
6734 
6735 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6736 
6737 out:
6738 	spin_unlock_bh(&hdev->fd_rule_lock);
6739 	return ret;
6740 }
6741 
6742 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6743 					 bool clear_list)
6744 {
6745 	struct hclge_fd_rule *rule;
6746 	struct hlist_node *node;
6747 	u16 location;
6748 
6749 	if (!hnae3_dev_fd_supported(hdev))
6750 		return;
6751 
6752 	spin_lock_bh(&hdev->fd_rule_lock);
6753 
6754 	for_each_set_bit(location, hdev->fd_bmap,
6755 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6756 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6757 				     NULL, false);
6758 
6759 	if (clear_list) {
6760 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6761 					  rule_node) {
6762 			hlist_del(&rule->rule_node);
6763 			kfree(rule);
6764 		}
6765 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6766 		hdev->hclge_fd_rule_num = 0;
6767 		bitmap_zero(hdev->fd_bmap,
6768 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6769 	}
6770 
6771 	spin_unlock_bh(&hdev->fd_rule_lock);
6772 }
6773 
6774 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6775 {
6776 	hclge_clear_fd_rules_in_list(hdev, true);
6777 	hclge_fd_disable_user_def(hdev);
6778 }
6779 
6780 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6781 {
6782 	struct hclge_vport *vport = hclge_get_vport(handle);
6783 	struct hclge_dev *hdev = vport->back;
6784 	struct hclge_fd_rule *rule;
6785 	struct hlist_node *node;
6786 
6787 	/* Return ok here, because reset error handling will check this
6788 	 * return value. If error is returned here, the reset process will
6789 	 * fail.
6790 	 */
6791 	if (!hnae3_dev_fd_supported(hdev))
6792 		return 0;
6793 
	/* if fd is disabled, the rules should not be restored during reset */
6795 	if (!hdev->fd_en)
6796 		return 0;
6797 
6798 	spin_lock_bh(&hdev->fd_rule_lock);
6799 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6800 		if (rule->state == HCLGE_FD_ACTIVE)
6801 			rule->state = HCLGE_FD_TO_ADD;
6802 	}
6803 	spin_unlock_bh(&hdev->fd_rule_lock);
6804 	set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6805 
6806 	return 0;
6807 }
6808 
6809 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6810 				 struct ethtool_rxnfc *cmd)
6811 {
6812 	struct hclge_vport *vport = hclge_get_vport(handle);
6813 	struct hclge_dev *hdev = vport->back;
6814 
6815 	if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6816 		return -EOPNOTSUPP;
6817 
6818 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
6819 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6820 
6821 	return 0;
6822 }
6823 
6824 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6825 				     struct ethtool_tcpip4_spec *spec,
6826 				     struct ethtool_tcpip4_spec *spec_mask)
6827 {
6828 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6829 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6830 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6831 
6832 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6833 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6834 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6835 
6836 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6837 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6838 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6839 
6840 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6841 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6842 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6843 
6844 	spec->tos = rule->tuples.ip_tos;
6845 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6846 			0 : rule->tuples_mask.ip_tos;
6847 }
6848 
6849 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6850 				  struct ethtool_usrip4_spec *spec,
6851 				  struct ethtool_usrip4_spec *spec_mask)
6852 {
6853 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6854 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6855 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6856 
6857 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6858 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6859 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6860 
6861 	spec->tos = rule->tuples.ip_tos;
6862 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6863 			0 : rule->tuples_mask.ip_tos;
6864 
6865 	spec->proto = rule->tuples.ip_proto;
6866 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6867 			0 : rule->tuples_mask.ip_proto;
6868 
6869 	spec->ip_ver = ETH_RX_NFC_IP4;
6870 }
6871 
6872 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6873 				     struct ethtool_tcpip6_spec *spec,
6874 				     struct ethtool_tcpip6_spec *spec_mask)
6875 {
6876 	cpu_to_be32_array(spec->ip6src,
6877 			  rule->tuples.src_ip, IPV6_SIZE);
6878 	cpu_to_be32_array(spec->ip6dst,
6879 			  rule->tuples.dst_ip, IPV6_SIZE);
6880 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6881 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6882 	else
6883 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6884 				  IPV6_SIZE);
6885 
6886 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6887 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6888 	else
6889 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6890 				  IPV6_SIZE);
6891 
6892 	spec->tclass = rule->tuples.ip_tos;
6893 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6894 			0 : rule->tuples_mask.ip_tos;
6895 
6896 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6897 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6898 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6899 
6900 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6901 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6902 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6903 }
6904 
6905 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6906 				  struct ethtool_usrip6_spec *spec,
6907 				  struct ethtool_usrip6_spec *spec_mask)
6908 {
6909 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6910 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6911 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6912 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6913 	else
6914 		cpu_to_be32_array(spec_mask->ip6src,
6915 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6916 
6917 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6918 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6919 	else
6920 		cpu_to_be32_array(spec_mask->ip6dst,
6921 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6922 
6923 	spec->tclass = rule->tuples.ip_tos;
6924 	spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6925 			0 : rule->tuples_mask.ip_tos;
6926 
6927 	spec->l4_proto = rule->tuples.ip_proto;
6928 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6929 			0 : rule->tuples_mask.ip_proto;
6930 }
6931 
6932 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6933 				    struct ethhdr *spec,
6934 				    struct ethhdr *spec_mask)
6935 {
6936 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6937 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6938 
6939 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6940 		eth_zero_addr(spec_mask->h_source);
6941 	else
6942 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6943 
6944 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6945 		eth_zero_addr(spec_mask->h_dest);
6946 	else
6947 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6948 
6949 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6950 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6951 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6952 }
6953 
6954 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6955 				       struct hclge_fd_rule *rule)
6956 {
6957 	if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6958 	    HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6959 		fs->h_ext.data[0] = 0;
6960 		fs->h_ext.data[1] = 0;
6961 		fs->m_ext.data[0] = 0;
6962 		fs->m_ext.data[1] = 0;
6963 	} else {
6964 		fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6965 		fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6966 		fs->m_ext.data[0] =
6967 				cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6968 		fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6969 	}
6970 }
6971 
6972 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6973 				  struct hclge_fd_rule *rule)
6974 {
6975 	if (fs->flow_type & FLOW_EXT) {
6976 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6977 		fs->m_ext.vlan_tci =
6978 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6979 				0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6980 
6981 		hclge_fd_get_user_def_info(fs, rule);
6982 	}
6983 
6984 	if (fs->flow_type & FLOW_MAC_EXT) {
6985 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6986 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6987 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6988 		else
6989 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6990 					rule->tuples_mask.dst_mac);
6991 	}
6992 }
6993 
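/* Report a single flow director rule to the ethtool rxnfc path: look up
 * the rule at fs->location under fd_rule_lock, then fill in the flow spec,
 * the masks and the ring cookie (queue id plus VF id, or RX_CLS_FLOW_DISC
 * for drop rules).
 */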
6994 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6995 				  struct ethtool_rxnfc *cmd)
6996 {
6997 	struct hclge_vport *vport = hclge_get_vport(handle);
6998 	struct hclge_fd_rule *rule = NULL;
6999 	struct hclge_dev *hdev = vport->back;
7000 	struct ethtool_rx_flow_spec *fs;
7001 	struct hlist_node *node2;
7002 
7003 	if (!hnae3_dev_fd_supported(hdev))
7004 		return -EOPNOTSUPP;
7005 
7006 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7007 
7008 	spin_lock_bh(&hdev->fd_rule_lock);
7009 
7010 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7011 		if (rule->location >= fs->location)
7012 			break;
7013 	}
7014 
7015 	if (!rule || fs->location != rule->location) {
7016 		spin_unlock_bh(&hdev->fd_rule_lock);
7017 
7018 		return -ENOENT;
7019 	}
7020 
7021 	fs->flow_type = rule->flow_type;
7022 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7023 	case SCTP_V4_FLOW:
7024 	case TCP_V4_FLOW:
7025 	case UDP_V4_FLOW:
7026 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7027 					 &fs->m_u.tcp_ip4_spec);
7028 		break;
7029 	case IP_USER_FLOW:
7030 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7031 				      &fs->m_u.usr_ip4_spec);
7032 		break;
7033 	case SCTP_V6_FLOW:
7034 	case TCP_V6_FLOW:
7035 	case UDP_V6_FLOW:
7036 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7037 					 &fs->m_u.tcp_ip6_spec);
7038 		break;
7039 	case IPV6_USER_FLOW:
7040 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7041 				      &fs->m_u.usr_ip6_spec);
7042 		break;
	/* The flow type of the fd rule has been checked before it was added
	 * to the rule list. As the other flow types have been handled above,
	 * the default case must be ETHER_FLOW.
	 */
7047 	default:
7048 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7049 					&fs->m_u.ether_spec);
7050 		break;
7051 	}
7052 
7053 	hclge_fd_get_ext_info(fs, rule);
7054 
7055 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7056 		fs->ring_cookie = RX_CLS_FLOW_DISC;
7057 	} else {
7058 		u64 vf_id;
7059 
7060 		fs->ring_cookie = rule->queue_id;
7061 		vf_id = rule->vf_id;
7062 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7063 		fs->ring_cookie |= vf_id;
7064 	}
7065 
7066 	spin_unlock_bh(&hdev->fd_rule_lock);
7067 
7068 	return 0;
7069 }
7070 
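/* Report the locations of all configured flow director rules. Rules that
 * are pending deletion are skipped, and -EMSGSIZE is returned when the
 * caller provided fewer slots than there are rules.
 */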
7071 static int hclge_get_all_rules(struct hnae3_handle *handle,
7072 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
7073 {
7074 	struct hclge_vport *vport = hclge_get_vport(handle);
7075 	struct hclge_dev *hdev = vport->back;
7076 	struct hclge_fd_rule *rule;
7077 	struct hlist_node *node2;
7078 	int cnt = 0;
7079 
7080 	if (!hnae3_dev_fd_supported(hdev))
7081 		return -EOPNOTSUPP;
7082 
7083 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7084 
7085 	spin_lock_bh(&hdev->fd_rule_lock);
7086 	hlist_for_each_entry_safe(rule, node2,
7087 				  &hdev->fd_rule_list, rule_node) {
7088 		if (cnt == cmd->rule_cnt) {
7089 			spin_unlock_bh(&hdev->fd_rule_lock);
7090 			return -EMSGSIZE;
7091 		}
7092 
7093 		if (rule->state == HCLGE_FD_TO_DEL)
7094 			continue;
7095 
7096 		rule_locs[cnt] = rule->location;
7097 		cnt++;
7098 	}
7099 
7100 	spin_unlock_bh(&hdev->fd_rule_lock);
7101 
7102 	cmd->rule_cnt = cnt;
7103 
7104 	return 0;
7105 }
7106 
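/* Extract the tuples used for aRFS rule matching from the dissected flow
 * keys. IPv4 addresses are stored in the last word of the IPv6-sized
 * address arrays, following the IPV4_INDEX convention used above.
 */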
7107 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7108 				     struct hclge_fd_rule_tuples *tuples)
7109 {
7110 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7111 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7112 
7113 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7114 	tuples->ip_proto = fkeys->basic.ip_proto;
7115 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7116 
7117 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7118 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7119 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7120 	} else {
7121 		int i;
7122 
7123 		for (i = 0; i < IPV6_SIZE; i++) {
7124 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7125 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7126 		}
7127 	}
7128 }
7129 
/* traverse all rules, check whether an existing rule has the same tuples */
7131 static struct hclge_fd_rule *
7132 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7133 			  const struct hclge_fd_rule_tuples *tuples)
7134 {
7135 	struct hclge_fd_rule *rule = NULL;
7136 	struct hlist_node *node;
7137 
7138 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7139 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7140 			return rule;
7141 	}
7142 
7143 	return NULL;
7144 }
7145 
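/* Fill in an aRFS rule from the extracted flow tuples. The rule matches on
 * the IP addresses, destination port and IP protocol; MAC addresses, VLAN
 * tag, TOS and source port are marked unused. The rule starts in TO_ADD
 * state so the FD sync logic will program it into the TCAM.
 */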
7146 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7147 				     struct hclge_fd_rule *rule)
7148 {
7149 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7150 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7151 			     BIT(INNER_SRC_PORT);
7152 	rule->action = 0;
7153 	rule->vf_id = 0;
7154 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7155 	rule->state = HCLGE_FD_TO_ADD;
7156 	if (tuples->ether_proto == ETH_P_IP) {
7157 		if (tuples->ip_proto == IPPROTO_TCP)
7158 			rule->flow_type = TCP_V4_FLOW;
7159 		else
7160 			rule->flow_type = UDP_V4_FLOW;
7161 	} else {
7162 		if (tuples->ip_proto == IPPROTO_TCP)
7163 			rule->flow_type = TCP_V6_FLOW;
7164 		else
7165 			rule->flow_type = UDP_V6_FLOW;
7166 	}
7167 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7168 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7169 }
7170 
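/* aRFS flow steering entry point: if a rule with the same tuples already
 * exists, just update its queue if needed; otherwise allocate a free TCAM
 * location and queue a new rule. aRFS is refused while user-configured fd
 * rules are active. Returns the rule location on success.
 */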
7171 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7172 				      u16 flow_id, struct flow_keys *fkeys)
7173 {
7174 	struct hclge_vport *vport = hclge_get_vport(handle);
7175 	struct hclge_fd_rule_tuples new_tuples = {};
7176 	struct hclge_dev *hdev = vport->back;
7177 	struct hclge_fd_rule *rule;
7178 	u16 bit_id;
7179 
7180 	if (!hnae3_dev_fd_supported(hdev))
7181 		return -EOPNOTSUPP;
7182 
	/* when there is already an fd rule added by the user,
	 * arfs should not work
	 */
7186 	spin_lock_bh(&hdev->fd_rule_lock);
7187 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7188 	    hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7189 		spin_unlock_bh(&hdev->fd_rule_lock);
7190 		return -EOPNOTSUPP;
7191 	}
7192 
7193 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7194 
	/* check whether a flow director filter already exists for this flow:
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
7200 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7201 	if (!rule) {
7202 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7203 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7204 			spin_unlock_bh(&hdev->fd_rule_lock);
7205 			return -ENOSPC;
7206 		}
7207 
7208 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7209 		if (!rule) {
7210 			spin_unlock_bh(&hdev->fd_rule_lock);
7211 			return -ENOMEM;
7212 		}
7213 
7214 		rule->location = bit_id;
7215 		rule->arfs.flow_id = flow_id;
7216 		rule->queue_id = queue_id;
7217 		hclge_fd_build_arfs_rule(&new_tuples, rule);
7218 		hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7219 		hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7220 	} else if (rule->queue_id != queue_id) {
7221 		rule->queue_id = queue_id;
7222 		rule->state = HCLGE_FD_TO_ADD;
7223 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7224 		hclge_task_schedule(hdev, 0);
7225 	}
7226 	spin_unlock_bh(&hdev->fd_rule_lock);
7227 	return rule->location;
7228 }
7229 
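/* Mark aRFS rules whose flows the stack reports as expired as TO_DEL so
 * the FD sync logic removes them from the TCAM.
 */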
7230 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7231 {
7232 #ifdef CONFIG_RFS_ACCEL
7233 	struct hnae3_handle *handle = &hdev->vport[0].nic;
7234 	struct hclge_fd_rule *rule;
7235 	struct hlist_node *node;
7236 
7237 	spin_lock_bh(&hdev->fd_rule_lock);
7238 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7239 		spin_unlock_bh(&hdev->fd_rule_lock);
7240 		return;
7241 	}
7242 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7243 		if (rule->state != HCLGE_FD_ACTIVE)
7244 			continue;
7245 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7246 					rule->arfs.flow_id, rule->location)) {
7247 			rule->state = HCLGE_FD_TO_DEL;
7248 			set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7249 		}
7250 	}
7251 	spin_unlock_bh(&hdev->fd_rule_lock);
7252 #endif
7253 }
7254 
/* must be called with fd_rule_lock held */
7256 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7257 {
7258 #ifdef CONFIG_RFS_ACCEL
7259 	struct hclge_fd_rule *rule;
7260 	struct hlist_node *node;
7261 	int ret;
7262 
7263 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7264 		return 0;
7265 
7266 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7267 		switch (rule->state) {
7268 		case HCLGE_FD_TO_DEL:
7269 		case HCLGE_FD_ACTIVE:
7270 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7271 						   rule->location, NULL, false);
7272 			if (ret)
7273 				return ret;
7274 			fallthrough;
7275 		case HCLGE_FD_TO_ADD:
7276 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7277 			hlist_del(&rule->rule_node);
7278 			kfree(rule);
7279 			break;
7280 		default:
7281 			break;
7282 		}
7283 	}
7284 	hclge_sync_fd_state(hdev);
7285 
7286 #endif
7287 	return 0;
7288 }
7289 
7290 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7291 				    struct hclge_fd_rule *rule)
7292 {
7293 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7294 		struct flow_match_basic match;
7295 		u16 ethtype_key, ethtype_mask;
7296 
7297 		flow_rule_match_basic(flow, &match);
7298 		ethtype_key = ntohs(match.key->n_proto);
7299 		ethtype_mask = ntohs(match.mask->n_proto);
7300 
7301 		if (ethtype_key == ETH_P_ALL) {
7302 			ethtype_key = 0;
7303 			ethtype_mask = 0;
7304 		}
7305 		rule->tuples.ether_proto = ethtype_key;
7306 		rule->tuples_mask.ether_proto = ethtype_mask;
7307 		rule->tuples.ip_proto = match.key->ip_proto;
7308 		rule->tuples_mask.ip_proto = match.mask->ip_proto;
7309 	} else {
7310 		rule->unused_tuple |= BIT(INNER_IP_PROTO);
7311 		rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7312 	}
7313 }
7314 
7315 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7316 				  struct hclge_fd_rule *rule)
7317 {
7318 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7319 		struct flow_match_eth_addrs match;
7320 
7321 		flow_rule_match_eth_addrs(flow, &match);
7322 		ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7323 		ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7324 		ether_addr_copy(rule->tuples.src_mac, match.key->src);
7325 		ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7326 	} else {
7327 		rule->unused_tuple |= BIT(INNER_DST_MAC);
7328 		rule->unused_tuple |= BIT(INNER_SRC_MAC);
7329 	}
7330 }
7331 
7332 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7333 				   struct hclge_fd_rule *rule)
7334 {
7335 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7336 		struct flow_match_vlan match;
7337 
7338 		flow_rule_match_vlan(flow, &match);
7339 		rule->tuples.vlan_tag1 = match.key->vlan_id |
7340 				(match.key->vlan_priority << VLAN_PRIO_SHIFT);
7341 		rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7342 				(match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7343 	} else {
7344 		rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7345 	}
7346 }
7347 
7348 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7349 				 struct hclge_fd_rule *rule)
7350 {
7351 	u16 addr_type = 0;
7352 
7353 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7354 		struct flow_match_control match;
7355 
7356 		flow_rule_match_control(flow, &match);
7357 		addr_type = match.key->addr_type;
7358 	}
7359 
7360 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7361 		struct flow_match_ipv4_addrs match;
7362 
7363 		flow_rule_match_ipv4_addrs(flow, &match);
7364 		rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7365 		rule->tuples_mask.src_ip[IPV4_INDEX] =
7366 						be32_to_cpu(match.mask->src);
7367 		rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7368 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
7369 						be32_to_cpu(match.mask->dst);
7370 	} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7371 		struct flow_match_ipv6_addrs match;
7372 
7373 		flow_rule_match_ipv6_addrs(flow, &match);
7374 		be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7375 				  IPV6_SIZE);
7376 		be32_to_cpu_array(rule->tuples_mask.src_ip,
7377 				  match.mask->src.s6_addr32, IPV6_SIZE);
7378 		be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7379 				  IPV6_SIZE);
7380 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
7381 				  match.mask->dst.s6_addr32, IPV6_SIZE);
7382 	} else {
7383 		rule->unused_tuple |= BIT(INNER_SRC_IP);
7384 		rule->unused_tuple |= BIT(INNER_DST_IP);
7385 	}
7386 }
7387 
7388 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7389 				   struct hclge_fd_rule *rule)
7390 {
7391 	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7392 		struct flow_match_ports match;
7393 
7394 		flow_rule_match_ports(flow, &match);
7395 
7396 		rule->tuples.src_port = be16_to_cpu(match.key->src);
7397 		rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7398 		rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7399 		rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7400 	} else {
7401 		rule->unused_tuple |= BIT(INNER_SRC_PORT);
7402 		rule->unused_tuple |= BIT(INNER_DST_PORT);
7403 	}
7404 }
7405 
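/* Translate a tc flower match into an hclge_fd_rule. Dissector keys that
 * the flow director cannot match on are rejected up front; keys absent
 * from the match are recorded in rule->unused_tuple.
 */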
7406 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7407 				  struct flow_cls_offload *cls_flower,
7408 				  struct hclge_fd_rule *rule)
7409 {
7410 	struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7411 	struct flow_dissector *dissector = flow->match.dissector;
7412 
7413 	if (dissector->used_keys &
7414 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7415 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
7416 	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7417 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
7418 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7419 	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7420 	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7421 		dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7422 			dissector->used_keys);
7423 		return -EOPNOTSUPP;
7424 	}
7425 
7426 	hclge_get_cls_key_basic(flow, rule);
7427 	hclge_get_cls_key_mac(flow, rule);
7428 	hclge_get_cls_key_vlan(flow, rule);
7429 	hclge_get_cls_key_ip(flow, rule);
7430 	hclge_get_cls_key_port(flow, rule);
7431 
7432 	return 0;
7433 }
7434 
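/* The tc flower prio maps directly to a TCAM location (prio - 1), so it
 * must be within the stage 1 rule capacity and not already in use.
 */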
7435 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7436 				  struct flow_cls_offload *cls_flower, int tc)
7437 {
7438 	u32 prio = cls_flower->common.prio;
7439 
7440 	if (tc < 0 || tc > hdev->tc_max) {
7441 		dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7442 		return -EINVAL;
7443 	}
7444 
7445 	if (prio == 0 ||
7446 	    prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7447 		dev_err(&hdev->pdev->dev,
			"prio %u should be in range [1, %u]\n",
7449 			prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7450 		return -EINVAL;
7451 	}
7452 
7453 	if (test_bit(prio - 1, hdev->fd_bmap)) {
7454 		dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7455 		return -EINVAL;
7456 	}
7457 	return 0;
7458 }
7459 
7460 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7461 				struct flow_cls_offload *cls_flower,
7462 				int tc)
7463 {
7464 	struct hclge_vport *vport = hclge_get_vport(handle);
7465 	struct hclge_dev *hdev = vport->back;
7466 	struct hclge_fd_rule *rule;
7467 	int ret;
7468 
7469 	ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7470 	if (ret) {
7471 		dev_err(&hdev->pdev->dev,
7472 			"failed to check cls flower params, ret = %d\n", ret);
7473 		return ret;
7474 	}
7475 
7476 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7477 	if (!rule)
7478 		return -ENOMEM;
7479 
7480 	ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7481 	if (ret) {
7482 		kfree(rule);
7483 		return ret;
7484 	}
7485 
7486 	rule->action = HCLGE_FD_ACTION_SELECT_TC;
7487 	rule->cls_flower.tc = tc;
7488 	rule->location = cls_flower->common.prio - 1;
7489 	rule->vf_id = 0;
7490 	rule->cls_flower.cookie = cls_flower->cookie;
7491 	rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7492 
7493 	ret = hclge_add_fd_entry_common(hdev, rule);
7494 	if (ret)
7495 		kfree(rule);
7496 
7497 	return ret;
7498 }
7499 
7500 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7501 						   unsigned long cookie)
7502 {
7503 	struct hclge_fd_rule *rule;
7504 	struct hlist_node *node;
7505 
7506 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7507 		if (rule->cls_flower.cookie == cookie)
7508 			return rule;
7509 	}
7510 
7511 	return NULL;
7512 }
7513 
7514 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7515 				struct flow_cls_offload *cls_flower)
7516 {
7517 	struct hclge_vport *vport = hclge_get_vport(handle);
7518 	struct hclge_dev *hdev = vport->back;
7519 	struct hclge_fd_rule *rule;
7520 	int ret;
7521 
7522 	spin_lock_bh(&hdev->fd_rule_lock);
7523 
7524 	rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7525 	if (!rule) {
7526 		spin_unlock_bh(&hdev->fd_rule_lock);
7527 		return -EINVAL;
7528 	}
7529 
7530 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7531 				   NULL, false);
7532 	if (ret) {
7533 		spin_unlock_bh(&hdev->fd_rule_lock);
7534 		return ret;
7535 	}
7536 
7537 	hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7538 	spin_unlock_bh(&hdev->fd_rule_lock);
7539 
7540 	return 0;
7541 }
7542 
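/* Flush pending rule changes to hardware: program TO_ADD rules into the
 * TCAM and remove TO_DEL ones. On failure the FD_TBL_CHANGED bit is set
 * again so a later service task run retries.
 */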
7543 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7544 {
7545 	struct hclge_fd_rule *rule;
7546 	struct hlist_node *node;
7547 	int ret = 0;
7548 
7549 	if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7550 		return;
7551 
7552 	spin_lock_bh(&hdev->fd_rule_lock);
7553 
7554 	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7555 		switch (rule->state) {
7556 		case HCLGE_FD_TO_ADD:
7557 			ret = hclge_fd_config_rule(hdev, rule);
7558 			if (ret)
7559 				goto out;
7560 			rule->state = HCLGE_FD_ACTIVE;
7561 			break;
7562 		case HCLGE_FD_TO_DEL:
7563 			ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7564 						   rule->location, NULL, false);
7565 			if (ret)
7566 				goto out;
7567 			hclge_fd_dec_rule_cnt(hdev, rule->location);
7568 			hclge_fd_free_node(hdev, rule);
7569 			break;
7570 		default:
7571 			break;
7572 		}
7573 	}
7574 
7575 out:
7576 	if (ret)
7577 		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7578 
7579 	spin_unlock_bh(&hdev->fd_rule_lock);
7580 }
7581 
7582 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7583 {
7584 	if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7585 		bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7586 
7587 		hclge_clear_fd_rules_in_list(hdev, clear_list);
7588 	}
7589 
7590 	hclge_sync_fd_user_def_cfg(hdev, false);
7591 
7592 	hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7593 }
7594 
7595 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7596 {
7597 	struct hclge_vport *vport = hclge_get_vport(handle);
7598 	struct hclge_dev *hdev = vport->back;
7599 
7600 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7601 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7602 }
7603 
7604 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7605 {
7606 	struct hclge_vport *vport = hclge_get_vport(handle);
7607 	struct hclge_dev *hdev = vport->back;
7608 
7609 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7610 }
7611 
7612 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7613 {
7614 	struct hclge_vport *vport = hclge_get_vport(handle);
7615 	struct hclge_dev *hdev = vport->back;
7616 
7617 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7618 }
7619 
7620 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7621 {
7622 	struct hclge_vport *vport = hclge_get_vport(handle);
7623 	struct hclge_dev *hdev = vport->back;
7624 
7625 	return hdev->rst_stats.hw_reset_done_cnt;
7626 }
7627 
7628 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7629 {
7630 	struct hclge_vport *vport = hclge_get_vport(handle);
7631 	struct hclge_dev *hdev = vport->back;
7632 
7633 	hdev->fd_en = enable;
7634 
7635 	if (!enable)
7636 		set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7637 	else
7638 		hclge_restore_fd_entries(handle);
7639 
7640 	hclge_task_schedule(hdev, 0);
7641 }
7642 
7643 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7644 {
7645 	struct hclge_desc desc;
7646 	struct hclge_config_mac_mode_cmd *req =
7647 		(struct hclge_config_mac_mode_cmd *)desc.data;
7648 	u32 loop_en = 0;
7649 	int ret;
7650 
7651 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7652 
7653 	if (enable) {
7654 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7655 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7656 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7657 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7658 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7659 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7660 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7661 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7662 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7663 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7664 	}
7665 
7666 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7667 
7668 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7669 	if (ret)
7670 		dev_err(&hdev->pdev->dev,
7671 			"mac enable fail, ret =%d.\n", ret);
7672 }
7673 
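/* Read-modify-write of one function's MAC/VLAN switch parameter: only the
 * bits selected by param_mask are updated. The loopback path below uses
 * this to toggle the SSU local loopback behaviour.
 */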
7674 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7675 				     u8 switch_param, u8 param_mask)
7676 {
7677 	struct hclge_mac_vlan_switch_cmd *req;
7678 	struct hclge_desc desc;
7679 	u32 func_id;
7680 	int ret;
7681 
7682 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7683 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7684 
7685 	/* read current config parameter */
7686 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7687 				   true);
7688 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7689 	req->func_id = cpu_to_le32(func_id);
7690 
7691 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7692 	if (ret) {
7693 		dev_err(&hdev->pdev->dev,
7694 			"read mac vlan switch parameter fail, ret = %d\n", ret);
7695 		return ret;
7696 	}
7697 
7698 	/* modify and write new config parameter */
7699 	hclge_cmd_reuse_desc(&desc, false);
7700 	req->switch_param = (req->switch_param & param_mask) | switch_param;
7701 	req->param_mask = param_mask;
7702 
7703 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7704 	if (ret)
7705 		dev_err(&hdev->pdev->dev,
7706 			"set mac vlan switch parameter fail, ret = %d\n", ret);
7707 	return ret;
7708 }
7709 
7710 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7711 				       int link_ret)
7712 {
7713 #define HCLGE_PHY_LINK_STATUS_NUM  200
7714 
7715 	struct phy_device *phydev = hdev->hw.mac.phydev;
7716 	int i = 0;
7717 	int ret;
7718 
7719 	do {
7720 		ret = phy_read_status(phydev);
7721 		if (ret) {
7722 			dev_err(&hdev->pdev->dev,
7723 				"phy update link status fail, ret = %d\n", ret);
7724 			return;
7725 		}
7726 
7727 		if (phydev->link == link_ret)
7728 			break;
7729 
7730 		msleep(HCLGE_LINK_STATUS_MS);
7731 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7732 }
7733 
7734 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7735 {
7736 #define HCLGE_MAC_LINK_STATUS_NUM  100
7737 
7738 	int link_status;
7739 	int i = 0;
7740 	int ret;
7741 
7742 	do {
7743 		ret = hclge_get_mac_link_status(hdev, &link_status);
7744 		if (ret)
7745 			return ret;
7746 		if (link_status == link_ret)
7747 			return 0;
7748 
7749 		msleep(HCLGE_LINK_STATUS_MS);
7750 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7751 	return -EBUSY;
7752 }
7753 
7754 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7755 					  bool is_phy)
7756 {
7757 	int link_ret;
7758 
7759 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7760 
7761 	if (is_phy)
7762 		hclge_phy_link_status_wait(hdev, link_ret);
7763 
7764 	return hclge_mac_link_status_wait(hdev, link_ret);
7765 }
7766 
7767 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7768 {
7769 	struct hclge_config_mac_mode_cmd *req;
7770 	struct hclge_desc desc;
7771 	u32 loop_en;
7772 	int ret;
7773 
7774 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
7776 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7777 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7778 	if (ret) {
7779 		dev_err(&hdev->pdev->dev,
7780 			"mac loopback get fail, ret =%d.\n", ret);
7781 		return ret;
7782 	}
7783 
7784 	/* 2 Then setup the loopback flag */
7785 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7786 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7787 
7788 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7789 
	/* 3 Config MAC work mode with the loopback flag
	 * and its original configuration parameters
	 */
7793 	hclge_cmd_reuse_desc(&desc, false);
7794 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7795 	if (ret)
7796 		dev_err(&hdev->pdev->dev,
7797 			"mac loopback set fail, ret =%d.\n", ret);
7798 	return ret;
7799 }
7800 
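/* Ask the firmware to enable or disable serdes (or IMP-managed PHY)
 * loopback, then poll the command result until the firmware reports that
 * the configuration is done or the retry limit is reached.
 */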
7801 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7802 				     enum hnae3_loop loop_mode)
7803 {
7804 #define HCLGE_COMMON_LB_RETRY_MS	10
7805 #define HCLGE_COMMON_LB_RETRY_NUM	100
7806 
7807 	struct hclge_common_lb_cmd *req;
7808 	struct hclge_desc desc;
7809 	int ret, i = 0;
7810 	u8 loop_mode_b;
7811 
7812 	req = (struct hclge_common_lb_cmd *)desc.data;
7813 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7814 
7815 	switch (loop_mode) {
7816 	case HNAE3_LOOP_SERIAL_SERDES:
7817 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7818 		break;
7819 	case HNAE3_LOOP_PARALLEL_SERDES:
7820 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7821 		break;
7822 	case HNAE3_LOOP_PHY:
7823 		loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7824 		break;
7825 	default:
7826 		dev_err(&hdev->pdev->dev,
7827 			"unsupported common loopback mode %d\n", loop_mode);
7828 		return -ENOTSUPP;
7829 	}
7830 
7831 	if (en) {
7832 		req->enable = loop_mode_b;
7833 		req->mask = loop_mode_b;
7834 	} else {
7835 		req->mask = loop_mode_b;
7836 	}
7837 
7838 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7839 	if (ret) {
7840 		dev_err(&hdev->pdev->dev,
7841 			"common loopback set fail, ret = %d\n", ret);
7842 		return ret;
7843 	}
7844 
7845 	do {
7846 		msleep(HCLGE_COMMON_LB_RETRY_MS);
7847 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7848 					   true);
7849 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7850 		if (ret) {
7851 			dev_err(&hdev->pdev->dev,
				"common loopback get fail, ret = %d\n");
7853 			return ret;
7854 		}
7855 	} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7856 		 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7857 
7858 	if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7859 		dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7860 		return -EBUSY;
7861 	} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7862 		dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7863 		return -EIO;
7864 	}
7865 	return ret;
7866 }
7867 
7868 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7869 				     enum hnae3_loop loop_mode)
7870 {
7871 	int ret;
7872 
7873 	ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7874 	if (ret)
7875 		return ret;
7876 
7877 	hclge_cfg_mac_mode(hdev, en);
7878 
7879 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7880 	if (ret)
7881 		dev_err(&hdev->pdev->dev,
7882 			"serdes loopback config mac mode timeout\n");
7883 
7884 	return ret;
7885 }
7886 
7887 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7888 				     struct phy_device *phydev)
7889 {
7890 	int ret;
7891 
7892 	if (!phydev->suspended) {
7893 		ret = phy_suspend(phydev);
7894 		if (ret)
7895 			return ret;
7896 	}
7897 
7898 	ret = phy_resume(phydev);
7899 	if (ret)
7900 		return ret;
7901 
7902 	return phy_loopback(phydev, true);
7903 }
7904 
7905 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7906 				      struct phy_device *phydev)
7907 {
7908 	int ret;
7909 
7910 	ret = phy_loopback(phydev, false);
7911 	if (ret)
7912 		return ret;
7913 
7914 	return phy_suspend(phydev);
7915 }
7916 
7917 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7918 {
7919 	struct phy_device *phydev = hdev->hw.mac.phydev;
7920 	int ret;
7921 
7922 	if (!phydev) {
7923 		if (hnae3_dev_phy_imp_supported(hdev))
7924 			return hclge_set_common_loopback(hdev, en,
7925 							 HNAE3_LOOP_PHY);
7926 		return -ENOTSUPP;
7927 	}
7928 
7929 	if (en)
7930 		ret = hclge_enable_phy_loopback(hdev, phydev);
7931 	else
7932 		ret = hclge_disable_phy_loopback(hdev, phydev);
7933 	if (ret) {
7934 		dev_err(&hdev->pdev->dev,
7935 			"set phy loopback fail, ret = %d\n", ret);
7936 		return ret;
7937 	}
7938 
7939 	hclge_cfg_mac_mode(hdev, en);
7940 
7941 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7942 	if (ret)
7943 		dev_err(&hdev->pdev->dev,
7944 			"phy loopback config mac mode timeout\n");
7945 
7946 	return ret;
7947 }
7948 
7949 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7950 				     u16 stream_id, bool enable)
7951 {
7952 	struct hclge_desc desc;
7953 	struct hclge_cfg_com_tqp_queue_cmd *req =
7954 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7955 
7956 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7957 	req->tqp_id = cpu_to_le16(tqp_id);
7958 	req->stream_id = cpu_to_le16(stream_id);
7959 	if (enable)
7960 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7961 
7962 	return hclge_cmd_send(&hdev->hw, &desc, 1);
7963 }
7964 
7965 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7966 {
7967 	struct hclge_vport *vport = hclge_get_vport(handle);
7968 	struct hclge_dev *hdev = vport->back;
7969 	int ret;
7970 	u16 i;
7971 
7972 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
7973 		ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7974 		if (ret)
7975 			return ret;
7976 	}
7977 	return 0;
7978 }
7979 
7980 static int hclge_set_loopback(struct hnae3_handle *handle,
7981 			      enum hnae3_loop loop_mode, bool en)
7982 {
7983 	struct hclge_vport *vport = hclge_get_vport(handle);
7984 	struct hclge_dev *hdev = vport->back;
7985 	int ret;
7986 
7987 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7988 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7989 	 * the same, the packets are looped back in the SSU. If SSU loopback
7990 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7991 	 */
7992 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7993 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7994 
7995 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7996 						HCLGE_SWITCH_ALW_LPBK_MASK);
7997 		if (ret)
7998 			return ret;
7999 	}
8000 
8001 	switch (loop_mode) {
8002 	case HNAE3_LOOP_APP:
8003 		ret = hclge_set_app_loopback(hdev, en);
8004 		break;
8005 	case HNAE3_LOOP_SERIAL_SERDES:
8006 	case HNAE3_LOOP_PARALLEL_SERDES:
8007 		ret = hclge_set_common_loopback(hdev, en, loop_mode);
8008 		break;
8009 	case HNAE3_LOOP_PHY:
8010 		ret = hclge_set_phy_loopback(hdev, en);
8011 		break;
8012 	default:
8013 		ret = -ENOTSUPP;
8014 		dev_err(&hdev->pdev->dev,
8015 			"loop_mode %d is not supported\n", loop_mode);
8016 		break;
8017 	}
8018 
8019 	if (ret)
8020 		return ret;
8021 
8022 	ret = hclge_tqp_enable(handle, en);
8023 	if (ret)
8024 		dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8025 			en ? "enable" : "disable", ret);
8026 
8027 	return ret;
8028 }
8029 
8030 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8031 {
8032 	int ret;
8033 
8034 	ret = hclge_set_app_loopback(hdev, false);
8035 	if (ret)
8036 		return ret;
8037 
8038 	ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8039 	if (ret)
8040 		return ret;
8041 
8042 	return hclge_cfg_common_loopback(hdev, false,
8043 					 HNAE3_LOOP_PARALLEL_SERDES);
8044 }
8045 
8046 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8047 {
8048 	struct hclge_vport *vport = hclge_get_vport(handle);
8049 	struct hnae3_knic_private_info *kinfo;
8050 	struct hnae3_queue *queue;
8051 	struct hclge_tqp *tqp;
8052 	int i;
8053 
8054 	kinfo = &vport->nic.kinfo;
8055 	for (i = 0; i < kinfo->num_tqps; i++) {
8056 		queue = handle->kinfo.tqp[i];
8057 		tqp = container_of(queue, struct hclge_tqp, q);
8058 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8059 	}
8060 }
8061 
8062 static void hclge_flush_link_update(struct hclge_dev *hdev)
8063 {
8064 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
8065 
8066 	unsigned long last = hdev->serv_processed_cnt;
8067 	int i = 0;
8068 
8069 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8070 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8071 	       last == hdev->serv_processed_cnt)
8072 		usleep_range(1, 1);
8073 }
8074 
8075 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8076 {
8077 	struct hclge_vport *vport = hclge_get_vport(handle);
8078 	struct hclge_dev *hdev = vport->back;
8079 
8080 	if (enable) {
8081 		hclge_task_schedule(hdev, 0);
8082 	} else {
8083 		/* Set the DOWN flag here to disable link updating */
8084 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
8085 
8086 		/* flush memory to make sure DOWN is seen by service task */
8087 		smp_mb__before_atomic();
8088 		hclge_flush_link_update(hdev);
8089 	}
8090 }
8091 
8092 static int hclge_ae_start(struct hnae3_handle *handle)
8093 {
8094 	struct hclge_vport *vport = hclge_get_vport(handle);
8095 	struct hclge_dev *hdev = vport->back;
8096 
8097 	/* mac enable */
8098 	hclge_cfg_mac_mode(hdev, true);
8099 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8100 	hdev->hw.mac.link = 0;
8101 
8102 	/* reset tqp stats */
8103 	hclge_reset_tqp_stats(handle);
8104 
8105 	hclge_mac_start_phy(hdev);
8106 
8107 	return 0;
8108 }
8109 
8110 static void hclge_ae_stop(struct hnae3_handle *handle)
8111 {
8112 	struct hclge_vport *vport = hclge_get_vport(handle);
8113 	struct hclge_dev *hdev = vport->back;
8114 
8115 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
8116 	spin_lock_bh(&hdev->fd_rule_lock);
8117 	hclge_clear_arfs_rules(hdev);
8118 	spin_unlock_bh(&hdev->fd_rule_lock);
8119 
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so we only need to stop the PHY here.
	 */
8123 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8124 	    hdev->reset_type != HNAE3_FUNC_RESET) {
8125 		hclge_mac_stop_phy(hdev);
8126 		hclge_update_link_status(hdev);
8127 		return;
8128 	}
8129 
8130 	hclge_reset_tqp(handle);
8131 
8132 	hclge_config_mac_tnl_int(hdev, false);
8133 
8134 	/* Mac disable */
8135 	hclge_cfg_mac_mode(hdev, false);
8136 
8137 	hclge_mac_stop_phy(hdev);
8138 
8139 	/* reset tqp stats */
8140 	hclge_reset_tqp_stats(handle);
8141 	hclge_update_link_status(hdev);
8142 }
8143 
8144 int hclge_vport_start(struct hclge_vport *vport)
8145 {
8146 	struct hclge_dev *hdev = vport->back;
8147 
8148 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8149 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8150 	vport->last_active_jiffies = jiffies;
8151 
8152 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8153 		if (vport->vport_id) {
8154 			hclge_restore_mac_table_common(vport);
8155 			hclge_restore_vport_vlan_table(vport);
8156 		} else {
8157 			hclge_restore_hw_table(hdev);
8158 		}
8159 	}
8160 
8161 	clear_bit(vport->vport_id, hdev->vport_config_block);
8162 
8163 	return 0;
8164 }
8165 
8166 void hclge_vport_stop(struct hclge_vport *vport)
8167 {
8168 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8169 }
8170 
8171 static int hclge_client_start(struct hnae3_handle *handle)
8172 {
8173 	struct hclge_vport *vport = hclge_get_vport(handle);
8174 
8175 	return hclge_vport_start(vport);
8176 }
8177 
8178 static void hclge_client_stop(struct hnae3_handle *handle)
8179 {
8180 	struct hclge_vport *vport = hclge_get_vport(handle);
8181 
8182 	hclge_vport_stop(vport);
8183 }
8184 
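/* Translate the cmdq status and the per-operation response code of a
 * MAC_VLAN table command into an errno, logging unexpected codes.
 */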
8185 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8186 					 u16 cmdq_resp, u8  resp_code,
8187 					 enum hclge_mac_vlan_tbl_opcode op)
8188 {
8189 	struct hclge_dev *hdev = vport->back;
8190 
8191 	if (cmdq_resp) {
8192 		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
8194 			cmdq_resp);
8195 		return -EIO;
8196 	}
8197 
8198 	if (op == HCLGE_MAC_VLAN_ADD) {
8199 		if (!resp_code || resp_code == 1)
8200 			return 0;
8201 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8202 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
8203 			return -ENOSPC;
8204 
8205 		dev_err(&hdev->pdev->dev,
8206 			"add mac addr failed for undefined, code=%u.\n",
8207 			resp_code);
8208 		return -EIO;
8209 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
8210 		if (!resp_code) {
8211 			return 0;
8212 		} else if (resp_code == 1) {
8213 			dev_dbg(&hdev->pdev->dev,
8214 				"remove mac addr failed for miss.\n");
8215 			return -ENOENT;
8216 		}
8217 
8218 		dev_err(&hdev->pdev->dev,
8219 			"remove mac addr failed for undefined, code=%u.\n",
8220 			resp_code);
8221 		return -EIO;
8222 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
8223 		if (!resp_code) {
8224 			return 0;
8225 		} else if (resp_code == 1) {
8226 			dev_dbg(&hdev->pdev->dev,
8227 				"lookup mac addr failed for miss.\n");
8228 			return -ENOENT;
8229 		}
8230 
8231 		dev_err(&hdev->pdev->dev,
8232 			"lookup mac addr failed for undefined, code=%u.\n",
8233 			resp_code);
8234 		return -EIO;
8235 	}
8236 
8237 	dev_err(&hdev->pdev->dev,
8238 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8239 
8240 	return -EINVAL;
8241 }
8242 
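/* Set or clear the bit for vfid in the function bitmap of a multicast
 * MAC_VLAN table entry. The bitmap spans two descriptors: function ids
 * 0..191 live in desc[1] and the remainder in desc[2].
 */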
8243 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8244 {
8245 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8246 
8247 	unsigned int word_num;
8248 	unsigned int bit_num;
8249 
8250 	if (vfid > 255 || vfid < 0)
8251 		return -EIO;
8252 
8253 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8254 		word_num = vfid / 32;
8255 		bit_num  = vfid % 32;
8256 		if (clr)
8257 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8258 		else
8259 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8260 	} else {
8261 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8262 		bit_num  = vfid % 32;
8263 		if (clr)
8264 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8265 		else
8266 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8267 	}
8268 
8269 	return 0;
8270 }
8271 
8272 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8273 {
8274 #define HCLGE_DESC_NUMBER 3
8275 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8276 	int i, j;
8277 
8278 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8279 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8280 			if (desc[i].data[j])
8281 				return false;
8282 
8283 	return true;
8284 }
8285 
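/* Pack a MAC address into the MAC_VLAN table entry layout: bytes 0..3 go
 * into mac_addr_hi32 and bytes 4..5 into mac_addr_lo16. For multicast
 * addresses the entry type and mc_mac_en bits are also set.
 */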
8286 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8287 				   const u8 *addr, bool is_mc)
8288 {
8289 	const unsigned char *mac_addr = addr;
8290 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8291 		       (mac_addr[0]) | (mac_addr[1] << 8);
8292 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8293 
8294 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8295 	if (is_mc) {
8296 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8297 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8298 	}
8299 
8300 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8301 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8302 }
8303 
8304 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8305 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
8306 {
8307 	struct hclge_dev *hdev = vport->back;
8308 	struct hclge_desc desc;
8309 	u8 resp_code;
8310 	u16 retval;
8311 	int ret;
8312 
8313 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8314 
8315 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8316 
8317 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8318 	if (ret) {
8319 		dev_err(&hdev->pdev->dev,
8320 			"del mac addr failed for cmd_send, ret =%d.\n",
8321 			ret);
8322 		return ret;
8323 	}
8324 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8325 	retval = le16_to_cpu(desc.retval);
8326 
8327 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8328 					     HCLGE_MAC_VLAN_REMOVE);
8329 }
8330 
8331 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8332 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
8333 				     struct hclge_desc *desc,
8334 				     bool is_mc)
8335 {
8336 	struct hclge_dev *hdev = vport->back;
8337 	u8 resp_code;
8338 	u16 retval;
8339 	int ret;
8340 
8341 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8342 	if (is_mc) {
8343 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8344 		memcpy(desc[0].data,
8345 		       req,
8346 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8347 		hclge_cmd_setup_basic_desc(&desc[1],
8348 					   HCLGE_OPC_MAC_VLAN_ADD,
8349 					   true);
8350 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8351 		hclge_cmd_setup_basic_desc(&desc[2],
8352 					   HCLGE_OPC_MAC_VLAN_ADD,
8353 					   true);
8354 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
8355 	} else {
8356 		memcpy(desc[0].data,
8357 		       req,
8358 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8359 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
8360 	}
8361 	if (ret) {
8362 		dev_err(&hdev->pdev->dev,
8363 			"lookup mac addr failed for cmd_send, ret =%d.\n",
8364 			ret);
8365 		return ret;
8366 	}
8367 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8368 	retval = le16_to_cpu(desc[0].retval);
8369 
8370 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8371 					     HCLGE_MAC_VLAN_LKUP);
8372 }
8373 
8374 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8375 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
8376 				  struct hclge_desc *mc_desc)
8377 {
8378 	struct hclge_dev *hdev = vport->back;
8379 	int cfg_status;
8380 	u8 resp_code;
8381 	u16 retval;
8382 	int ret;
8383 
8384 	if (!mc_desc) {
8385 		struct hclge_desc desc;
8386 
8387 		hclge_cmd_setup_basic_desc(&desc,
8388 					   HCLGE_OPC_MAC_VLAN_ADD,
8389 					   false);
8390 		memcpy(desc.data, req,
8391 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8392 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8393 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8394 		retval = le16_to_cpu(desc.retval);
8395 
8396 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8397 							   resp_code,
8398 							   HCLGE_MAC_VLAN_ADD);
8399 	} else {
8400 		hclge_cmd_reuse_desc(&mc_desc[0], false);
8401 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8402 		hclge_cmd_reuse_desc(&mc_desc[1], false);
8403 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8404 		hclge_cmd_reuse_desc(&mc_desc[2], false);
8405 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8406 		memcpy(mc_desc[0].data, req,
8407 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8408 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8409 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8410 		retval = le16_to_cpu(mc_desc[0].retval);
8411 
8412 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8413 							   resp_code,
8414 							   HCLGE_MAC_VLAN_ADD);
8415 	}
8416 
8417 	if (ret) {
8418 		dev_err(&hdev->pdev->dev,
8419 			"add mac addr failed for cmd_send, ret =%d.\n",
8420 			ret);
8421 		return ret;
8422 	}
8423 
8424 	return cfg_status;
8425 }
8426 
8427 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8428 			       u16 *allocated_size)
8429 {
8430 	struct hclge_umv_spc_alc_cmd *req;
8431 	struct hclge_desc desc;
8432 	int ret;
8433 
8434 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8435 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8436 
8437 	req->space_size = cpu_to_le32(space_size);
8438 
8439 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8440 	if (ret) {
8441 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8442 			ret);
8443 		return ret;
8444 	}
8445 
8446 	*allocated_size = le32_to_cpu(desc.data[1]);
8447 
8448 	return 0;
8449 }
8450 
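/* Ask the firmware for the wanted unicast MAC (UMV) space. The granted
 * space is divided into num_alloc_vport + 1 equal quotas: each vport gets
 * one as its private quota, and the spare quota plus the division
 * remainder forms the shared pool.
 */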
8451 static int hclge_init_umv_space(struct hclge_dev *hdev)
8452 {
8453 	u16 allocated_size = 0;
8454 	int ret;
8455 
8456 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8457 	if (ret)
8458 		return ret;
8459 
8460 	if (allocated_size < hdev->wanted_umv_size)
8461 		dev_warn(&hdev->pdev->dev,
8462 			 "failed to alloc umv space, want %u, get %u\n",
8463 			 hdev->wanted_umv_size, allocated_size);
8464 
8465 	hdev->max_umv_size = allocated_size;
8466 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8467 	hdev->share_umv_size = hdev->priv_umv_size +
8468 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8469 
8470 	return 0;
8471 }
8472 
8473 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8474 {
8475 	struct hclge_vport *vport;
8476 	int i;
8477 
8478 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8479 		vport = &hdev->vport[i];
8480 		vport->used_umv_num = 0;
8481 	}
8482 
8483 	mutex_lock(&hdev->vport_lock);
8484 	hdev->share_umv_size = hdev->priv_umv_size +
8485 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8486 	mutex_unlock(&hdev->vport_lock);
8487 }
8488 
8489 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8490 {
8491 	struct hclge_dev *hdev = vport->back;
8492 	bool is_full;
8493 
8494 	if (need_lock)
8495 		mutex_lock(&hdev->vport_lock);
8496 
8497 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8498 		   hdev->share_umv_size == 0);
8499 
8500 	if (need_lock)
8501 		mutex_unlock(&hdev->vport_lock);
8502 
8503 	return is_full;
8504 }
8505 
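/* Account a unicast entry against the vport's private quota first and the
 * shared pool once that quota is used up, or release it in the reverse
 * order. Callers hold hdev->vport_lock.
 */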
8506 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8507 {
8508 	struct hclge_dev *hdev = vport->back;
8509 
8510 	if (is_free) {
8511 		if (vport->used_umv_num > hdev->priv_umv_size)
8512 			hdev->share_umv_size++;
8513 
8514 		if (vport->used_umv_num > 0)
8515 			vport->used_umv_num--;
8516 	} else {
8517 		if (vport->used_umv_num >= hdev->priv_umv_size &&
8518 		    hdev->share_umv_size > 0)
8519 			hdev->share_umv_size--;
8520 		vport->used_umv_num++;
8521 	}
8522 }
8523 
8524 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8525 						  const u8 *mac_addr)
8526 {
8527 	struct hclge_mac_node *mac_node, *tmp;
8528 
8529 	list_for_each_entry_safe(mac_node, tmp, list, node)
8530 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8531 			return mac_node;
8532 
8533 	return NULL;
8534 }
8535 
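/* Apply a requested state change to an existing mac node. The result
 * depends on both the node's current state and where the request came
 * from, as noted on each case below.
 */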
8536 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8537 				  enum HCLGE_MAC_NODE_STATE state)
8538 {
8539 	switch (state) {
8540 	/* from set_rx_mode or tmp_add_list */
8541 	case HCLGE_MAC_TO_ADD:
8542 		if (mac_node->state == HCLGE_MAC_TO_DEL)
8543 			mac_node->state = HCLGE_MAC_ACTIVE;
8544 		break;
8545 	/* only from set_rx_mode */
8546 	case HCLGE_MAC_TO_DEL:
8547 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
8548 			list_del(&mac_node->node);
8549 			kfree(mac_node);
8550 		} else {
8551 			mac_node->state = HCLGE_MAC_TO_DEL;
8552 		}
8553 		break;
8554 	/* only from tmp_add_list, the mac_node->state won't be
8555 	 * ACTIVE.
8556 	 */
8557 	case HCLGE_MAC_ACTIVE:
8558 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8559 			mac_node->state = HCLGE_MAC_ACTIVE;
8560 
8561 		break;
8562 	}
8563 }
8564 
8565 int hclge_update_mac_list(struct hclge_vport *vport,
8566 			  enum HCLGE_MAC_NODE_STATE state,
8567 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
8568 			  const unsigned char *addr)
8569 {
8570 	struct hclge_dev *hdev = vport->back;
8571 	struct hclge_mac_node *mac_node;
8572 	struct list_head *list;
8573 
8574 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8575 		&vport->uc_mac_list : &vport->mc_mac_list;
8576 
8577 	spin_lock_bh(&vport->mac_list_lock);
8578 
	/* if the mac addr is already in the mac list, there is no need to add
	 * a new node; just update the state of the existing node, remove it,
	 * or do nothing, depending on the requested state.
	 */
8583 	mac_node = hclge_find_mac_node(list, addr);
8584 	if (mac_node) {
8585 		hclge_update_mac_node(mac_node, state);
8586 		spin_unlock_bh(&vport->mac_list_lock);
8587 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8588 		return 0;
8589 	}
8590 
	/* if this address has never been added, there is nothing to delete */
8592 	if (state == HCLGE_MAC_TO_DEL) {
8593 		spin_unlock_bh(&vport->mac_list_lock);
8594 		dev_err(&hdev->pdev->dev,
8595 			"failed to delete address %pM from mac list\n",
8596 			addr);
8597 		return -ENOENT;
8598 	}
8599 
8600 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8601 	if (!mac_node) {
8602 		spin_unlock_bh(&vport->mac_list_lock);
8603 		return -ENOMEM;
8604 	}
8605 
8606 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8607 
8608 	mac_node->state = state;
8609 	ether_addr_copy(mac_node->mac_addr, addr);
8610 	list_add_tail(&mac_node->node, list);
8611 
8612 	spin_unlock_bh(&vport->mac_list_lock);
8613 
8614 	return 0;
8615 }
8616 
8617 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8618 			     const unsigned char *addr)
8619 {
8620 	struct hclge_vport *vport = hclge_get_vport(handle);
8621 
8622 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8623 				     addr);
8624 }
8625 
8626 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8627 			     const unsigned char *addr)
8628 {
8629 	struct hclge_dev *hdev = vport->back;
8630 	struct hclge_mac_vlan_tbl_entry_cmd req;
8631 	struct hclge_desc desc;
8632 	u16 egress_port = 0;
8633 	int ret;
8634 
8635 	/* mac addr check */
8636 	if (is_zero_ether_addr(addr) ||
8637 	    is_broadcast_ether_addr(addr) ||
8638 	    is_multicast_ether_addr(addr)) {
8639 		dev_err(&hdev->pdev->dev,
8640 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8641 			 addr, is_zero_ether_addr(addr),
8642 			 is_broadcast_ether_addr(addr),
8643 			 is_multicast_ether_addr(addr));
8644 		return -EINVAL;
8645 	}
8646 
8647 	memset(&req, 0, sizeof(req));
8648 
8649 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8650 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8651 
8652 	req.egress_port = cpu_to_le16(egress_port);
8653 
8654 	hclge_prepare_mac_addr(&req, addr, false);
8655 
	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Duplicate unicast entries
	 * are not allowed in the mac_vlan table.
	 */
8660 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8661 	if (ret == -ENOENT) {
8662 		mutex_lock(&hdev->vport_lock);
8663 		if (!hclge_is_umv_space_full(vport, false)) {
8664 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8665 			if (!ret)
8666 				hclge_update_umv_space(vport, false);
8667 			mutex_unlock(&hdev->vport_lock);
8668 			return ret;
8669 		}
8670 		mutex_unlock(&hdev->vport_lock);
8671 
8672 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8673 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8674 				hdev->priv_umv_size);
8675 
8676 		return -ENOSPC;
8677 	}
8678 
8679 	/* check if we just hit the duplicate */
8680 	if (!ret) {
8681 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8682 			 vport->vport_id, addr);
8683 		return 0;
8684 	}
8685 
8686 	dev_err(&hdev->pdev->dev,
8687 		"PF failed to add unicast entry(%pM) in the MAC table\n",
8688 		addr);
8689 
8690 	return ret;
8691 }
8692 
8693 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8694 			    const unsigned char *addr)
8695 {
8696 	struct hclge_vport *vport = hclge_get_vport(handle);
8697 
8698 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8699 				     addr);
8700 }
8701 
8702 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8703 			    const unsigned char *addr)
8704 {
8705 	struct hclge_dev *hdev = vport->back;
8706 	struct hclge_mac_vlan_tbl_entry_cmd req;
8707 	int ret;
8708 
8709 	/* mac addr check */
8710 	if (is_zero_ether_addr(addr) ||
8711 	    is_broadcast_ether_addr(addr) ||
8712 	    is_multicast_ether_addr(addr)) {
8713 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8714 			addr);
8715 		return -EINVAL;
8716 	}
8717 
8718 	memset(&req, 0, sizeof(req));
8719 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8720 	hclge_prepare_mac_addr(&req, addr, false);
8721 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
8722 	if (!ret) {
8723 		mutex_lock(&hdev->vport_lock);
8724 		hclge_update_umv_space(vport, true);
8725 		mutex_unlock(&hdev->vport_lock);
8726 	} else if (ret == -ENOENT) {
8727 		ret = 0;
8728 	}
8729 
8730 	return ret;
8731 }
8732 
8733 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8734 			     const unsigned char *addr)
8735 {
8736 	struct hclge_vport *vport = hclge_get_vport(handle);
8737 
8738 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8739 				     addr);
8740 }
8741 
8742 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8743 			     const unsigned char *addr)
8744 {
8745 	struct hclge_dev *hdev = vport->back;
8746 	struct hclge_mac_vlan_tbl_entry_cmd req;
8747 	struct hclge_desc desc[3];
8748 	int status;
8749 
8750 	/* mac addr check */
8751 	if (!is_multicast_ether_addr(addr)) {
8752 		dev_err(&hdev->pdev->dev,
8753 			"Add mc mac err! invalid mac:%pM.\n",
8754 			 addr);
8755 		return -EINVAL;
8756 	}
8757 	memset(&req, 0, sizeof(req));
8758 	hclge_prepare_mac_addr(&req, addr, true);
8759 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8760 	if (status) {
		/* This mac addr does not exist, add a new entry for it */
8762 		memset(desc[0].data, 0, sizeof(desc[0].data));
8763 		memset(desc[1].data, 0, sizeof(desc[0].data));
8764 		memset(desc[2].data, 0, sizeof(desc[0].data));
8765 	}
8766 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8767 	if (status)
8768 		return status;
8769 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	/* if the table has already overflowed, do not print each time */
8771 	if (status == -ENOSPC &&
8772 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8773 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8774 
8775 	return status;
8776 }
8777 
8778 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8779 			    const unsigned char *addr)
8780 {
8781 	struct hclge_vport *vport = hclge_get_vport(handle);
8782 
8783 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8784 				     addr);
8785 }
8786 
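/* Remove this vport's function id from a multicast entry. If no function
 * references the entry any more, the entry itself is deleted from the
 * hardware table; otherwise the entry is written back with the updated VF
 * bitmap. A lookup miss (-ENOENT) is treated as success.
 */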
8787 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8788 			    const unsigned char *addr)
8789 {
8790 	struct hclge_dev *hdev = vport->back;
8791 	struct hclge_mac_vlan_tbl_entry_cmd req;
8792 	enum hclge_cmd_status status;
8793 	struct hclge_desc desc[3];
8794 
8795 	/* mac addr check */
8796 	if (!is_multicast_ether_addr(addr)) {
8797 		dev_dbg(&hdev->pdev->dev,
8798 			"Remove mc mac err! invalid mac:%pM.\n",
8799 			 addr);
8800 		return -EINVAL;
8801 	}
8802 
8803 	memset(&req, 0, sizeof(req));
8804 	hclge_prepare_mac_addr(&req, addr, true);
8805 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8806 	if (!status) {
8807 		/* This mac addr exist, remove this handle's VFID for it */
8808 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8809 		if (status)
8810 			return status;
8811 
8812 		if (hclge_is_all_function_id_zero(desc))
8813 			/* All the vfids are zero, so delete this entry */
8814 			status = hclge_remove_mac_vlan_tbl(vport, &req);
8815 		else
8816 			/* Not all the vfids are zero, just update the vfids */
8817 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8818 	} else if (status == -ENOENT) {
8819 		status = 0;
8820 	}
8821 
8822 	return status;
8823 }
8824 
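/* MAC address list handling: each address on uc_mac_list/mc_mac_list is a
 * hclge_mac_node whose state tracks its hardware status:
 *
 *   HCLGE_MAC_TO_ADD  - requested by the stack, not yet written to hardware
 *   HCLGE_MAC_ACTIVE  - present in the hardware MAC_VLAN table
 *   HCLGE_MAC_TO_DEL  - removed by the stack, still present in hardware
 *
 * The helpers below walk temporary add/delete lists built under
 * mac_list_lock, program the hardware outside the lock, and then merge the
 * result back into the vport's list.
 */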
8825 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8826 				      struct list_head *list,
8827 				      int (*sync)(struct hclge_vport *,
8828 						  const unsigned char *))
8829 {
8830 	struct hclge_mac_node *mac_node, *tmp;
8831 	int ret;
8832 
8833 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8834 		ret = sync(vport, mac_node->mac_addr);
8835 		if (!ret) {
8836 			mac_node->state = HCLGE_MAC_ACTIVE;
8837 		} else {
8838 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8839 				&vport->state);
8840 			break;
8841 		}
8842 	}
8843 }
8844 
8845 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8846 					struct list_head *list,
8847 					int (*unsync)(struct hclge_vport *,
8848 						      const unsigned char *))
8849 {
8850 	struct hclge_mac_node *mac_node, *tmp;
8851 	int ret;
8852 
8853 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8854 		ret = unsync(vport, mac_node->mac_addr);
8855 		if (!ret || ret == -ENOENT) {
8856 			list_del(&mac_node->node);
8857 			kfree(mac_node);
8858 		} else {
8859 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8860 				&vport->state);
8861 			break;
8862 		}
8863 	}
8864 }
8865 
8866 static bool hclge_sync_from_add_list(struct list_head *add_list,
8867 				     struct list_head *mac_list)
8868 {
8869 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8870 	bool all_added = true;
8871 
8872 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8873 		if (mac_node->state == HCLGE_MAC_TO_ADD)
8874 			all_added = false;
8875 
8876 		/* if the mac address from tmp_add_list is not in the
8877 		 * uc/mc_mac_list, it means a TO_DEL request was received
8878 		 * during the time window of adding the mac address into the
8879 		 * mac table. if the mac_node state is ACTIVE, change it to
8880 		 * TO_DEL so it will be removed next time. else it must be
8881 		 * TO_ADD, meaning this address hasn't been added into the
8882 		 * mac table yet, so just remove the mac node.
8883 		 */
8884 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8885 		if (new_node) {
8886 			hclge_update_mac_node(new_node, mac_node->state);
8887 			list_del(&mac_node->node);
8888 			kfree(mac_node);
8889 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8890 			mac_node->state = HCLGE_MAC_TO_DEL;
8891 			list_move_tail(&mac_node->node, mac_list);
8892 		} else {
8893 			list_del(&mac_node->node);
8894 			kfree(mac_node);
8895 		}
8896 	}
8897 
8898 	return all_added;
8899 }
8900 
8901 static void hclge_sync_from_del_list(struct list_head *del_list,
8902 				     struct list_head *mac_list)
8903 {
8904 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8905 
8906 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8907 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8908 		if (new_node) {
8909 			/* If the mac addr exists in the mac list, it means
8910 			 * a new TO_ADD request was received during the time
8911 			 * window of configuring the mac address. Since the
8912 			 * mac node state is TO_ADD and the address is still
8913 			 * in the hardware (the delete failed), we just need
8914 			 * to change the mac node state to ACTIVE.
8915 			 */
8916 			new_node->state = HCLGE_MAC_ACTIVE;
8917 			list_del(&mac_node->node);
8918 			kfree(mac_node);
8919 		} else {
8920 			list_move_tail(&mac_node->node, mac_list);
8921 		}
8922 	}
8923 }
8924 
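/* Record whether all requested addresses could be programmed: when some of
 * them could not be added, the corresponding overflow flag is set on the
 * vport (HNAE3_OVERFLOW_UPE for unicast, HNAE3_OVERFLOW_MPE for multicast),
 * and it is cleared again once every address has been added successfully.
 */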
8925 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8926 					enum HCLGE_MAC_ADDR_TYPE mac_type,
8927 					bool is_all_added)
8928 {
8929 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8930 		if (is_all_added)
8931 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8932 		else
8933 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8934 	} else {
8935 		if (is_all_added)
8936 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8937 		else
8938 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8939 	}
8940 }
8941 
8942 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8943 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
8944 {
8945 	struct hclge_mac_node *mac_node, *tmp, *new_node;
8946 	struct list_head tmp_add_list, tmp_del_list;
8947 	struct list_head *list;
8948 	bool all_added;
8949 
8950 	INIT_LIST_HEAD(&tmp_add_list);
8951 	INIT_LIST_HEAD(&tmp_del_list);
8952 
8953 	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
8954 	 * we can add/delete these mac addrs outside the spin lock
8955 	 */
8956 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8957 		&vport->uc_mac_list : &vport->mc_mac_list;
8958 
8959 	spin_lock_bh(&vport->mac_list_lock);
8960 
8961 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8962 		switch (mac_node->state) {
8963 		case HCLGE_MAC_TO_DEL:
8964 			list_move_tail(&mac_node->node, &tmp_del_list);
8965 			break;
8966 		case HCLGE_MAC_TO_ADD:
8967 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8968 			if (!new_node)
8969 				goto stop_traverse;
8970 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8971 			new_node->state = mac_node->state;
8972 			list_add_tail(&new_node->node, &tmp_add_list);
8973 			break;
8974 		default:
8975 			break;
8976 		}
8977 	}
8978 
8979 stop_traverse:
8980 	spin_unlock_bh(&vport->mac_list_lock);
8981 
8982 	/* delete first, in order to get max mac table space for adding */
8983 	if (mac_type == HCLGE_MAC_ADDR_UC) {
8984 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8985 					    hclge_rm_uc_addr_common);
8986 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8987 					  hclge_add_uc_addr_common);
8988 	} else {
8989 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8990 					    hclge_rm_mc_addr_common);
8991 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
8992 					  hclge_add_mc_addr_common);
8993 	}
8994 
8995 	/* if some mac addresses failed to be added/deleted, move them back
8996 	 * to the mac_list, and retry next time.
8997 	 */
8998 	spin_lock_bh(&vport->mac_list_lock);
8999 
9000 	hclge_sync_from_del_list(&tmp_del_list, list);
9001 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9002 
9003 	spin_unlock_bh(&vport->mac_list_lock);
9004 
9005 	hclge_update_overflow_flags(vport, mac_type, all_added);
9006 }
9007 
9008 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9009 {
9010 	struct hclge_dev *hdev = vport->back;
9011 
9012 	if (test_bit(vport->vport_id, hdev->vport_config_block))
9013 		return false;
9014 
9015 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9016 		return true;
9017 
9018 	return false;
9019 }
9020 
9021 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9022 {
9023 	int i;
9024 
9025 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9026 		struct hclge_vport *vport = &hdev->vport[i];
9027 
9028 		if (!hclge_need_sync_mac_table(vport))
9029 			continue;
9030 
9031 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9032 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9033 	}
9034 }
9035 
9036 static void hclge_build_del_list(struct list_head *list,
9037 				 bool is_del_list,
9038 				 struct list_head *tmp_del_list)
9039 {
9040 	struct hclge_mac_node *mac_cfg, *tmp;
9041 
9042 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9043 		switch (mac_cfg->state) {
9044 		case HCLGE_MAC_TO_DEL:
9045 		case HCLGE_MAC_ACTIVE:
9046 			list_move_tail(&mac_cfg->node, tmp_del_list);
9047 			break;
9048 		case HCLGE_MAC_TO_ADD:
9049 			if (is_del_list) {
9050 				list_del(&mac_cfg->node);
9051 				kfree(mac_cfg);
9052 			}
9053 			break;
9054 		}
9055 	}
9056 }
9057 
9058 static void hclge_unsync_del_list(struct hclge_vport *vport,
9059 				  int (*unsync)(struct hclge_vport *vport,
9060 						const unsigned char *addr),
9061 				  bool is_del_list,
9062 				  struct list_head *tmp_del_list)
9063 {
9064 	struct hclge_mac_node *mac_cfg, *tmp;
9065 	int ret;
9066 
9067 	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9068 		ret = unsync(vport, mac_cfg->mac_addr);
9069 		if (!ret || ret == -ENOENT) {
9070 			/* clear all mac addrs from hardware, but keep these
9071 			 * mac addrs in the mac list, and restore them after
9072 			 * the vf reset is finished.
9073 			 */
9074 			if (!is_del_list &&
9075 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
9076 				mac_cfg->state = HCLGE_MAC_TO_ADD;
9077 			} else {
9078 				list_del(&mac_cfg->node);
9079 				kfree(mac_cfg);
9080 			}
9081 		} else if (is_del_list) {
9082 			mac_cfg->state = HCLGE_MAC_TO_DEL;
9083 		}
9084 	}
9085 }
9086 
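/* Remove all of a vport's MAC addresses from hardware. With is_del_list the
 * addresses are also dropped from the vport's list; otherwise (e.g. for a
 * vf reset) they are kept in the list with state TO_ADD so they can be
 * restored later, and the vport is marked in vport_config_block so that
 * hclge_need_sync_mac_table() skips it in the meantime.
 */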
9087 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9088 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
9089 {
9090 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9091 	struct hclge_dev *hdev = vport->back;
9092 	struct list_head tmp_del_list, *list;
9093 
9094 	if (mac_type == HCLGE_MAC_ADDR_UC) {
9095 		list = &vport->uc_mac_list;
9096 		unsync = hclge_rm_uc_addr_common;
9097 	} else {
9098 		list = &vport->mc_mac_list;
9099 		unsync = hclge_rm_mc_addr_common;
9100 	}
9101 
9102 	INIT_LIST_HEAD(&tmp_del_list);
9103 
9104 	if (!is_del_list)
9105 		set_bit(vport->vport_id, hdev->vport_config_block);
9106 
9107 	spin_lock_bh(&vport->mac_list_lock);
9108 
9109 	hclge_build_del_list(list, is_del_list, &tmp_del_list);
9110 
9111 	spin_unlock_bh(&vport->mac_list_lock);
9112 
9113 	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9114 
9115 	spin_lock_bh(&vport->mac_list_lock);
9116 
9117 	hclge_sync_from_del_list(&tmp_del_list, list);
9118 
9119 	spin_unlock_bh(&vport->mac_list_lock);
9120 }
9121 
9122 /* remove all mac addresses when uninitializing */
9123 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9124 					enum HCLGE_MAC_ADDR_TYPE mac_type)
9125 {
9126 	struct hclge_mac_node *mac_node, *tmp;
9127 	struct hclge_dev *hdev = vport->back;
9128 	struct list_head tmp_del_list, *list;
9129 
9130 	INIT_LIST_HEAD(&tmp_del_list);
9131 
9132 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9133 		&vport->uc_mac_list : &vport->mc_mac_list;
9134 
9135 	spin_lock_bh(&vport->mac_list_lock);
9136 
9137 	list_for_each_entry_safe(mac_node, tmp, list, node) {
9138 		switch (mac_node->state) {
9139 		case HCLGE_MAC_TO_DEL:
9140 		case HCLGE_MAC_ACTIVE:
9141 			list_move_tail(&mac_node->node, &tmp_del_list);
9142 			break;
9143 		case HCLGE_MAC_TO_ADD:
9144 			list_del(&mac_node->node);
9145 			kfree(mac_node);
9146 			break;
9147 		}
9148 	}
9149 
9150 	spin_unlock_bh(&vport->mac_list_lock);
9151 
9152 	if (mac_type == HCLGE_MAC_ADDR_UC)
9153 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9154 					    hclge_rm_uc_addr_common);
9155 	else
9156 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9157 					    hclge_rm_mc_addr_common);
9158 
9159 	if (!list_empty(&tmp_del_list))
9160 		dev_warn(&hdev->pdev->dev,
9161 			 "failed to completely uninit %s mac list for vport %u\n",
9162 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9163 			 vport->vport_id);
9164 
9165 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9166 		list_del(&mac_node->node);
9167 		kfree(mac_node);
9168 	}
9169 }
9170 
9171 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9172 {
9173 	struct hclge_vport *vport;
9174 	int i;
9175 
9176 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9177 		vport = &hdev->vport[i];
9178 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9179 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9180 	}
9181 }
9182 
9183 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9184 					      u16 cmdq_resp, u8 resp_code)
9185 {
9186 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
9187 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
9188 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
9189 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
9190 
9191 	int return_status;
9192 
9193 	if (cmdq_resp) {
9194 		dev_err(&hdev->pdev->dev,
9195 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9196 			cmdq_resp);
9197 		return -EIO;
9198 	}
9199 
9200 	switch (resp_code) {
9201 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
9202 	case HCLGE_ETHERTYPE_ALREADY_ADD:
9203 		return_status = 0;
9204 		break;
9205 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9206 		dev_err(&hdev->pdev->dev,
9207 			"add mac ethertype failed for manager table overflow.\n");
9208 		return_status = -EIO;
9209 		break;
9210 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
9211 		dev_err(&hdev->pdev->dev,
9212 			"add mac ethertype failed for key conflict.\n");
9213 		return_status = -EIO;
9214 		break;
9215 	default:
9216 		dev_err(&hdev->pdev->dev,
9217 			"add mac ethertype failed for undefined, code=%u.\n",
9218 			resp_code);
9219 		return_status = -EIO;
9220 	}
9221 
9222 	return return_status;
9223 }
9224 
9225 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9226 				     u8 *mac_addr)
9227 {
9228 	struct hclge_mac_vlan_tbl_entry_cmd req;
9229 	struct hclge_dev *hdev = vport->back;
9230 	struct hclge_desc desc;
9231 	u16 egress_port = 0;
9232 	int i;
9233 
9234 	if (is_zero_ether_addr(mac_addr))
9235 		return false;
9236 
9237 	memset(&req, 0, sizeof(req));
9238 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9239 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9240 	req.egress_port = cpu_to_le16(egress_port);
9241 	hclge_prepare_mac_addr(&req, mac_addr, false);
9242 
9243 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9244 		return true;
9245 
9246 	vf_idx += HCLGE_VF_VPORT_START_NUM;
9247 	for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9248 		if (i != vf_idx &&
9249 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9250 			return true;
9251 
9252 	return false;
9253 }
9254 
9255 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9256 			    u8 *mac_addr)
9257 {
9258 	struct hclge_vport *vport = hclge_get_vport(handle);
9259 	struct hclge_dev *hdev = vport->back;
9260 
9261 	vport = hclge_get_vf_vport(hdev, vf);
9262 	if (!vport)
9263 		return -EINVAL;
9264 
9265 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9266 		dev_info(&hdev->pdev->dev,
9267 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
9268 			 mac_addr);
9269 		return 0;
9270 	}
9271 
9272 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9273 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9274 			mac_addr);
9275 		return -EEXIST;
9276 	}
9277 
9278 	ether_addr_copy(vport->vf_info.mac, mac_addr);
9279 
9280 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9281 		dev_info(&hdev->pdev->dev,
9282 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9283 			 vf, mac_addr);
9284 		return hclge_inform_reset_assert_to_vf(vport);
9285 	}
9286 
9287 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9288 		 vf, mac_addr);
9289 	return 0;
9290 }
9291 
9292 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9293 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
9294 {
9295 	struct hclge_desc desc;
9296 	u8 resp_code;
9297 	u16 retval;
9298 	int ret;
9299 
9300 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9301 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9302 
9303 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9304 	if (ret) {
9305 		dev_err(&hdev->pdev->dev,
9306 			"add mac ethertype failed for cmd_send, ret =%d.\n",
9307 			ret);
9308 		return ret;
9309 	}
9310 
9311 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9312 	retval = le16_to_cpu(desc.retval);
9313 
9314 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9315 }
9316 
9317 static int init_mgr_tbl(struct hclge_dev *hdev)
9318 {
9319 	int ret;
9320 	int i;
9321 
9322 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9323 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9324 		if (ret) {
9325 			dev_err(&hdev->pdev->dev,
9326 				"add mac ethertype failed, ret =%d.\n",
9327 				ret);
9328 			return ret;
9329 		}
9330 	}
9331 
9332 	return 0;
9333 }
9334 
9335 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9336 {
9337 	struct hclge_vport *vport = hclge_get_vport(handle);
9338 	struct hclge_dev *hdev = vport->back;
9339 
9340 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
9341 }
9342 
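/* Queue a device (primary) MAC address change on the unicast list: the new
 * address is inserted (or revived) at the head of the list in TO_ADD state,
 * the old address is marked TO_DEL, and MAC_TBL_CHANGE is set so that the
 * change is applied to hardware by the next sync.
 */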
9343 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9344 				       const u8 *old_addr, const u8 *new_addr)
9345 {
9346 	struct list_head *list = &vport->uc_mac_list;
9347 	struct hclge_mac_node *old_node, *new_node;
9348 
9349 	new_node = hclge_find_mac_node(list, new_addr);
9350 	if (!new_node) {
9351 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9352 		if (!new_node)
9353 			return -ENOMEM;
9354 
9355 		new_node->state = HCLGE_MAC_TO_ADD;
9356 		ether_addr_copy(new_node->mac_addr, new_addr);
9357 		list_add(&new_node->node, list);
9358 	} else {
9359 		if (new_node->state == HCLGE_MAC_TO_DEL)
9360 			new_node->state = HCLGE_MAC_ACTIVE;
9361 
9362 		/* make sure the new addr is at the list head, to avoid the
9363 		 * dev addr not being re-added into the mac table, due to the
9364 		 * umv space limitation, after a global/imp reset which
9365 		 * clears the hardware mac table.
9366 		 */
9367 		list_move(&new_node->node, list);
9368 	}
9369 
9370 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9371 		old_node = hclge_find_mac_node(list, old_addr);
9372 		if (old_node) {
9373 			if (old_node->state == HCLGE_MAC_TO_ADD) {
9374 				list_del(&old_node->node);
9375 				kfree(old_node);
9376 			} else {
9377 				old_node->state = HCLGE_MAC_TO_DEL;
9378 			}
9379 		}
9380 	}
9381 
9382 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9383 
9384 	return 0;
9385 }
9386 
9387 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9388 			      bool is_first)
9389 {
9390 	const unsigned char *new_addr = (const unsigned char *)p;
9391 	struct hclge_vport *vport = hclge_get_vport(handle);
9392 	struct hclge_dev *hdev = vport->back;
9393 	unsigned char *old_addr = NULL;
9394 	int ret;
9395 
9396 	/* mac addr check */
9397 	if (is_zero_ether_addr(new_addr) ||
9398 	    is_broadcast_ether_addr(new_addr) ||
9399 	    is_multicast_ether_addr(new_addr)) {
9400 		dev_err(&hdev->pdev->dev,
9401 			"change uc mac err! invalid mac: %pM.\n",
9402 			 new_addr);
9403 		return -EINVAL;
9404 	}
9405 
9406 	ret = hclge_pause_addr_cfg(hdev, new_addr);
9407 	if (ret) {
9408 		dev_err(&hdev->pdev->dev,
9409 			"failed to configure mac pause address, ret = %d\n",
9410 			ret);
9411 		return ret;
9412 	}
9413 
9414 	if (!is_first)
9415 		old_addr = hdev->hw.mac.mac_addr;
9416 
9417 	spin_lock_bh(&vport->mac_list_lock);
9418 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9419 	if (ret) {
9420 		dev_err(&hdev->pdev->dev,
9421 			"failed to change the mac addr:%pM, ret = %d\n",
9422 			new_addr, ret);
9423 		spin_unlock_bh(&vport->mac_list_lock);
9424 
9425 		if (!is_first)
9426 			hclge_pause_addr_cfg(hdev, old_addr);
9427 
9428 		return ret;
9429 	}
9430 	/* we must update the dev addr under the spin lock's protection, to
9431 	 * prevent the dev addr from being removed by the set_rx_mode path.
9432 	 */
9433 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9434 	spin_unlock_bh(&vport->mac_list_lock);
9435 
9436 	hclge_task_schedule(hdev, 0);
9437 
9438 	return 0;
9439 }
9440 
9441 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9442 {
9443 	struct mii_ioctl_data *data = if_mii(ifr);
9444 
9445 	if (!hnae3_dev_phy_imp_supported(hdev))
9446 		return -EOPNOTSUPP;
9447 
9448 	switch (cmd) {
9449 	case SIOCGMIIPHY:
9450 		data->phy_id = hdev->hw.mac.phy_addr;
9451 		/* this command reads phy id and register at the same time */
9452 		fallthrough;
9453 	case SIOCGMIIREG:
9454 		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9455 		return 0;
9456 
9457 	case SIOCSMIIREG:
9458 		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9459 	default:
9460 		return -EOPNOTSUPP;
9461 	}
9462 }
9463 
9464 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9465 			  int cmd)
9466 {
9467 	struct hclge_vport *vport = hclge_get_vport(handle);
9468 	struct hclge_dev *hdev = vport->back;
9469 
9470 	switch (cmd) {
9471 	case SIOCGHWTSTAMP:
9472 		return hclge_ptp_get_cfg(hdev, ifr);
9473 	case SIOCSHWTSTAMP:
9474 		return hclge_ptp_set_cfg(hdev, ifr);
9475 	default:
9476 		if (!hdev->hw.mac.phydev)
9477 			return hclge_mii_ioctl(hdev, ifr, cmd);
9478 	}
9479 
9480 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9481 }
9482 
9483 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9484 					     bool bypass_en)
9485 {
9486 	struct hclge_port_vlan_filter_bypass_cmd *req;
9487 	struct hclge_desc desc;
9488 	int ret;
9489 
9490 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9491 	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9492 	req->vf_id = vf_id;
9493 	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9494 		      bypass_en ? 1 : 0);
9495 
9496 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9497 	if (ret)
9498 		dev_err(&hdev->pdev->dev,
9499 			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9500 			vf_id, ret);
9501 
9502 	return ret;
9503 }
9504 
9505 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9506 				      u8 fe_type, bool filter_en, u8 vf_id)
9507 {
9508 	struct hclge_vlan_filter_ctrl_cmd *req;
9509 	struct hclge_desc desc;
9510 	int ret;
9511 
9512 	/* read current vlan filter parameter */
9513 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9514 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9515 	req->vlan_type = vlan_type;
9516 	req->vf_id = vf_id;
9517 
9518 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9519 	if (ret) {
9520 		dev_err(&hdev->pdev->dev,
9521 			"failed to get vlan filter config, ret = %d.\n", ret);
9522 		return ret;
9523 	}
9524 
9525 	/* modify and write new config parameter */
9526 	hclge_cmd_reuse_desc(&desc, false);
9527 	req->vlan_fe = filter_en ?
9528 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9529 
9530 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9531 	if (ret)
9532 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9533 			ret);
9534 
9535 	return ret;
9536 }
9537 
9538 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9539 {
9540 	struct hclge_dev *hdev = vport->back;
9541 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9542 	int ret;
9543 
9544 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9545 		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9546 						  HCLGE_FILTER_FE_EGRESS_V1_B,
9547 						  enable, vport->vport_id);
9548 
9549 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9550 					 HCLGE_FILTER_FE_EGRESS, enable,
9551 					 vport->vport_id);
9552 	if (ret)
9553 		return ret;
9554 
9555 	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
9556 		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9557 							!enable);
9558 	else if (!vport->vport_id)
9559 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9560 						 HCLGE_FILTER_FE_INGRESS,
9561 						 enable, 0);
9562 
9563 	return ret;
9564 }
9565 
9566 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9567 {
9568 	struct hnae3_handle *handle = &vport->nic;
9569 	struct hclge_vport_vlan_cfg *vlan, *tmp;
9570 	struct hclge_dev *hdev = vport->back;
9571 
9572 	if (vport->vport_id) {
9573 		if (vport->port_base_vlan_cfg.state !=
9574 			HNAE3_PORT_BASE_VLAN_DISABLE)
9575 			return true;
9576 
9577 		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9578 			return false;
9579 	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
9580 		return false;
9581 	}
9582 
9583 	if (!vport->req_vlan_fltr_en)
9584 		return false;
9585 
9586 	/* for compatibility with former devices, always enable the vlan filter */
9587 	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9588 		return true;
9589 
9590 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9591 		if (vlan->vlan_id != 0)
9592 			return true;
9593 
9594 	return false;
9595 }
9596 
9597 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9598 {
9599 	struct hclge_dev *hdev = vport->back;
9600 	bool need_en;
9601 	int ret;
9602 
9603 	mutex_lock(&hdev->vport_lock);
9604 
9605 	vport->req_vlan_fltr_en = request_en;
9606 
9607 	need_en = hclge_need_enable_vport_vlan_filter(vport);
9608 	if (need_en == vport->cur_vlan_fltr_en) {
9609 		mutex_unlock(&hdev->vport_lock);
9610 		return 0;
9611 	}
9612 
9613 	ret = hclge_set_vport_vlan_filter(vport, need_en);
9614 	if (ret) {
9615 		mutex_unlock(&hdev->vport_lock);
9616 		return ret;
9617 	}
9618 
9619 	vport->cur_vlan_fltr_en = need_en;
9620 
9621 	mutex_unlock(&hdev->vport_lock);
9622 
9623 	return 0;
9624 }
9625 
9626 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9627 {
9628 	struct hclge_vport *vport = hclge_get_vport(handle);
9629 
9630 	return hclge_enable_vport_vlan_filter(vport, enable);
9631 }
9632 
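/* Program one vlan id into the per-VF vlan filter. The command carries a VF
 * bitmap split across two descriptors; the target function is selected by
 * setting a single bit, e.g. vfid 10 maps to byte 10 / 8 = 1 and bit value
 * 1 << (10 % 8) = 0x04 within that byte.
 */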
9633 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9634 					bool is_kill, u16 vlan,
9635 					struct hclge_desc *desc)
9636 {
9637 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
9638 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
9639 	u8 vf_byte_val;
9640 	u8 vf_byte_off;
9641 	int ret;
9642 
9643 	hclge_cmd_setup_basic_desc(&desc[0],
9644 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9645 	hclge_cmd_setup_basic_desc(&desc[1],
9646 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9647 
9648 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9649 
9650 	vf_byte_off = vfid / 8;
9651 	vf_byte_val = 1 << (vfid % 8);
9652 
9653 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9654 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9655 
9656 	req0->vlan_id  = cpu_to_le16(vlan);
9657 	req0->vlan_cfg = is_kill;
9658 
9659 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9660 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9661 	else
9662 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9663 
9664 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
9665 	if (ret) {
9666 		dev_err(&hdev->pdev->dev,
9667 			"Send vf vlan command fail, ret =%d.\n",
9668 			ret);
9669 		return ret;
9670 	}
9671 
9672 	return 0;
9673 }
9674 
9675 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9676 					  bool is_kill, struct hclge_desc *desc)
9677 {
9678 	struct hclge_vlan_filter_vf_cfg_cmd *req;
9679 
9680 	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9681 
9682 	if (!is_kill) {
9683 #define HCLGE_VF_VLAN_NO_ENTRY	2
9684 		if (!req->resp_code || req->resp_code == 1)
9685 			return 0;
9686 
9687 		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9688 			set_bit(vfid, hdev->vf_vlan_full);
9689 			dev_warn(&hdev->pdev->dev,
9690 				 "vf vlan table is full, vf vlan filter is disabled\n");
9691 			return 0;
9692 		}
9693 
9694 		dev_err(&hdev->pdev->dev,
9695 			"Add vf vlan filter fail, ret =%u.\n",
9696 			req->resp_code);
9697 	} else {
9698 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
9699 		if (!req->resp_code)
9700 			return 0;
9701 
9702 		/* the vf vlan filter is disabled when the vf vlan table is
9703 		 * full, so new vlan ids will not be added into the vf vlan
9704 		 * table. Just return 0 without warning, to avoid massive
9705 		 * verbose print logs when unloading.
9706 		 */
9707 		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9708 			return 0;
9709 
9710 		dev_err(&hdev->pdev->dev,
9711 			"Kill vf vlan filter fail, ret =%u.\n",
9712 			req->resp_code);
9713 	}
9714 
9715 	return -EIO;
9716 }
9717 
9718 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9719 				    bool is_kill, u16 vlan)
9720 {
9721 	struct hclge_vport *vport = &hdev->vport[vfid];
9722 	struct hclge_desc desc[2];
9723 	int ret;
9724 
9725 	/* if the vf vlan table is full, firmware will close the vf vlan
9726 	 * filter, so it is unable and unnecessary to add a new vlan id to it.
9727 	 * If spoof check is enabled and the vf vlan table is full, adding a
9728 	 * new vlan must fail, as tx packets with that vlan id will be dropped.
9729 	 */
9730 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9731 		if (vport->vf_info.spoofchk && vlan) {
9732 			dev_err(&hdev->pdev->dev,
9733 				"Can't add vlan because spoof check is on and vf vlan table is full\n");
9734 			return -EPERM;
9735 		}
9736 		return 0;
9737 	}
9738 
9739 	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9740 	if (ret)
9741 		return ret;
9742 
9743 	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9744 }
9745 
9746 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9747 				      u16 vlan_id, bool is_kill)
9748 {
9749 	struct hclge_vlan_filter_pf_cfg_cmd *req;
9750 	struct hclge_desc desc;
9751 	u8 vlan_offset_byte_val;
9752 	u8 vlan_offset_byte;
9753 	u8 vlan_offset_160;
9754 	int ret;
9755 
9756 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9757 
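	/* The PF vlan filter is addressed as (block, byte, bit). Assuming
	 * HCLGE_VLAN_ID_OFFSET_STEP is 160 and HCLGE_VLAN_BYTE_SIZE is 8 (as
	 * the names suggest), vlan id 1000 would map to vlan_offset_160 = 6,
	 * byte 5, bit 0.
	 */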
9758 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9759 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9760 			   HCLGE_VLAN_BYTE_SIZE;
9761 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9762 
9763 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9764 	req->vlan_offset = vlan_offset_160;
9765 	req->vlan_cfg = is_kill;
9766 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9767 
9768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9769 	if (ret)
9770 		dev_err(&hdev->pdev->dev,
9771 			"port vlan command, send fail, ret =%d.\n", ret);
9772 	return ret;
9773 }
9774 
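/* Apply a vlan filter change for one vport: the per-VF vlan table is updated
 * first, while the shared port vlan filter is only touched when the first
 * vport joins the vlan (add) or the last vport leaves it (kill), based on
 * the per-vlan vport bitmap in hdev->vlan_table.
 */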
9775 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9776 				    u16 vport_id, u16 vlan_id,
9777 				    bool is_kill)
9778 {
9779 	u16 vport_idx, vport_num = 0;
9780 	int ret;
9781 
9782 	if (is_kill && !vlan_id)
9783 		return 0;
9784 
9785 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9786 	if (ret) {
9787 		dev_err(&hdev->pdev->dev,
9788 			"Set %u vport vlan filter config fail, ret =%d.\n",
9789 			vport_id, ret);
9790 		return ret;
9791 	}
9792 
9793 	/* vlan 0 may be added twice when 8021q module is enabled */
9794 	if (!is_kill && !vlan_id &&
9795 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
9796 		return 0;
9797 
9798 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9799 		dev_err(&hdev->pdev->dev,
9800 			"Add port vlan failed, vport %u is already in vlan %u\n",
9801 			vport_id, vlan_id);
9802 		return -EINVAL;
9803 	}
9804 
9805 	if (is_kill &&
9806 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9807 		dev_err(&hdev->pdev->dev,
9808 			"Delete port vlan failed, vport %u is not in vlan %u\n",
9809 			vport_id, vlan_id);
9810 		return -EINVAL;
9811 	}
9812 
9813 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9814 		vport_num++;
9815 
9816 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9817 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9818 						 is_kill);
9819 
9820 	return ret;
9821 }
9822 
9823 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9824 {
9825 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9826 	struct hclge_vport_vtag_tx_cfg_cmd *req;
9827 	struct hclge_dev *hdev = vport->back;
9828 	struct hclge_desc desc;
9829 	u16 bmap_index;
9830 	int status;
9831 
9832 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9833 
9834 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9835 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9836 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9837 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9838 		      vcfg->accept_tag1 ? 1 : 0);
9839 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9840 		      vcfg->accept_untag1 ? 1 : 0);
9841 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9842 		      vcfg->accept_tag2 ? 1 : 0);
9843 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9844 		      vcfg->accept_untag2 ? 1 : 0);
9845 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9846 		      vcfg->insert_tag1_en ? 1 : 0);
9847 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9848 		      vcfg->insert_tag2_en ? 1 : 0);
9849 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9850 		      vcfg->tag_shift_mode_en ? 1 : 0);
9851 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9852 
9853 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9854 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9855 			HCLGE_VF_NUM_PER_BYTE;
9856 	req->vf_bitmap[bmap_index] =
9857 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9858 
9859 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9860 	if (status)
9861 		dev_err(&hdev->pdev->dev,
9862 			"Send port txvlan cfg command fail, ret =%d\n",
9863 			status);
9864 
9865 	return status;
9866 }
9867 
9868 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9869 {
9870 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9871 	struct hclge_vport_vtag_rx_cfg_cmd *req;
9872 	struct hclge_dev *hdev = vport->back;
9873 	struct hclge_desc desc;
9874 	u16 bmap_index;
9875 	int status;
9876 
9877 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9878 
9879 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9880 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9881 		      vcfg->strip_tag1_en ? 1 : 0);
9882 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9883 		      vcfg->strip_tag2_en ? 1 : 0);
9884 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9885 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
9886 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9887 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
9888 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9889 		      vcfg->strip_tag1_discard_en ? 1 : 0);
9890 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9891 		      vcfg->strip_tag2_discard_en ? 1 : 0);
9892 
9893 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9894 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9895 			HCLGE_VF_NUM_PER_BYTE;
9896 	req->vf_bitmap[bmap_index] =
9897 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9898 
9899 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9900 	if (status)
9901 		dev_err(&hdev->pdev->dev,
9902 			"Send port rxvlan cfg command fail, ret =%d\n",
9903 			status);
9904 
9905 	return status;
9906 }
9907 
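/* Configure tx/rx vlan offload for a vport according to its port base vlan
 * state. With port base vlan disabled, no default tag is inserted on tx and
 * only tag2 stripping follows rx_vlan_offload_en. With it enabled, the port
 * base vlan (tag + qos) is inserted as default tag1 on tx, and on rx tag2
 * is always stripped and discarded while tag1 stripping follows
 * rx_vlan_offload_en.
 */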
9908 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9909 				  u16 port_base_vlan_state,
9910 				  u16 vlan_tag, u8 qos)
9911 {
9912 	int ret;
9913 
9914 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9915 		vport->txvlan_cfg.accept_tag1 = true;
9916 		vport->txvlan_cfg.insert_tag1_en = false;
9917 		vport->txvlan_cfg.default_tag1 = 0;
9918 	} else {
9919 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9920 
9921 		vport->txvlan_cfg.accept_tag1 =
9922 			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9923 		vport->txvlan_cfg.insert_tag1_en = true;
9924 		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9925 						 vlan_tag;
9926 	}
9927 
9928 	vport->txvlan_cfg.accept_untag1 = true;
9929 
9930 	/* accept_tag2 and accept_untag2 are not supported on
9931 	 * pdev revision 0x20; newer revisions support them, but
9932 	 * these two fields cannot be configured by the user.
9933 	 */
9934 	vport->txvlan_cfg.accept_tag2 = true;
9935 	vport->txvlan_cfg.accept_untag2 = true;
9936 	vport->txvlan_cfg.insert_tag2_en = false;
9937 	vport->txvlan_cfg.default_tag2 = 0;
9938 	vport->txvlan_cfg.tag_shift_mode_en = true;
9939 
9940 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9941 		vport->rxvlan_cfg.strip_tag1_en = false;
9942 		vport->rxvlan_cfg.strip_tag2_en =
9943 				vport->rxvlan_cfg.rx_vlan_offload_en;
9944 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
9945 	} else {
9946 		vport->rxvlan_cfg.strip_tag1_en =
9947 				vport->rxvlan_cfg.rx_vlan_offload_en;
9948 		vport->rxvlan_cfg.strip_tag2_en = true;
9949 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
9950 	}
9951 
9952 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
9953 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9954 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9955 
9956 	ret = hclge_set_vlan_tx_offload_cfg(vport);
9957 	if (ret)
9958 		return ret;
9959 
9960 	return hclge_set_vlan_rx_offload_cfg(vport);
9961 }
9962 
9963 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9964 {
9965 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9966 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9967 	struct hclge_desc desc;
9968 	int status;
9969 
9970 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9971 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9972 	rx_req->ot_fst_vlan_type =
9973 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9974 	rx_req->ot_sec_vlan_type =
9975 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9976 	rx_req->in_fst_vlan_type =
9977 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9978 	rx_req->in_sec_vlan_type =
9979 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9980 
9981 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9982 	if (status) {
9983 		dev_err(&hdev->pdev->dev,
9984 			"Send rxvlan protocol type command fail, ret =%d\n",
9985 			status);
9986 		return status;
9987 	}
9988 
9989 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9990 
9991 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9992 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9993 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9994 
9995 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
9996 	if (status)
9997 		dev_err(&hdev->pdev->dev,
9998 			"Send txvlan protocol type command fail, ret =%d\n",
9999 			status);
10000 
10001 	return status;
10002 }
10003 
10004 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10005 {
10006 #define HCLGE_DEF_VLAN_TYPE		0x8100
10007 
10008 	struct hnae3_handle *handle = &hdev->vport[0].nic;
10009 	struct hclge_vport *vport;
10010 	int ret;
10011 	int i;
10012 
10013 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10014 		/* for revision 0x21, vf vlan filter is per function */
10015 		for (i = 0; i < hdev->num_alloc_vport; i++) {
10016 			vport = &hdev->vport[i];
10017 			ret = hclge_set_vlan_filter_ctrl(hdev,
10018 							 HCLGE_FILTER_TYPE_VF,
10019 							 HCLGE_FILTER_FE_EGRESS,
10020 							 true,
10021 							 vport->vport_id);
10022 			if (ret)
10023 				return ret;
10024 			vport->cur_vlan_fltr_en = true;
10025 		}
10026 
10027 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10028 						 HCLGE_FILTER_FE_INGRESS, true,
10029 						 0);
10030 		if (ret)
10031 			return ret;
10032 	} else {
10033 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10034 						 HCLGE_FILTER_FE_EGRESS_V1_B,
10035 						 true, 0);
10036 		if (ret)
10037 			return ret;
10038 	}
10039 
10040 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10041 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10042 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10043 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10044 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10045 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10046 
10047 	ret = hclge_set_vlan_protocol_type(hdev);
10048 	if (ret)
10049 		return ret;
10050 
10051 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10052 		u16 vlan_tag;
10053 		u8 qos;
10054 
10055 		vport = &hdev->vport[i];
10056 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10057 		qos = vport->port_base_vlan_cfg.vlan_info.qos;
10058 
10059 		ret = hclge_vlan_offload_cfg(vport,
10060 					     vport->port_base_vlan_cfg.state,
10061 					     vlan_tag, qos);
10062 		if (ret)
10063 			return ret;
10064 	}
10065 
10066 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10067 }
10068 
10069 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10070 				       bool writen_to_tbl)
10071 {
10072 	struct hclge_vport_vlan_cfg *vlan;
10073 
10074 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10075 	if (!vlan)
10076 		return;
10077 
10078 	vlan->hd_tbl_status = writen_to_tbl;
10079 	vlan->vlan_id = vlan_id;
10080 
10081 	list_add_tail(&vlan->node, &vport->vlan_list);
10082 }
10083 
10084 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10085 {
10086 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10087 	struct hclge_dev *hdev = vport->back;
10088 	int ret;
10089 
10090 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10091 		if (!vlan->hd_tbl_status) {
10092 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10093 						       vport->vport_id,
10094 						       vlan->vlan_id, false);
10095 			if (ret) {
10096 				dev_err(&hdev->pdev->dev,
10097 					"restore vport vlan list failed, ret=%d\n",
10098 					ret);
10099 				return ret;
10100 			}
10101 		}
10102 		vlan->hd_tbl_status = true;
10103 	}
10104 
10105 	return 0;
10106 }
10107 
10108 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10109 				      bool is_write_tbl)
10110 {
10111 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10112 	struct hclge_dev *hdev = vport->back;
10113 
10114 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10115 		if (vlan->vlan_id == vlan_id) {
10116 			if (is_write_tbl && vlan->hd_tbl_status)
10117 				hclge_set_vlan_filter_hw(hdev,
10118 							 htons(ETH_P_8021Q),
10119 							 vport->vport_id,
10120 							 vlan_id,
10121 							 true);
10122 
10123 			list_del(&vlan->node);
10124 			kfree(vlan);
10125 			break;
10126 		}
10127 	}
10128 }
10129 
10130 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10131 {
10132 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10133 	struct hclge_dev *hdev = vport->back;
10134 
10135 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10136 		if (vlan->hd_tbl_status)
10137 			hclge_set_vlan_filter_hw(hdev,
10138 						 htons(ETH_P_8021Q),
10139 						 vport->vport_id,
10140 						 vlan->vlan_id,
10141 						 true);
10142 
10143 		vlan->hd_tbl_status = false;
10144 		if (is_del_list) {
10145 			list_del(&vlan->node);
10146 			kfree(vlan);
10147 		}
10148 	}
10149 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
10150 }
10151 
10152 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10153 {
10154 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10155 	struct hclge_vport *vport;
10156 	int i;
10157 
10158 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10159 		vport = &hdev->vport[i];
10160 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10161 			list_del(&vlan->node);
10162 			kfree(vlan);
10163 		}
10164 	}
10165 }
10166 
10167 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10168 {
10169 	struct hclge_vport_vlan_cfg *vlan, *tmp;
10170 	struct hclge_dev *hdev = vport->back;
10171 	u16 vlan_proto;
10172 	u16 vlan_id;
10173 	u16 state;
10174 	int ret;
10175 
10176 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10177 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10178 	state = vport->port_base_vlan_cfg.state;
10179 
10180 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10181 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10182 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10183 					 vport->vport_id, vlan_id,
10184 					 false);
10185 		return;
10186 	}
10187 
10188 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10189 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10190 					       vport->vport_id,
10191 					       vlan->vlan_id, false);
10192 		if (ret)
10193 			break;
10194 		vlan->hd_tbl_status = true;
10195 	}
10196 }
10197 
10198 /* For a global reset or imp reset, hardware will clear the mac table, so
10199  * we change the mac address state from ACTIVE to TO_ADD, then they can be
10200  * restored in the service task after the reset completes. Furthermore,
10201  * mac addresses with state TO_DEL or DEL_FAIL do not need to be restored
10202  * after reset, so just remove these mac nodes from the mac_list.
10203  */
10204 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10205 {
10206 	struct hclge_mac_node *mac_node, *tmp;
10207 
10208 	list_for_each_entry_safe(mac_node, tmp, list, node) {
10209 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
10210 			mac_node->state = HCLGE_MAC_TO_ADD;
10211 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10212 			list_del(&mac_node->node);
10213 			kfree(mac_node);
10214 		}
10215 	}
10216 }
10217 
10218 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10219 {
10220 	spin_lock_bh(&vport->mac_list_lock);
10221 
10222 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10223 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10224 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10225 
10226 	spin_unlock_bh(&vport->mac_list_lock);
10227 }
10228 
10229 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10230 {
10231 	struct hclge_vport *vport = &hdev->vport[0];
10232 	struct hnae3_handle *handle = &vport->nic;
10233 
10234 	hclge_restore_mac_table_common(vport);
10235 	hclge_restore_vport_vlan_table(vport);
10236 	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10237 	hclge_restore_fd_entries(handle);
10238 }
10239 
10240 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10241 {
10242 	struct hclge_vport *vport = hclge_get_vport(handle);
10243 
10244 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10245 		vport->rxvlan_cfg.strip_tag1_en = false;
10246 		vport->rxvlan_cfg.strip_tag2_en = enable;
10247 		vport->rxvlan_cfg.strip_tag2_discard_en = false;
10248 	} else {
10249 		vport->rxvlan_cfg.strip_tag1_en = enable;
10250 		vport->rxvlan_cfg.strip_tag2_en = true;
10251 		vport->rxvlan_cfg.strip_tag2_discard_en = true;
10252 	}
10253 
10254 	vport->rxvlan_cfg.strip_tag1_discard_en = false;
10255 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10256 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10257 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10258 
10259 	return hclge_set_vlan_rx_offload_cfg(vport);
10260 }
10261 
10262 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10263 {
10264 	struct hclge_dev *hdev = vport->back;
10265 
10266 	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10267 		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10268 }
10269 
10270 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10271 					    u16 port_base_vlan_state,
10272 					    struct hclge_vlan_info *new_info,
10273 					    struct hclge_vlan_info *old_info)
10274 {
10275 	struct hclge_dev *hdev = vport->back;
10276 	int ret;
10277 
10278 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10279 		hclge_rm_vport_all_vlan_table(vport, false);
10280 		/* force clear VLAN 0 */
10281 		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10282 		if (ret)
10283 			return ret;
10284 		return hclge_set_vlan_filter_hw(hdev,
10285 						 htons(new_info->vlan_proto),
10286 						 vport->vport_id,
10287 						 new_info->vlan_tag,
10288 						 false);
10289 	}
10290 
10291 	/* force add VLAN 0 */
10292 	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10293 	if (ret)
10294 		return ret;
10295 
10296 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10297 				       vport->vport_id, old_info->vlan_tag,
10298 				       true);
10299 	if (ret)
10300 		return ret;
10301 
10302 	return hclge_add_vport_all_vlan_table(vport);
10303 }
10304 
10305 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10306 					  const struct hclge_vlan_info *old_cfg)
10307 {
10308 	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10309 		return true;
10310 
10311 	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10312 		return true;
10313 
10314 	return false;
10315 }
10316 
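/* Update a vport's port base vlan configuration: reprogram the tx/rx vlan
 * offload first, then, only if the tag or qos actually changed, adjust the
 * hardware filter entries (swap the old tag for the new one on MODIFY, or
 * rebuild the vlan filter entries when switching between ENABLE and
 * DISABLE), and finally record the new state in the vport and nic handle.
 */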
10317 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10318 				    struct hclge_vlan_info *vlan_info)
10319 {
10320 	struct hnae3_handle *nic = &vport->nic;
10321 	struct hclge_vlan_info *old_vlan_info;
10322 	struct hclge_dev *hdev = vport->back;
10323 	int ret;
10324 
10325 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10326 
10327 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10328 				     vlan_info->qos);
10329 	if (ret)
10330 		return ret;
10331 
10332 	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10333 		goto out;
10334 
10335 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10336 		/* add new VLAN tag */
10337 		ret = hclge_set_vlan_filter_hw(hdev,
10338 					       htons(vlan_info->vlan_proto),
10339 					       vport->vport_id,
10340 					       vlan_info->vlan_tag,
10341 					       false);
10342 		if (ret)
10343 			return ret;
10344 
10345 		/* remove old VLAN tag */
10346 		if (old_vlan_info->vlan_tag == 0)
10347 			ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10348 						       true, 0);
10349 		else
10350 			ret = hclge_set_vlan_filter_hw(hdev,
10351 						       htons(ETH_P_8021Q),
10352 						       vport->vport_id,
10353 						       old_vlan_info->vlan_tag,
10354 						       true);
10355 		if (ret) {
10356 			dev_err(&hdev->pdev->dev,
10357 				"failed to clear vport%u port base vlan %u, ret = %d.\n",
10358 				vport->vport_id, old_vlan_info->vlan_tag, ret);
10359 			return ret;
10360 		}
10361 
10362 		goto out;
10363 	}
10364 
10365 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10366 					       old_vlan_info);
10367 	if (ret)
10368 		return ret;
10369 
10370 out:
10371 	vport->port_base_vlan_cfg.state = state;
10372 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10373 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10374 	else
10375 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10376 
10377 	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10378 	hclge_set_vport_vlan_fltr_change(vport);
10379 
10380 	return 0;
10381 }
10382 
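/* Decide how the port base vlan state changes for a "set VF vlan" request:
 *
 *   current state    requested vlan/qos    resulting state
 *   DISABLE          none                  NOCHANGE
 *   DISABLE          any                   ENABLE
 *   ENABLE/MODIFY    none                  DISABLE
 *   ENABLE/MODIFY    same as current       NOCHANGE
 *   ENABLE/MODIFY    different             MODIFY
 */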
10383 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10384 					  enum hnae3_port_base_vlan_state state,
10385 					  u16 vlan, u8 qos)
10386 {
10387 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10388 		if (!vlan && !qos)
10389 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10390 
10391 		return HNAE3_PORT_BASE_VLAN_ENABLE;
10392 	}
10393 
10394 	if (!vlan && !qos)
10395 		return HNAE3_PORT_BASE_VLAN_DISABLE;
10396 
10397 	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10398 	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
10399 		return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10400 
10401 	return HNAE3_PORT_BASE_VLAN_MODIFY;
10402 }
10403 
10404 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10405 				    u16 vlan, u8 qos, __be16 proto)
10406 {
10407 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10408 	struct hclge_vport *vport = hclge_get_vport(handle);
10409 	struct hclge_dev *hdev = vport->back;
10410 	struct hclge_vlan_info vlan_info;
10411 	u16 state;
10412 	int ret;
10413 
10414 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10415 		return -EOPNOTSUPP;
10416 
10417 	vport = hclge_get_vf_vport(hdev, vfid);
10418 	if (!vport)
10419 		return -EINVAL;
10420 
10421 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
10422 	if (vlan > VLAN_N_VID - 1 || qos > 7)
10423 		return -EINVAL;
10424 	if (proto != htons(ETH_P_8021Q))
10425 		return -EPROTONOSUPPORT;
10426 
10427 	state = hclge_get_port_base_vlan_state(vport,
10428 					       vport->port_base_vlan_cfg.state,
10429 					       vlan, qos);
10430 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10431 		return 0;
10432 
10433 	vlan_info.vlan_tag = vlan;
10434 	vlan_info.qos = qos;
10435 	vlan_info.vlan_proto = ntohs(proto);
10436 
10437 	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10438 	if (ret) {
10439 		dev_err(&hdev->pdev->dev,
10440 			"failed to update port base vlan for vf %d, ret = %d\n",
10441 			vfid, ret);
10442 		return ret;
10443 	}
10444 
10445 	/* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10446 	 * VLAN state.
10447 	 */
10448 	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10449 	    test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10450 		hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10451 						  vport->vport_id, state,
10452 						  &vlan_info);
10453 
10454 	return 0;
10455 }
10456 
10457 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10458 {
10459 	struct hclge_vlan_info *vlan_info;
10460 	struct hclge_vport *vport;
10461 	int ret;
10462 	int vf;
10463 
10464 	/* clear the port base vlan for all vfs */
10465 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10466 		vport = &hdev->vport[vf];
10467 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10468 
10469 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10470 					       vport->vport_id,
10471 					       vlan_info->vlan_tag, true);
10472 		if (ret)
10473 			dev_err(&hdev->pdev->dev,
10474 				"failed to clear vf vlan for vf%d, ret = %d\n",
10475 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10476 	}
10477 }
10478 
10479 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10480 			  u16 vlan_id, bool is_kill)
10481 {
10482 	struct hclge_vport *vport = hclge_get_vport(handle);
10483 	struct hclge_dev *hdev = vport->back;
10484 	bool writen_to_tbl = false;
10485 	int ret = 0;
10486 
10487 	/* When the device is resetting or the reset failed, firmware is
10488 	 * unable to handle the mailbox. Just record the vlan id, and remove
10489 	 * it after the reset is finished.
10490 	 */
10491 	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10492 	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10493 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10494 		return -EBUSY;
10495 	}
10496 
10497 	/* when port base vlan is enabled, we use the port base vlan as the
10498 	 * vlan filter entry. In this case, we don't update the vlan filter
10499 	 * table when the user adds or removes a vlan, we just update the
10500 	 * vport vlan list. The vlan ids in the vlan list will not be written
10501 	 * to the vlan filter table until port base vlan is disabled.
10502 	 */
10503 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10504 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10505 					       vlan_id, is_kill);
10506 		writen_to_tbl = true;
10507 	}
10508 
10509 	if (!ret) {
10510 		if (is_kill)
10511 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10512 		else
10513 			hclge_add_vport_vlan_table(vport, vlan_id,
10514 						   writen_to_tbl);
10515 	} else if (is_kill) {
10516 		/* when removing the hw vlan filter failed, record the vlan
10517 		 * id, and try to remove it from hw later, to stay consistent
10518 		 * with the stack
10519 		 */
10520 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
10521 	}
10522 
10523 	hclge_set_vport_vlan_fltr_change(vport);
10524 
10525 	return ret;
10526 }
10527 
10528 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10529 {
10530 	struct hclge_vport *vport;
10531 	int ret;
10532 	u16 i;
10533 
10534 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10535 		vport = &hdev->vport[i];
10536 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10537 					&vport->state))
10538 			continue;
10539 
10540 		ret = hclge_enable_vport_vlan_filter(vport,
10541 						     vport->req_vlan_fltr_en);
10542 		if (ret) {
10543 			dev_err(&hdev->pdev->dev,
10544 				"failed to sync vlan filter state for vport%u, ret = %d\n",
10545 				vport->vport_id, ret);
10546 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10547 				&vport->state);
10548 			return;
10549 		}
10550 	}
10551 }
10552 
10553 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10554 {
10555 #define HCLGE_MAX_SYNC_COUNT	60
10556 
10557 	int i, ret, sync_cnt = 0;
10558 	u16 vlan_id;
10559 
	/* handle all vports, including the always-alive PF (vport 0) */
10561 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10562 		struct hclge_vport *vport = &hdev->vport[i];
10563 
10564 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10565 					 VLAN_N_VID);
10566 		while (vlan_id != VLAN_N_VID) {
10567 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10568 						       vport->vport_id, vlan_id,
10569 						       true);
10570 			if (ret && ret != -EINVAL)
10571 				return;
10572 
10573 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10574 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
10575 			hclge_set_vport_vlan_fltr_change(vport);
10576 
10577 			sync_cnt++;
10578 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10579 				return;
10580 
10581 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10582 						 VLAN_N_VID);
10583 		}
10584 	}
10585 
10586 	hclge_sync_vlan_fltr_state(hdev);
10587 }
10588 
10589 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10590 {
10591 	struct hclge_config_max_frm_size_cmd *req;
10592 	struct hclge_desc desc;
10593 
10594 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10595 
10596 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10597 	req->max_frm_size = cpu_to_le16(new_mps);
10598 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10599 
10600 	return hclge_cmd_send(&hdev->hw, &desc, 1);
10601 }
10602 
10603 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10604 {
10605 	struct hclge_vport *vport = hclge_get_vport(handle);
10606 
10607 	return hclge_set_vport_mtu(vport, new_mtu);
10608 }
10609 
10610 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10611 {
10612 	struct hclge_dev *hdev = vport->back;
10613 	int i, max_frm_size, ret;
10614 
	/* HW supports 2 layers of VLAN tags */
10616 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
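	/* e.g. new_mtu = 1500 gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
	 * 2 * 4 (VLAN_HLEN) = 1526 bytes of max frame size
	 */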
10617 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10618 	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10619 		return -EINVAL;
10620 
10621 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10622 	mutex_lock(&hdev->vport_lock);
10623 	/* VF's mps must fit within hdev->mps */
10624 	if (vport->vport_id && max_frm_size > hdev->mps) {
10625 		mutex_unlock(&hdev->vport_lock);
10626 		return -EINVAL;
10627 	} else if (vport->vport_id) {
10628 		vport->mps = max_frm_size;
10629 		mutex_unlock(&hdev->vport_lock);
10630 		return 0;
10631 	}
10632 
	/* PF's mps must be no less than any VF's mps */
10634 	for (i = 1; i < hdev->num_alloc_vport; i++)
10635 		if (max_frm_size < hdev->vport[i].mps) {
10636 			mutex_unlock(&hdev->vport_lock);
10637 			return -EINVAL;
10638 		}
10639 
10640 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10641 
10642 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
10643 	if (ret) {
10644 		dev_err(&hdev->pdev->dev,
10645 			"Change mtu fail, ret =%d\n", ret);
10646 		goto out;
10647 	}
10648 
10649 	hdev->mps = max_frm_size;
10650 	vport->mps = max_frm_size;
10651 
10652 	ret = hclge_buffer_alloc(hdev);
10653 	if (ret)
10654 		dev_err(&hdev->pdev->dev,
10655 			"Allocate buffer fail, ret =%d\n", ret);
10656 
10657 out:
10658 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10659 	mutex_unlock(&hdev->vport_lock);
10660 	return ret;
10661 }
10662 
10663 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10664 				    bool enable)
10665 {
10666 	struct hclge_reset_tqp_queue_cmd *req;
10667 	struct hclge_desc desc;
10668 	int ret;
10669 
10670 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10671 
10672 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10673 	req->tqp_id = cpu_to_le16(queue_id);
10674 	if (enable)
10675 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10676 
10677 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10678 	if (ret) {
10679 		dev_err(&hdev->pdev->dev,
10680 			"Send tqp reset cmd error, status =%d\n", ret);
10681 		return ret;
10682 	}
10683 
10684 	return 0;
10685 }
10686 
10687 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10688 {
10689 	struct hclge_reset_tqp_queue_cmd *req;
10690 	struct hclge_desc desc;
10691 	int ret;
10692 
10693 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10694 
10695 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10696 	req->tqp_id = cpu_to_le16(queue_id);
10697 
10698 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10699 	if (ret) {
10700 		dev_err(&hdev->pdev->dev,
10701 			"Get reset status error, status =%d\n", ret);
10702 		return ret;
10703 	}
10704 
10705 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10706 }
10707 
10708 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10709 {
10710 	struct hnae3_queue *queue;
10711 	struct hclge_tqp *tqp;
10712 
10713 	queue = handle->kinfo.tqp[queue_id];
10714 	tqp = container_of(queue, struct hclge_tqp, q);
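	/* tqp->index is the global tqp index corresponding to the
	 * handle-local queue_id
	 */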
10715 
10716 	return tqp->index;
10717 }
10718 
10719 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10720 {
10721 	struct hclge_vport *vport = hclge_get_vport(handle);
10722 	struct hclge_dev *hdev = vport->back;
10723 	u16 reset_try_times = 0;
10724 	int reset_status;
10725 	u16 queue_gid;
10726 	int ret;
10727 	u16 i;
10728 
10729 	for (i = 0; i < handle->kinfo.num_tqps; i++) {
10730 		queue_gid = hclge_covert_handle_qid_global(handle, i);
10731 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10732 		if (ret) {
10733 			dev_err(&hdev->pdev->dev,
10734 				"failed to send reset tqp cmd, ret = %d\n",
10735 				ret);
10736 			return ret;
10737 		}
10738 
10739 		while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10740 			reset_status = hclge_get_reset_status(hdev, queue_gid);
10741 			if (reset_status)
10742 				break;
10743 
10744 			/* Wait for tqp hw reset */
10745 			usleep_range(1000, 1200);
10746 		}
10747 
10748 		if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10749 			dev_err(&hdev->pdev->dev,
10750 				"wait for tqp hw reset timeout\n");
10751 			return -ETIME;
10752 		}
10753 
10754 		ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10755 		if (ret) {
10756 			dev_err(&hdev->pdev->dev,
10757 				"failed to deassert soft reset, ret = %d\n",
10758 				ret);
10759 			return ret;
10760 		}
10761 		reset_try_times = 0;
10762 	}
10763 	return 0;
10764 }
10765 
10766 static int hclge_reset_rcb(struct hnae3_handle *handle)
10767 {
10768 #define HCLGE_RESET_RCB_NOT_SUPPORT	0U
10769 #define HCLGE_RESET_RCB_SUCCESS		1U
10770 
10771 	struct hclge_vport *vport = hclge_get_vport(handle);
10772 	struct hclge_dev *hdev = vport->back;
10773 	struct hclge_reset_cmd *req;
10774 	struct hclge_desc desc;
10775 	u8 return_status;
10776 	u16 queue_gid;
10777 	int ret;
10778 
10779 	queue_gid = hclge_covert_handle_qid_global(handle, 0);
10780 
10781 	req = (struct hclge_reset_cmd *)desc.data;
10782 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10783 	hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10784 	req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10785 	req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10786 
10787 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10788 	if (ret) {
10789 		dev_err(&hdev->pdev->dev,
10790 			"failed to send rcb reset cmd, ret = %d\n", ret);
10791 		return ret;
10792 	}
10793 
10794 	return_status = req->fun_reset_rcb_return_status;
10795 	if (return_status == HCLGE_RESET_RCB_SUCCESS)
10796 		return 0;
10797 
10798 	if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10799 		dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10800 			return_status);
10801 		return -EIO;
10802 	}
10803 
10804 	/* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10805 	 * again to reset all tqps
10806 	 */
10807 	return hclge_reset_tqp_cmd(handle);
10808 }
10809 
10810 int hclge_reset_tqp(struct hnae3_handle *handle)
10811 {
10812 	struct hclge_vport *vport = hclge_get_vport(handle);
10813 	struct hclge_dev *hdev = vport->back;
10814 	int ret;
10815 
10816 	/* only need to disable PF's tqp */
10817 	if (!vport->vport_id) {
10818 		ret = hclge_tqp_enable(handle, false);
10819 		if (ret) {
10820 			dev_err(&hdev->pdev->dev,
10821 				"failed to disable tqp, ret = %d\n", ret);
10822 			return ret;
10823 		}
10824 	}
10825 
10826 	return hclge_reset_rcb(handle);
10827 }
10828 
10829 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10830 {
10831 	struct hclge_vport *vport = hclge_get_vport(handle);
10832 	struct hclge_dev *hdev = vport->back;
10833 
10834 	return hdev->fw_version;
10835 }
10836 
10837 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10838 {
10839 	struct phy_device *phydev = hdev->hw.mac.phydev;
10840 
10841 	if (!phydev)
10842 		return;
10843 
10844 	phy_set_asym_pause(phydev, rx_en, tx_en);
10845 }
10846 
10847 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10848 {
10849 	int ret;
10850 
10851 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10852 		return 0;
10853 
10854 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10855 	if (ret)
10856 		dev_err(&hdev->pdev->dev,
10857 			"configure pauseparam error, ret = %d.\n", ret);
10858 
10859 	return ret;
10860 }
10861 
10862 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10863 {
10864 	struct phy_device *phydev = hdev->hw.mac.phydev;
10865 	u16 remote_advertising = 0;
10866 	u16 local_advertising;
10867 	u32 rx_pause, tx_pause;
10868 	u8 flowctl;
10869 
10870 	if (!phydev->link || !phydev->autoneg)
10871 		return 0;
10872 
10873 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10874 
10875 	if (phydev->pause)
10876 		remote_advertising = LPA_PAUSE_CAP;
10877 
10878 	if (phydev->asym_pause)
10879 		remote_advertising |= LPA_PAUSE_ASYM;
10880 
10881 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10882 					   remote_advertising);
10883 	tx_pause = flowctl & FLOW_CTRL_TX;
10884 	rx_pause = flowctl & FLOW_CTRL_RX;
10885 
10886 	if (phydev->duplex == HCLGE_MAC_HALF) {
10887 		tx_pause = 0;
10888 		rx_pause = 0;
10889 	}
10890 
10891 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10892 }
10893 
10894 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10895 				 u32 *rx_en, u32 *tx_en)
10896 {
10897 	struct hclge_vport *vport = hclge_get_vport(handle);
10898 	struct hclge_dev *hdev = vport->back;
10899 	u8 media_type = hdev->hw.mac.media_type;
10900 
10901 	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10902 		    hclge_get_autoneg(handle) : 0;
10903 
10904 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10905 		*rx_en = 0;
10906 		*tx_en = 0;
10907 		return;
10908 	}
10909 
10910 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10911 		*rx_en = 1;
10912 		*tx_en = 0;
10913 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10914 		*tx_en = 1;
10915 		*rx_en = 0;
10916 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10917 		*rx_en = 1;
10918 		*tx_en = 1;
10919 	} else {
10920 		*rx_en = 0;
10921 		*tx_en = 0;
10922 	}
10923 }
10924 
10925 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10926 					 u32 rx_en, u32 tx_en)
10927 {
10928 	if (rx_en && tx_en)
10929 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
10930 	else if (rx_en && !tx_en)
10931 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10932 	else if (!rx_en && tx_en)
10933 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10934 	else
10935 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
10936 
10937 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10938 }
10939 
10940 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10941 				u32 rx_en, u32 tx_en)
10942 {
10943 	struct hclge_vport *vport = hclge_get_vport(handle);
10944 	struct hclge_dev *hdev = vport->back;
10945 	struct phy_device *phydev = hdev->hw.mac.phydev;
10946 	u32 fc_autoneg;
10947 
10948 	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10949 		fc_autoneg = hclge_get_autoneg(handle);
10950 		if (auto_neg != fc_autoneg) {
10951 			dev_info(&hdev->pdev->dev,
10952 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10953 			return -EOPNOTSUPP;
10954 		}
10955 	}
10956 
10957 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10958 		dev_info(&hdev->pdev->dev,
10959 			 "Priority flow control enabled. Cannot set link flow control.\n");
10960 		return -EOPNOTSUPP;
10961 	}
10962 
10963 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10964 
10965 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
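
	/* when autoneg is off, or the PHY is managed by the IMP firmware,
	 * apply the pause setting to the MAC directly; otherwise restart
	 * autoneg so the updated advertisement takes effect
	 */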
10966 
10967 	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10968 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10969 
10970 	if (phydev)
10971 		return phy_start_aneg(phydev);
10972 
10973 	return -EOPNOTSUPP;
10974 }
10975 
10976 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10977 					  u8 *auto_neg, u32 *speed, u8 *duplex)
10978 {
10979 	struct hclge_vport *vport = hclge_get_vport(handle);
10980 	struct hclge_dev *hdev = vport->back;
10981 
10982 	if (speed)
10983 		*speed = hdev->hw.mac.speed;
10984 	if (duplex)
10985 		*duplex = hdev->hw.mac.duplex;
10986 	if (auto_neg)
10987 		*auto_neg = hdev->hw.mac.autoneg;
10988 }
10989 
10990 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10991 				 u8 *module_type)
10992 {
10993 	struct hclge_vport *vport = hclge_get_vport(handle);
10994 	struct hclge_dev *hdev = vport->back;
10995 
	/* When the nic is down, the service task is not running and doesn't
	 * update the port information every second. Query the port
	 * information before returning the media type, to ensure the media
	 * information is correct.
	 */
11000 	hclge_update_port_info(hdev);
11001 
11002 	if (media_type)
11003 		*media_type = hdev->hw.mac.media_type;
11004 
11005 	if (module_type)
11006 		*module_type = hdev->hw.mac.module_type;
11007 }
11008 
11009 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11010 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
11011 {
11012 	struct hclge_vport *vport = hclge_get_vport(handle);
11013 	struct hclge_dev *hdev = vport->back;
11014 	struct phy_device *phydev = hdev->hw.mac.phydev;
11015 	int mdix_ctrl, mdix, is_resolved;
11016 	unsigned int retval;
11017 
11018 	if (!phydev) {
11019 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11020 		*tp_mdix = ETH_TP_MDI_INVALID;
11021 		return;
11022 	}
11023 
11024 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11025 
11026 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11027 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11028 				    HCLGE_PHY_MDIX_CTRL_S);
11029 
11030 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11031 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11032 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11033 
11034 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11035 
11036 	switch (mdix_ctrl) {
11037 	case 0x0:
11038 		*tp_mdix_ctrl = ETH_TP_MDI;
11039 		break;
11040 	case 0x1:
11041 		*tp_mdix_ctrl = ETH_TP_MDI_X;
11042 		break;
11043 	case 0x3:
11044 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11045 		break;
11046 	default:
11047 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11048 		break;
11049 	}
11050 
11051 	if (!is_resolved)
11052 		*tp_mdix = ETH_TP_MDI_INVALID;
11053 	else if (mdix)
11054 		*tp_mdix = ETH_TP_MDI_X;
11055 	else
11056 		*tp_mdix = ETH_TP_MDI;
11057 }
11058 
11059 static void hclge_info_show(struct hclge_dev *hdev)
11060 {
11061 	struct device *dev = &hdev->pdev->dev;
11062 
11063 	dev_info(dev, "PF info begin:\n");
11064 
11065 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11066 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11067 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11068 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11069 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11070 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11071 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11072 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11073 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11074 	dev_info(dev, "This is %s PF\n",
11075 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11076 	dev_info(dev, "DCB %s\n",
11077 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11078 	dev_info(dev, "MQPRIO %s\n",
11079 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11080 	dev_info(dev, "Default tx spare buffer size: %u\n",
11081 		 hdev->tx_spare_buf_size);
11082 
11083 	dev_info(dev, "PF info end.\n");
11084 }
11085 
11086 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11087 					  struct hclge_vport *vport)
11088 {
11089 	struct hnae3_client *client = vport->nic.client;
11090 	struct hclge_dev *hdev = ae_dev->priv;
11091 	int rst_cnt = hdev->rst_stats.reset_cnt;
11092 	int ret;
11093 
11094 	ret = client->ops->init_instance(&vport->nic);
11095 	if (ret)
11096 		return ret;
11097 
11098 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11099 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11100 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11101 		ret = -EBUSY;
11102 		goto init_nic_err;
11103 	}
11104 
11105 	/* Enable nic hw error interrupts */
11106 	ret = hclge_config_nic_hw_error(hdev, true);
11107 	if (ret) {
11108 		dev_err(&ae_dev->pdev->dev,
11109 			"fail(%d) to enable hw error interrupts\n", ret);
11110 		goto init_nic_err;
11111 	}
11112 
11113 	hnae3_set_client_init_flag(client, ae_dev, 1);
11114 
11115 	if (netif_msg_drv(&hdev->vport->nic))
11116 		hclge_info_show(hdev);
11117 
11118 	return ret;
11119 
11120 init_nic_err:
11121 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11122 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11123 		msleep(HCLGE_WAIT_RESET_DONE);
11124 
11125 	client->ops->uninit_instance(&vport->nic, 0);
11126 
11127 	return ret;
11128 }
11129 
11130 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11131 					   struct hclge_vport *vport)
11132 {
11133 	struct hclge_dev *hdev = ae_dev->priv;
11134 	struct hnae3_client *client;
11135 	int rst_cnt;
11136 	int ret;
11137 
11138 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11139 	    !hdev->nic_client)
11140 		return 0;
11141 
11142 	client = hdev->roce_client;
11143 	ret = hclge_init_roce_base_info(vport);
11144 	if (ret)
11145 		return ret;
11146 
11147 	rst_cnt = hdev->rst_stats.reset_cnt;
11148 	ret = client->ops->init_instance(&vport->roce);
11149 	if (ret)
11150 		return ret;
11151 
11152 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11153 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11154 	    rst_cnt != hdev->rst_stats.reset_cnt) {
11155 		ret = -EBUSY;
11156 		goto init_roce_err;
11157 	}
11158 
11159 	/* Enable roce ras interrupts */
11160 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
11161 	if (ret) {
11162 		dev_err(&ae_dev->pdev->dev,
11163 			"fail(%d) to enable roce ras interrupts\n", ret);
11164 		goto init_roce_err;
11165 	}
11166 
11167 	hnae3_set_client_init_flag(client, ae_dev, 1);
11168 
11169 	return 0;
11170 
11171 init_roce_err:
11172 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11173 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11174 		msleep(HCLGE_WAIT_RESET_DONE);
11175 
11176 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11177 
11178 	return ret;
11179 }
11180 
11181 static int hclge_init_client_instance(struct hnae3_client *client,
11182 				      struct hnae3_ae_dev *ae_dev)
11183 {
11184 	struct hclge_dev *hdev = ae_dev->priv;
11185 	struct hclge_vport *vport = &hdev->vport[0];
11186 	int ret;
11187 
11188 	switch (client->type) {
11189 	case HNAE3_CLIENT_KNIC:
11190 		hdev->nic_client = client;
11191 		vport->nic.client = client;
11192 		ret = hclge_init_nic_client_instance(ae_dev, vport);
11193 		if (ret)
11194 			goto clear_nic;
11195 
11196 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11197 		if (ret)
11198 			goto clear_roce;
11199 
11200 		break;
11201 	case HNAE3_CLIENT_ROCE:
11202 		if (hnae3_dev_roce_supported(hdev)) {
11203 			hdev->roce_client = client;
11204 			vport->roce.client = client;
11205 		}
11206 
11207 		ret = hclge_init_roce_client_instance(ae_dev, vport);
11208 		if (ret)
11209 			goto clear_roce;
11210 
11211 		break;
11212 	default:
11213 		return -EINVAL;
11214 	}
11215 
11216 	return 0;
11217 
11218 clear_nic:
11219 	hdev->nic_client = NULL;
11220 	vport->nic.client = NULL;
11221 	return ret;
11222 clear_roce:
11223 	hdev->roce_client = NULL;
11224 	vport->roce.client = NULL;
11225 	return ret;
11226 }
11227 
11228 static void hclge_uninit_client_instance(struct hnae3_client *client,
11229 					 struct hnae3_ae_dev *ae_dev)
11230 {
11231 	struct hclge_dev *hdev = ae_dev->priv;
11232 	struct hclge_vport *vport = &hdev->vport[0];
11233 
11234 	if (hdev->roce_client) {
11235 		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11236 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11237 			msleep(HCLGE_WAIT_RESET_DONE);
11238 
11239 		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11240 		hdev->roce_client = NULL;
11241 		vport->roce.client = NULL;
11242 	}
11243 	if (client->type == HNAE3_CLIENT_ROCE)
11244 		return;
11245 	if (hdev->nic_client && client->ops->uninit_instance) {
11246 		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11247 		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11248 			msleep(HCLGE_WAIT_RESET_DONE);
11249 
11250 		client->ops->uninit_instance(&vport->nic, 0);
11251 		hdev->nic_client = NULL;
11252 		vport->nic.client = NULL;
11253 	}
11254 }
11255 
11256 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11257 {
11258 #define HCLGE_MEM_BAR		4
11259 
11260 	struct pci_dev *pdev = hdev->pdev;
11261 	struct hclge_hw *hw = &hdev->hw;
11262 
	/* for devices that do not have device memory, return directly */
11264 	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11265 		return 0;
11266 
11267 	hw->mem_base = devm_ioremap_wc(&pdev->dev,
11268 				       pci_resource_start(pdev, HCLGE_MEM_BAR),
11269 				       pci_resource_len(pdev, HCLGE_MEM_BAR));
11270 	if (!hw->mem_base) {
11271 		dev_err(&pdev->dev, "failed to map device memory\n");
11272 		return -EFAULT;
11273 	}
11274 
11275 	return 0;
11276 }
11277 
11278 static int hclge_pci_init(struct hclge_dev *hdev)
11279 {
11280 	struct pci_dev *pdev = hdev->pdev;
11281 	struct hclge_hw *hw;
11282 	int ret;
11283 
11284 	ret = pci_enable_device(pdev);
11285 	if (ret) {
11286 		dev_err(&pdev->dev, "failed to enable PCI device\n");
11287 		return ret;
11288 	}
11289 
11290 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11291 	if (ret) {
11292 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11293 		if (ret) {
11294 			dev_err(&pdev->dev,
11295 				"can't set consistent PCI DMA");
11296 			goto err_disable_device;
11297 		}
11298 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11299 	}
11300 
11301 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11302 	if (ret) {
11303 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11304 		goto err_disable_device;
11305 	}
11306 
11307 	pci_set_master(pdev);
11308 	hw = &hdev->hw;
11309 	hw->io_base = pcim_iomap(pdev, 2, 0);
11310 	if (!hw->io_base) {
11311 		dev_err(&pdev->dev, "Can't map configuration register space\n");
11312 		ret = -ENOMEM;
11313 		goto err_clr_master;
11314 	}
11315 
11316 	ret = hclge_dev_mem_map(hdev);
11317 	if (ret)
11318 		goto err_unmap_io_base;
11319 
11320 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11321 
11322 	return 0;
11323 
11324 err_unmap_io_base:
11325 	pcim_iounmap(pdev, hdev->hw.io_base);
11326 err_clr_master:
11327 	pci_clear_master(pdev);
11328 	pci_release_regions(pdev);
11329 err_disable_device:
11330 	pci_disable_device(pdev);
11331 
11332 	return ret;
11333 }
11334 
11335 static void hclge_pci_uninit(struct hclge_dev *hdev)
11336 {
11337 	struct pci_dev *pdev = hdev->pdev;
11338 
11339 	if (hdev->hw.mem_base)
11340 		devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11341 
11342 	pcim_iounmap(pdev, hdev->hw.io_base);
11343 	pci_free_irq_vectors(pdev);
11344 	pci_clear_master(pdev);
11345 	pci_release_mem_regions(pdev);
11346 	pci_disable_device(pdev);
11347 }
11348 
11349 static void hclge_state_init(struct hclge_dev *hdev)
11350 {
11351 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11352 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11353 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11354 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11355 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11356 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11357 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11358 }
11359 
11360 static void hclge_state_uninit(struct hclge_dev *hdev)
11361 {
11362 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11363 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11364 
11365 	if (hdev->reset_timer.function)
11366 		del_timer_sync(&hdev->reset_timer);
11367 	if (hdev->service_task.work.func)
11368 		cancel_delayed_work_sync(&hdev->service_task);
11369 }
11370 
11371 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11372 					enum hnae3_reset_type rst_type)
11373 {
11374 #define HCLGE_RESET_RETRY_WAIT_MS	500
11375 #define HCLGE_RESET_RETRY_CNT	5
11376 
11377 	struct hclge_dev *hdev = ae_dev->priv;
11378 	int retry_cnt = 0;
11379 	int ret;
11380 
11381 retry:
11382 	down(&hdev->reset_sem);
11383 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11384 	hdev->reset_type = rst_type;
11385 	ret = hclge_reset_prepare(hdev);
11386 	if (ret || hdev->reset_pending) {
11387 		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11388 			ret);
11389 		if (hdev->reset_pending ||
11390 		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11391 			dev_err(&hdev->pdev->dev,
11392 				"reset_pending:0x%lx, retry_cnt:%d\n",
11393 				hdev->reset_pending, retry_cnt);
11394 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11395 			up(&hdev->reset_sem);
11396 			msleep(HCLGE_RESET_RETRY_WAIT_MS);
11397 			goto retry;
11398 		}
11399 	}
11400 
	/* disable the misc vector before the reset is done */
11402 	hclge_enable_vector(&hdev->misc_vector, false);
11403 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11404 
11405 	if (hdev->reset_type == HNAE3_FLR_RESET)
11406 		hdev->rst_stats.flr_rst_cnt++;
11407 }
11408 
11409 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11410 {
11411 	struct hclge_dev *hdev = ae_dev->priv;
11412 	int ret;
11413 
11414 	hclge_enable_vector(&hdev->misc_vector, true);
11415 
11416 	ret = hclge_reset_rebuild(hdev);
11417 	if (ret)
11418 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11419 
11420 	hdev->reset_type = HNAE3_NONE_RESET;
11421 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11422 	up(&hdev->reset_sem);
11423 }
11424 
11425 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11426 {
11427 	u16 i;
11428 
11429 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11430 		struct hclge_vport *vport = &hdev->vport[i];
11431 		int ret;
11432 
		/* Send cmd to clear VF's FUNC_RST_ING */
11434 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11435 		if (ret)
11436 			dev_warn(&hdev->pdev->dev,
11437 				 "clear vf(%u) rst failed %d!\n",
11438 				 vport->vport_id, ret);
11439 	}
11440 }
11441 
11442 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11443 {
11444 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11445 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11446 }
11447 
11448 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11449 {
11450 	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11451 		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11452 }
11453 
11454 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11455 {
11456 	struct pci_dev *pdev = ae_dev->pdev;
11457 	struct hclge_dev *hdev;
11458 	int ret;
11459 
11460 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11461 	if (!hdev)
11462 		return -ENOMEM;
11463 
11464 	hdev->pdev = pdev;
11465 	hdev->ae_dev = ae_dev;
11466 	hdev->reset_type = HNAE3_NONE_RESET;
11467 	hdev->reset_level = HNAE3_FUNC_RESET;
11468 	ae_dev->priv = hdev;
11469 
	/* HW supports 2 layers of VLAN tags */
11471 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11472 
11473 	mutex_init(&hdev->vport_lock);
11474 	spin_lock_init(&hdev->fd_rule_lock);
11475 	sema_init(&hdev->reset_sem, 1);
11476 
11477 	ret = hclge_pci_init(hdev);
11478 	if (ret)
11479 		goto out;
11480 
	/* Firmware command queue initialization */
11482 	ret = hclge_cmd_queue_init(hdev);
11483 	if (ret)
11484 		goto err_pci_uninit;
11485 
	/* Firmware command initialization */
11487 	ret = hclge_cmd_init(hdev);
11488 	if (ret)
11489 		goto err_cmd_uninit;
11490 
11491 	ret = hclge_get_cap(hdev);
11492 	if (ret)
11493 		goto err_cmd_uninit;
11494 
11495 	ret = hclge_query_dev_specs(hdev);
11496 	if (ret) {
11497 		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11498 			ret);
11499 		goto err_cmd_uninit;
11500 	}
11501 
11502 	ret = hclge_configure(hdev);
11503 	if (ret) {
11504 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11505 		goto err_cmd_uninit;
11506 	}
11507 
11508 	ret = hclge_init_msi(hdev);
11509 	if (ret) {
11510 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11511 		goto err_cmd_uninit;
11512 	}
11513 
11514 	ret = hclge_misc_irq_init(hdev);
11515 	if (ret)
11516 		goto err_msi_uninit;
11517 
11518 	ret = hclge_alloc_tqps(hdev);
11519 	if (ret) {
11520 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11521 		goto err_msi_irq_uninit;
11522 	}
11523 
11524 	ret = hclge_alloc_vport(hdev);
11525 	if (ret)
11526 		goto err_msi_irq_uninit;
11527 
11528 	ret = hclge_map_tqp(hdev);
11529 	if (ret)
11530 		goto err_msi_irq_uninit;
11531 
11532 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11533 	    !hnae3_dev_phy_imp_supported(hdev)) {
11534 		ret = hclge_mac_mdio_config(hdev);
11535 		if (ret)
11536 			goto err_msi_irq_uninit;
11537 	}
11538 
11539 	ret = hclge_init_umv_space(hdev);
11540 	if (ret)
11541 		goto err_mdiobus_unreg;
11542 
11543 	ret = hclge_mac_init(hdev);
11544 	if (ret) {
11545 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11546 		goto err_mdiobus_unreg;
11547 	}
11548 
11549 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11550 	if (ret) {
11551 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11552 		goto err_mdiobus_unreg;
11553 	}
11554 
11555 	ret = hclge_config_gro(hdev, true);
11556 	if (ret)
11557 		goto err_mdiobus_unreg;
11558 
11559 	ret = hclge_init_vlan_config(hdev);
11560 	if (ret) {
11561 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11562 		goto err_mdiobus_unreg;
11563 	}
11564 
11565 	ret = hclge_tm_schd_init(hdev);
11566 	if (ret) {
11567 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11568 		goto err_mdiobus_unreg;
11569 	}
11570 
11571 	ret = hclge_rss_init_cfg(hdev);
11572 	if (ret) {
11573 		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11574 		goto err_mdiobus_unreg;
11575 	}
11576 
11577 	ret = hclge_rss_init_hw(hdev);
11578 	if (ret) {
11579 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11580 		goto err_mdiobus_unreg;
11581 	}
11582 
11583 	ret = init_mgr_tbl(hdev);
11584 	if (ret) {
11585 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11586 		goto err_mdiobus_unreg;
11587 	}
11588 
11589 	ret = hclge_init_fd_config(hdev);
11590 	if (ret) {
11591 		dev_err(&pdev->dev,
11592 			"fd table init fail, ret=%d\n", ret);
11593 		goto err_mdiobus_unreg;
11594 	}
11595 
11596 	ret = hclge_ptp_init(hdev);
11597 	if (ret)
11598 		goto err_mdiobus_unreg;
11599 
11600 	INIT_KFIFO(hdev->mac_tnl_log);
11601 
11602 	hclge_dcb_ops_set(hdev);
11603 
11604 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11605 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11606 
	/* Set up affinity after the service timer setup because add_timer_on
	 * is called in the affinity notify callback.
	 */
11610 	hclge_misc_affinity_setup(hdev);
11611 
11612 	hclge_clear_all_event_cause(hdev);
11613 	hclge_clear_resetting_state(hdev);
11614 
	/* Log and clear the hw errors that have already occurred */
11616 	if (hnae3_dev_ras_imp_supported(hdev))
11617 		hclge_handle_occurred_error(hdev);
11618 	else
11619 		hclge_handle_all_hns_hw_errors(ae_dev);
11620 
	/* request a delayed reset for error recovery, because an immediate
	 * global reset on a PF may affect the pending initialization of
	 * other PFs
	 */
11624 	if (ae_dev->hw_err_reset_req) {
11625 		enum hnae3_reset_type reset_level;
11626 
11627 		reset_level = hclge_get_reset_level(ae_dev,
11628 						    &ae_dev->hw_err_reset_req);
11629 		hclge_set_def_reset_request(ae_dev, reset_level);
11630 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11631 	}
11632 
11633 	hclge_init_rxd_adv_layout(hdev);
11634 
11635 	/* Enable MISC vector(vector0) */
11636 	hclge_enable_vector(&hdev->misc_vector, true);
11637 
11638 	hclge_state_init(hdev);
11639 	hdev->last_reset_time = jiffies;
11640 
11641 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11642 		 HCLGE_DRIVER_NAME);
11643 
11644 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11645 
11646 	return 0;
11647 
11648 err_mdiobus_unreg:
11649 	if (hdev->hw.mac.phydev)
11650 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
11651 err_msi_irq_uninit:
11652 	hclge_misc_irq_uninit(hdev);
11653 err_msi_uninit:
11654 	pci_free_irq_vectors(pdev);
11655 err_cmd_uninit:
11656 	hclge_cmd_uninit(hdev);
11657 err_pci_uninit:
11658 	pcim_iounmap(pdev, hdev->hw.io_base);
11659 	pci_clear_master(pdev);
11660 	pci_release_regions(pdev);
11661 	pci_disable_device(pdev);
11662 out:
11663 	mutex_destroy(&hdev->vport_lock);
11664 	return ret;
11665 }
11666 
11667 static void hclge_stats_clear(struct hclge_dev *hdev)
11668 {
11669 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11670 }
11671 
11672 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11673 {
11674 	return hclge_config_switch_param(hdev, vf, enable,
11675 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11676 }
11677 
11678 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11679 {
11680 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11681 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
11682 					  enable, vf);
11683 }
11684 
11685 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11686 {
11687 	int ret;
11688 
11689 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11690 	if (ret) {
11691 		dev_err(&hdev->pdev->dev,
11692 			"Set vf %d mac spoof check %s failed, ret=%d\n",
11693 			vf, enable ? "on" : "off", ret);
11694 		return ret;
11695 	}
11696 
11697 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11698 	if (ret)
11699 		dev_err(&hdev->pdev->dev,
11700 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
11701 			vf, enable ? "on" : "off", ret);
11702 
11703 	return ret;
11704 }
11705 
11706 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11707 				 bool enable)
11708 {
11709 	struct hclge_vport *vport = hclge_get_vport(handle);
11710 	struct hclge_dev *hdev = vport->back;
11711 	u32 new_spoofchk = enable ? 1 : 0;
11712 	int ret;
11713 
11714 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11715 		return -EOPNOTSUPP;
11716 
11717 	vport = hclge_get_vf_vport(hdev, vf);
11718 	if (!vport)
11719 		return -EINVAL;
11720 
11721 	if (vport->vf_info.spoofchk == new_spoofchk)
11722 		return 0;
11723 
11724 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11725 		dev_warn(&hdev->pdev->dev,
11726 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11727 			 vf);
11728 	else if (enable && hclge_is_umv_space_full(vport, true))
11729 		dev_warn(&hdev->pdev->dev,
11730 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11731 			 vf);
11732 
11733 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11734 	if (ret)
11735 		return ret;
11736 
11737 	vport->vf_info.spoofchk = new_spoofchk;
11738 	return 0;
11739 }
11740 
11741 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11742 {
11743 	struct hclge_vport *vport = hdev->vport;
11744 	int ret;
11745 	int i;
11746 
11747 	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11748 		return 0;
11749 
11750 	/* resume the vf spoof check state after reset */
11751 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11752 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11753 					       vport->vf_info.spoofchk);
11754 		if (ret)
11755 			return ret;
11756 
11757 		vport++;
11758 	}
11759 
11760 	return 0;
11761 }
11762 
11763 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11764 {
11765 	struct hclge_vport *vport = hclge_get_vport(handle);
11766 	struct hclge_dev *hdev = vport->back;
11767 	u32 new_trusted = enable ? 1 : 0;
11768 
11769 	vport = hclge_get_vf_vport(hdev, vf);
11770 	if (!vport)
11771 		return -EINVAL;
11772 
11773 	if (vport->vf_info.trusted == new_trusted)
11774 		return 0;
11775 
11776 	vport->vf_info.trusted = new_trusted;
11777 	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11778 	hclge_task_schedule(hdev, 0);
11779 
11780 	return 0;
11781 }
11782 
11783 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11784 {
11785 	int ret;
11786 	int vf;
11787 
11788 	/* reset vf rate to default value */
11789 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11790 		struct hclge_vport *vport = &hdev->vport[vf];
11791 
11792 		vport->vf_info.max_tx_rate = 0;
11793 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11794 		if (ret)
11795 			dev_err(&hdev->pdev->dev,
11796 				"vf%d failed to reset to default, ret=%d\n",
11797 				vf - HCLGE_VF_VPORT_START_NUM, ret);
11798 	}
11799 }
11800 
11801 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11802 				     int min_tx_rate, int max_tx_rate)
11803 {
11804 	if (min_tx_rate != 0 ||
11805 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11806 		dev_err(&hdev->pdev->dev,
11807 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11808 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11809 		return -EINVAL;
11810 	}
11811 
11812 	return 0;
11813 }
11814 
11815 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11816 			     int min_tx_rate, int max_tx_rate, bool force)
11817 {
11818 	struct hclge_vport *vport = hclge_get_vport(handle);
11819 	struct hclge_dev *hdev = vport->back;
11820 	int ret;
11821 
11822 	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11823 	if (ret)
11824 		return ret;
11825 
11826 	vport = hclge_get_vf_vport(hdev, vf);
11827 	if (!vport)
11828 		return -EINVAL;
11829 
11830 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11831 		return 0;
11832 
11833 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11834 	if (ret)
11835 		return ret;
11836 
11837 	vport->vf_info.max_tx_rate = max_tx_rate;
11838 
11839 	return 0;
11840 }
11841 
11842 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11843 {
11844 	struct hnae3_handle *handle = &hdev->vport->nic;
11845 	struct hclge_vport *vport;
11846 	int ret;
11847 	int vf;
11848 
11849 	/* resume the vf max_tx_rate after reset */
11850 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11851 		vport = hclge_get_vf_vport(hdev, vf);
11852 		if (!vport)
11853 			return -EINVAL;
11854 
		/* zero means max rate; after reset, the firmware has already
		 * set it to max rate, so just continue.
		 */
11858 		if (!vport->vf_info.max_tx_rate)
11859 			continue;
11860 
11861 		ret = hclge_set_vf_rate(handle, vf, 0,
11862 					vport->vf_info.max_tx_rate, true);
11863 		if (ret) {
11864 			dev_err(&hdev->pdev->dev,
11865 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
11866 				vf, vport->vf_info.max_tx_rate, ret);
11867 			return ret;
11868 		}
11869 	}
11870 
11871 	return 0;
11872 }
11873 
11874 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11875 {
11876 	struct hclge_vport *vport = hdev->vport;
11877 	int i;
11878 
11879 	for (i = 0; i < hdev->num_alloc_vport; i++) {
11880 		hclge_vport_stop(vport);
11881 		vport++;
11882 	}
11883 }
11884 
11885 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11886 {
11887 	struct hclge_dev *hdev = ae_dev->priv;
11888 	struct pci_dev *pdev = ae_dev->pdev;
11889 	int ret;
11890 
11891 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
11892 
11893 	hclge_stats_clear(hdev);
	/* NOTE: pf reset doesn't need to clear or restore the pf and vf table
	 * entries, so don't clean the tables in memory here.
	 */
11897 	if (hdev->reset_type == HNAE3_IMP_RESET ||
11898 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
11899 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11900 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11901 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11902 		hclge_reset_umv_space(hdev);
11903 	}
11904 
11905 	ret = hclge_cmd_init(hdev);
11906 	if (ret) {
11907 		dev_err(&pdev->dev, "Cmd queue init failed\n");
11908 		return ret;
11909 	}
11910 
11911 	ret = hclge_map_tqp(hdev);
11912 	if (ret) {
11913 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11914 		return ret;
11915 	}
11916 
11917 	ret = hclge_mac_init(hdev);
11918 	if (ret) {
11919 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11920 		return ret;
11921 	}
11922 
11923 	ret = hclge_tp_port_init(hdev);
11924 	if (ret) {
11925 		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11926 			ret);
11927 		return ret;
11928 	}
11929 
11930 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11931 	if (ret) {
11932 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11933 		return ret;
11934 	}
11935 
11936 	ret = hclge_config_gro(hdev, true);
11937 	if (ret)
11938 		return ret;
11939 
11940 	ret = hclge_init_vlan_config(hdev);
11941 	if (ret) {
11942 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11943 		return ret;
11944 	}
11945 
11946 	ret = hclge_tm_init_hw(hdev, true);
11947 	if (ret) {
11948 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11949 		return ret;
11950 	}
11951 
11952 	ret = hclge_rss_init_hw(hdev);
11953 	if (ret) {
11954 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11955 		return ret;
11956 	}
11957 
11958 	ret = init_mgr_tbl(hdev);
11959 	if (ret) {
11960 		dev_err(&pdev->dev,
11961 			"failed to reinit manager table, ret = %d\n", ret);
11962 		return ret;
11963 	}
11964 
11965 	ret = hclge_init_fd_config(hdev);
11966 	if (ret) {
11967 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11968 		return ret;
11969 	}
11970 
11971 	ret = hclge_ptp_init(hdev);
11972 	if (ret)
11973 		return ret;
11974 
	/* Log and clear the hw errors that have already occurred */
11976 	if (hnae3_dev_ras_imp_supported(hdev))
11977 		hclge_handle_occurred_error(hdev);
11978 	else
11979 		hclge_handle_all_hns_hw_errors(ae_dev);
11980 
11981 	/* Re-enable the hw error interrupts because
11982 	 * the interrupts get disabled on global reset.
11983 	 */
11984 	ret = hclge_config_nic_hw_error(hdev, true);
11985 	if (ret) {
11986 		dev_err(&pdev->dev,
11987 			"fail(%d) to re-enable NIC hw error interrupts\n",
11988 			ret);
11989 		return ret;
11990 	}
11991 
11992 	if (hdev->roce_client) {
11993 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
11994 		if (ret) {
11995 			dev_err(&pdev->dev,
11996 				"fail(%d) to re-enable roce ras interrupts\n",
11997 				ret);
11998 			return ret;
11999 		}
12000 	}
12001 
12002 	hclge_reset_vport_state(hdev);
12003 	ret = hclge_reset_vport_spoofchk(hdev);
12004 	if (ret)
12005 		return ret;
12006 
12007 	ret = hclge_resume_vf_rate(hdev);
12008 	if (ret)
12009 		return ret;
12010 
12011 	hclge_init_rxd_adv_layout(hdev);
12012 
12013 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12014 		 HCLGE_DRIVER_NAME);
12015 
12016 	return 0;
12017 }
12018 
12019 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12020 {
12021 	struct hclge_dev *hdev = ae_dev->priv;
12022 	struct hclge_mac *mac = &hdev->hw.mac;
12023 
12024 	hclge_reset_vf_rate(hdev);
12025 	hclge_clear_vf_vlan(hdev);
12026 	hclge_misc_affinity_teardown(hdev);
12027 	hclge_state_uninit(hdev);
12028 	hclge_ptp_uninit(hdev);
12029 	hclge_uninit_rxd_adv_layout(hdev);
12030 	hclge_uninit_mac_table(hdev);
12031 	hclge_del_all_fd_entries(hdev);
12032 
12033 	if (mac->phydev)
12034 		mdiobus_unregister(mac->mdio_bus);
12035 
12036 	/* Disable MISC vector(vector0) */
12037 	hclge_enable_vector(&hdev->misc_vector, false);
12038 	synchronize_irq(hdev->misc_vector.vector_irq);
12039 
12040 	/* Disable all hw interrupts */
12041 	hclge_config_mac_tnl_int(hdev, false);
12042 	hclge_config_nic_hw_error(hdev, false);
12043 	hclge_config_rocee_ras_interrupt(hdev, false);
12044 
12045 	hclge_cmd_uninit(hdev);
12046 	hclge_misc_irq_uninit(hdev);
12047 	hclge_pci_uninit(hdev);
12048 	mutex_destroy(&hdev->vport_lock);
12049 	hclge_uninit_vport_vlan_table(hdev);
12050 	ae_dev->priv = NULL;
12051 }
12052 
12053 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12054 {
12055 	struct hclge_vport *vport = hclge_get_vport(handle);
12056 	struct hclge_dev *hdev = vport->back;
12057 
12058 	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12059 }
12060 
12061 static void hclge_get_channels(struct hnae3_handle *handle,
12062 			       struct ethtool_channels *ch)
12063 {
12064 	ch->max_combined = hclge_get_max_channels(handle);
12065 	ch->other_count = 1;
12066 	ch->max_other = 1;
12067 	ch->combined_count = handle->kinfo.rss_size;
12068 }
12069 
12070 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12071 					u16 *alloc_tqps, u16 *max_rss_size)
12072 {
12073 	struct hclge_vport *vport = hclge_get_vport(handle);
12074 	struct hclge_dev *hdev = vport->back;
12075 
12076 	*alloc_tqps = vport->alloc_tqps;
12077 	*max_rss_size = hdev->pf_rss_size_max;
12078 }
12079 
12080 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12081 			      bool rxfh_configured)
12082 {
12083 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12084 	struct hclge_vport *vport = hclge_get_vport(handle);
12085 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12086 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12087 	struct hclge_dev *hdev = vport->back;
12088 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12089 	u16 cur_rss_size = kinfo->rss_size;
12090 	u16 cur_tqps = kinfo->num_tqps;
12091 	u16 tc_valid[HCLGE_MAX_TC_NUM];
12092 	u16 roundup_size;
12093 	u32 *rss_indir;
12094 	unsigned int i;
12095 	int ret;
12096 
12097 	kinfo->req_rss_size = new_tqps_num;
12098 
12099 	ret = hclge_tm_vport_map_update(hdev);
12100 	if (ret) {
12101 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12102 		return ret;
12103 	}
12104 
12105 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
12106 	roundup_size = ilog2(roundup_size);
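	/* tc_size[] carries the per-TC queue count as a power-of-two
	 * exponent: e.g. rss_size = 6 rounds up to 8 and is stored as
	 * ilog2(8) = 3
	 */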
12107 	/* Set the RSS TC mode according to the new RSS size */
12108 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12109 		tc_valid[i] = 0;
12110 
12111 		if (!(hdev->hw_tc_map & BIT(i)))
12112 			continue;
12113 
12114 		tc_valid[i] = 1;
12115 		tc_size[i] = roundup_size;
12116 		tc_offset[i] = kinfo->rss_size * i;
12117 	}
12118 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12119 	if (ret)
12120 		return ret;
12121 
12122 	/* RSS indirection table has been configured by user */
12123 	if (rxfh_configured)
12124 		goto out;
12125 
	/* Reinitialize the rss indirection table according to the new RSS size */
12127 	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12128 			    GFP_KERNEL);
12129 	if (!rss_indir)
12130 		return -ENOMEM;
12131 
12132 	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12133 		rss_indir[i] = i % kinfo->rss_size;
12134 
12135 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12136 	if (ret)
12137 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12138 			ret);
12139 
12140 	kfree(rss_indir);
12141 
12142 out:
12143 	if (!ret)
12144 		dev_info(&hdev->pdev->dev,
12145 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12146 			 cur_rss_size, kinfo->rss_size,
12147 			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12148 
12149 	return ret;
12150 }
12151 
12152 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12153 			      u32 *regs_num_64_bit)
12154 {
12155 	struct hclge_desc desc;
12156 	u32 total_num;
12157 	int ret;
12158 
12159 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12160 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12161 	if (ret) {
12162 		dev_err(&hdev->pdev->dev,
12163 			"Query register number cmd failed, ret = %d.\n", ret);
12164 		return ret;
12165 	}
12166 
12167 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
12168 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
12169 
12170 	total_num = *regs_num_32_bit + *regs_num_64_bit;
12171 	if (!total_num)
12172 		return -EINVAL;
12173 
12174 	return 0;
12175 }
12176 
12177 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12178 				 void *data)
12179 {
12180 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12181 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12182 
12183 	struct hclge_desc *desc;
12184 	u32 *reg_val = data;
12185 	__le32 *desc_data;
12186 	int nodata_num;
12187 	int cmd_num;
12188 	int i, k, n;
12189 	int ret;
12190 
12191 	if (regs_num == 0)
12192 		return 0;
12193 
12194 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12195 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12196 			       HCLGE_32_BIT_REG_RTN_DATANUM);
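	/* the first BD returns nodata_num non-register words, which is why
	 * they are added to regs_num when sizing the BD chain
	 */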
12197 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12198 	if (!desc)
12199 		return -ENOMEM;
12200 
12201 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12202 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12203 	if (ret) {
12204 		dev_err(&hdev->pdev->dev,
12205 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
12206 		kfree(desc);
12207 		return ret;
12208 	}
12209 
12210 	for (i = 0; i < cmd_num; i++) {
12211 		if (i == 0) {
12212 			desc_data = (__le32 *)(&desc[i].data[0]);
12213 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12214 		} else {
12215 			desc_data = (__le32 *)(&desc[i]);
12216 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
12217 		}
12218 		for (k = 0; k < n; k++) {
12219 			*reg_val++ = le32_to_cpu(*desc_data++);
12220 
12221 			regs_num--;
12222 			if (!regs_num)
12223 				break;
12224 		}
12225 	}
12226 
12227 	kfree(desc);
12228 	return 0;
12229 }
12230 
12231 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12232 				 void *data)
12233 {
12234 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12235 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12236 
12237 	struct hclge_desc *desc;
12238 	u64 *reg_val = data;
12239 	__le64 *desc_data;
12240 	int nodata_len;
12241 	int cmd_num;
12242 	int i, k, n;
12243 	int ret;
12244 
12245 	if (regs_num == 0)
12246 		return 0;
12247 
12248 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12249 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12250 			       HCLGE_64_BIT_REG_RTN_DATANUM);
12251 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12252 	if (!desc)
12253 		return -ENOMEM;
12254 
12255 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12256 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12257 	if (ret) {
12258 		dev_err(&hdev->pdev->dev,
12259 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
12260 		kfree(desc);
12261 		return ret;
12262 	}
12263 
12264 	for (i = 0; i < cmd_num; i++) {
12265 		if (i == 0) {
12266 			desc_data = (__le64 *)(&desc[i].data[0]);
12267 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12268 		} else {
12269 			desc_data = (__le64 *)(&desc[i]);
12270 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
12271 		}
12272 		for (k = 0; k < n; k++) {
12273 			*reg_val++ = le64_to_cpu(*desc_data++);
12274 
12275 			regs_num--;
12276 			if (!regs_num)
12277 				break;
12278 		}
12279 	}
12280 
12281 	kfree(desc);
12282 	return 0;
12283 }
12284 
12285 #define MAX_SEPARATE_NUM	4
12286 #define SEPARATOR_VALUE		0xFDFCFBFA
12287 #define REG_NUM_PER_LINE	4
12288 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
12289 #define REG_SEPARATOR_LINE	1
12290 #define REG_NUM_REMAIN_MASK	3
12291 
12292 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12293 {
12294 	int i;
12295 
	/* initialize all command BDs except the last one */
12297 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12298 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12299 					   true);
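		/* chain this BD to the next one in the same command */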
12300 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12301 	}
12302 
12303 	/* initialize the last command BD */
12304 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12305 
12306 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12307 }
12308 
12309 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12310 				    int *bd_num_list,
12311 				    u32 type_num)
12312 {
12313 	u32 entries_per_desc, desc_index, index, offset, i;
12314 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12315 	int ret;
12316 
12317 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
12318 	if (ret) {
12319 		dev_err(&hdev->pdev->dev,
12320 			"Get dfx bd num fail, status is %d.\n", ret);
12321 		return ret;
12322 	}
12323 
12324 	entries_per_desc = ARRAY_SIZE(desc[0].data);
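	/* each BD carries entries_per_desc 32-bit words; locate the word
	 * holding the BD count for each register type
	 */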
12325 	for (i = 0; i < type_num; i++) {
12326 		offset = hclge_dfx_bd_offset_list[i];
12327 		index = offset % entries_per_desc;
12328 		desc_index = offset / entries_per_desc;
12329 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12330 	}
12331 
12332 	return ret;
12333 }
12334 
12335 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12336 				  struct hclge_desc *desc_src, int bd_num,
12337 				  enum hclge_opcode_type cmd)
12338 {
12339 	struct hclge_desc *desc = desc_src;
12340 	int i, ret;
12341 
12342 	hclge_cmd_setup_basic_desc(desc, cmd, true);
12343 	for (i = 0; i < bd_num - 1; i++) {
12344 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12345 		desc++;
12346 		hclge_cmd_setup_basic_desc(desc, cmd, true);
12347 	}
12348 
12349 	desc = desc_src;
12350 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12351 	if (ret)
12352 		dev_err(&hdev->pdev->dev,
12353 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12354 			cmd, ret);
12355 
12356 	return ret;
12357 }
12358 
12359 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12360 				    void *data)
12361 {
12362 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12363 	struct hclge_desc *desc = desc_src;
12364 	u32 *reg = data;
12365 
12366 	entries_per_desc = ARRAY_SIZE(desc->data);
12367 	reg_num = entries_per_desc * bd_num;
12368 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12369 	for (i = 0; i < reg_num; i++) {
12370 		index = i % entries_per_desc;
12371 		desc_index = i / entries_per_desc;
12372 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
12373 	}
12374 	for (i = 0; i < separator_num; i++)
12375 		*reg++ = SEPARATOR_VALUE;
12376 
12377 	return reg_num + separator_num;
12378 }
12379 
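/* Compute the buffer length in bytes needed to dump all DFX registers,
 * including the separator words appended after each register type.
 */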
12380 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12381 {
12382 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12383 	int data_len_per_desc, bd_num, i;
12384 	int *bd_num_list;
12385 	u32 data_len;
12386 	int ret;
12387 
12388 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12389 	if (!bd_num_list)
12390 		return -ENOMEM;
12391 
12392 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12393 	if (ret) {
12394 		dev_err(&hdev->pdev->dev,
12395 			"Get dfx reg bd num failed, status is %d.\n", ret);
12396 		goto out;
12397 	}
12398 
12399 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
12400 	*len = 0;
12401 	for (i = 0; i < dfx_reg_type_num; i++) {
12402 		bd_num = bd_num_list[i];
12403 		data_len = data_len_per_desc * bd_num;
12404 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12405 	}
12406 
12407 out:
12408 	kfree(bd_num_list);
12409 	return ret;
12410 }
12411 
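/* Dump all DFX registers into the caller's buffer: query the per-type BD
 * numbers, allocate descriptors sized for the largest type, then read and
 * copy out each type in turn.
 */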
12412 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12413 {
12414 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12415 	int bd_num, bd_num_max, buf_len, i;
12416 	struct hclge_desc *desc_src;
12417 	int *bd_num_list;
12418 	u32 *reg = data;
12419 	int ret;
12420 
12421 	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12422 	if (!bd_num_list)
12423 		return -ENOMEM;
12424 
12425 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12426 	if (ret) {
12427 		dev_err(&hdev->pdev->dev,
12428 			"Get dfx reg bd num failed, status is %d.\n", ret);
12429 		goto out;
12430 	}
12431 
12432 	bd_num_max = bd_num_list[0];
12433 	for (i = 1; i < dfx_reg_type_num; i++)
12434 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12435 
12436 	buf_len = sizeof(*desc_src) * bd_num_max;
12437 	desc_src = kzalloc(buf_len, GFP_KERNEL);
12438 	if (!desc_src) {
12439 		ret = -ENOMEM;
12440 		goto out;
12441 	}
12442 
12443 	for (i = 0; i < dfx_reg_type_num; i++) {
12444 		bd_num = bd_num_list[i];
12445 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12446 					     hclge_dfx_reg_opcode_list[i]);
12447 		if (ret) {
12448 			dev_err(&hdev->pdev->dev,
12449 				"Get dfx reg failed, status is %d.\n", ret);
12450 			break;
12451 		}
12452 
12453 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12454 	}
12455 
12456 	kfree(desc_src);
12457 out:
12458 	kfree(bd_num_list);
12459 	return ret;
12460 }
12461 
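/* Read the directly accessible PF registers (command queue, common,
 * per-ring and per-TQP-vector interrupt registers) from PCIe register
 * space. Returns the number of u32 entries written, separators included.
 */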
12462 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12463 			      struct hnae3_knic_private_info *kinfo)
12464 {
12465 #define HCLGE_RING_REG_OFFSET		0x200
12466 #define HCLGE_RING_INT_REG_OFFSET	0x4
12467 
12468 	int i, j, reg_num, separator_num;
12469 	int data_num_sum;
12470 	u32 *reg = data;
12471 
12472 	/* fetch per-PF register values from the PF PCIe register space */
12473 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12474 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12475 	for (i = 0; i < reg_num; i++)
12476 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12477 	for (i = 0; i < separator_num; i++)
12478 		*reg++ = SEPARATOR_VALUE;
12479 	data_num_sum = reg_num + separator_num;
12480 
12481 	reg_num = ARRAY_SIZE(common_reg_addr_list);
12482 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12483 	for (i = 0; i < reg_num; i++)
12484 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12485 	for (i = 0; i < separator_num; i++)
12486 		*reg++ = SEPARATOR_VALUE;
12487 	data_num_sum += reg_num + separator_num;
12488 
12489 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
12490 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12491 	for (j = 0; j < kinfo->num_tqps; j++) {
12492 		for (i = 0; i < reg_num; i++)
12493 			*reg++ = hclge_read_dev(&hdev->hw,
12494 						ring_reg_addr_list[i] +
12495 						HCLGE_RING_REG_OFFSET * j);
12496 		for (i = 0; i < separator_num; i++)
12497 			*reg++ = SEPARATOR_VALUE;
12498 	}
12499 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12500 
12501 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12502 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12503 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
12504 		for (i = 0; i < reg_num; i++)
12505 			*reg++ = hclge_read_dev(&hdev->hw,
12506 						tqp_intr_reg_addr_list[i] +
12507 						HCLGE_RING_INT_REG_OFFSET * j);
12508 		for (i = 0; i < separator_num; i++)
12509 			*reg++ = SEPARATOR_VALUE;
12510 	}
12511 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12512 
12513 	return data_num_sum;
12514 }
12515 
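/* ethtool get_regs_len hook: the dump is laid out in REG_LEN_PER_LINE-byte
 * lines with one separator line per register block, followed by the DFX
 * register data.
 */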
12516 static int hclge_get_regs_len(struct hnae3_handle *handle)
12517 {
12518 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12519 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12520 	struct hclge_vport *vport = hclge_get_vport(handle);
12521 	struct hclge_dev *hdev = vport->back;
12522 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12523 	int regs_lines_32_bit, regs_lines_64_bit;
12524 	int ret;
12525 
12526 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12527 	if (ret) {
12528 		dev_err(&hdev->pdev->dev,
12529 			"Get register number failed, ret = %d.\n", ret);
12530 		return ret;
12531 	}
12532 
12533 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12534 	if (ret) {
12535 		dev_err(&hdev->pdev->dev,
12536 			"Get dfx reg len failed, ret = %d.\n", ret);
12537 		return ret;
12538 	}
12539 
12540 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12541 		REG_SEPARATOR_LINE;
12542 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12543 		REG_SEPARATOR_LINE;
12544 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12545 		REG_SEPARATOR_LINE;
12546 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12547 		REG_SEPARATOR_LINE;
12548 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12549 		REG_SEPARATOR_LINE;
12550 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12551 		REG_SEPARATOR_LINE;
12552 
12553 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12554 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12555 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12556 }
12557 
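/* ethtool get_regs hook: dump the directly readable PF registers, then the
 * 32-bit and 64-bit registers queried from firmware, and finally the DFX
 * registers.
 */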
12558 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12559 			   void *data)
12560 {
12561 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12562 	struct hclge_vport *vport = hclge_get_vport(handle);
12563 	struct hclge_dev *hdev = vport->back;
12564 	u32 regs_num_32_bit, regs_num_64_bit;
12565 	int i, reg_num, separator_num, ret;
12566 	u32 *reg = data;
12567 
12568 	*version = hdev->fw_version;
12569 
12570 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12571 	if (ret) {
12572 		dev_err(&hdev->pdev->dev,
12573 			"Get register number failed, ret = %d.\n", ret);
12574 		return;
12575 	}
12576 
12577 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12578 
12579 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12580 	if (ret) {
12581 		dev_err(&hdev->pdev->dev,
12582 			"Get 32 bit register failed, ret = %d.\n", ret);
12583 		return;
12584 	}
12585 	reg_num = regs_num_32_bit;
12586 	reg += reg_num;
12587 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12588 	for (i = 0; i < separator_num; i++)
12589 		*reg++ = SEPARATOR_VALUE;
12590 
12591 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12592 	if (ret) {
12593 		dev_err(&hdev->pdev->dev,
12594 			"Get 64 bit register failed, ret = %d.\n", ret);
12595 		return;
12596 	}
12597 	reg_num = regs_num_64_bit * 2;
12598 	reg += reg_num;
12599 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12600 	for (i = 0; i < separator_num; i++)
12601 		*reg++ = SEPARATOR_VALUE;
12602 
12603 	ret = hclge_get_dfx_reg(hdev, reg);
12604 	if (ret)
12605 		dev_err(&hdev->pdev->dev,
12606 			"Get dfx register failed, ret = %d.\n", ret);
12607 }
12608 
12609 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12610 {
12611 	struct hclge_set_led_state_cmd *req;
12612 	struct hclge_desc desc;
12613 	int ret;
12614 
12615 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12616 
12617 	req = (struct hclge_set_led_state_cmd *)desc.data;
12618 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12619 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12620 
12621 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12622 	if (ret)
12623 		dev_err(&hdev->pdev->dev,
12624 			"Send set led state cmd error, ret = %d\n", ret);
12625 
12626 	return ret;
12627 }
12628 
12629 enum hclge_led_status {
12630 	HCLGE_LED_OFF,
12631 	HCLGE_LED_ON,
12632 	HCLGE_LED_NO_CHANGE = 0xFF,
12633 };
12634 
12635 static int hclge_set_led_id(struct hnae3_handle *handle,
12636 			    enum ethtool_phys_id_state status)
12637 {
12638 	struct hclge_vport *vport = hclge_get_vport(handle);
12639 	struct hclge_dev *hdev = vport->back;
12640 
12641 	switch (status) {
12642 	case ETHTOOL_ID_ACTIVE:
12643 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
12644 	case ETHTOOL_ID_INACTIVE:
12645 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12646 	default:
12647 		return -EINVAL;
12648 	}
12649 }
12650 
12651 static void hclge_get_link_mode(struct hnae3_handle *handle,
12652 				unsigned long *supported,
12653 				unsigned long *advertising)
12654 {
12655 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12656 	struct hclge_vport *vport = hclge_get_vport(handle);
12657 	struct hclge_dev *hdev = vport->back;
12658 	unsigned int idx = 0;
12659 
12660 	for (; idx < size; idx++) {
12661 		supported[idx] = hdev->hw.mac.supported[idx];
12662 		advertising[idx] = hdev->hw.mac.advertising[idx];
12663 	}
12664 }
12665 
12666 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12667 {
12668 	struct hclge_vport *vport = hclge_get_vport(handle);
12669 	struct hclge_dev *hdev = vport->back;
12670 
12671 	return hclge_config_gro(hdev, enable);
12672 }
12673 
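/* Re-apply the promiscuous configuration for the PF and for every VF whose
 * requested mode has changed; for untrusted VFs only the broadcast setting
 * is honored.
 */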
12674 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12675 {
12676 	struct hclge_vport *vport = &hdev->vport[0];
12677 	struct hnae3_handle *handle = &vport->nic;
12678 	u8 tmp_flags;
12679 	int ret;
12680 	u16 i;
12681 
12682 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12683 		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12684 		vport->last_promisc_flags = vport->overflow_promisc_flags;
12685 	}
12686 
12687 	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12688 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12689 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12690 					     tmp_flags & HNAE3_MPE);
12691 		if (!ret) {
12692 			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12693 				  &vport->state);
12694 			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12695 				&vport->state);
12696 		}
12697 	}
12698 
12699 	for (i = 1; i < hdev->num_alloc_vport; i++) {
12700 		bool uc_en = false;
12701 		bool mc_en = false;
12702 		bool bc_en;
12703 
12704 		vport = &hdev->vport[i];
12705 
12706 		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12707 					&vport->state))
12708 			continue;
12709 
12710 		if (vport->vf_info.trusted) {
12711 			uc_en = vport->vf_info.request_uc_en > 0;
12712 			mc_en = vport->vf_info.request_mc_en > 0;
12713 		}
12714 		bc_en = vport->vf_info.request_bc_en > 0;
12715 
12716 		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12717 						 mc_en, bc_en);
12718 		if (ret) {
12719 			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12720 				&vport->state);
12721 			return;
12722 		}
12723 		hclge_set_vport_vlan_fltr_change(vport);
12724 	}
12725 }
12726 
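/* Ask the firmware whether an SFP/QSFP module is currently present. */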
12727 static bool hclge_module_existed(struct hclge_dev *hdev)
12728 {
12729 	struct hclge_desc desc;
12730 	u32 existed;
12731 	int ret;
12732 
12733 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12734 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12735 	if (ret) {
12736 		dev_err(&hdev->pdev->dev,
12737 			"failed to get SFP exist state, ret = %d\n", ret);
12738 		return false;
12739 	}
12740 
12741 	existed = le32_to_cpu(desc.data[0]);
12742 
12743 	return existed != 0;
12744 }
12745 
12746 /* need 6 BDs (140 bytes in total) in one reading;
12747  * return the number of bytes actually read, 0 means the read failed.
12748  */
12749 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12750 				     u32 len, u8 *data)
12751 {
12752 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12753 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12754 	u16 read_len;
12755 	u16 copy_len;
12756 	int ret;
12757 	int i;
12758 
12759 	/* setup all 6 bds to read module eeprom info. */
12760 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12761 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12762 					   true);
12763 
12764 		/* bd0~bd4 need next flag */
12765 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12766 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12767 	}
12768 
12769 	/* setup bd0; this bd contains the offset and read length. */
12770 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12771 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12772 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12773 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
12774 
12775 	ret = hclge_cmd_send(&hdev->hw, desc, i);
12776 	if (ret) {
12777 		dev_err(&hdev->pdev->dev,
12778 			"failed to get SFP eeprom info, ret = %d\n", ret);
12779 		return 0;
12780 	}
12781 
12782 	/* copy sfp info from bd0 to out buffer. */
12783 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12784 	memcpy(data, sfp_info_bd0->data, copy_len);
12785 	read_len = copy_len;
12786 
12787 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
12788 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12789 		if (read_len >= len)
12790 			return read_len;
12791 
12792 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12793 		memcpy(data + read_len, desc[i].data, copy_len);
12794 		read_len += copy_len;
12795 	}
12796 
12797 	return read_len;
12798 }
12799 
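/* ethtool get_module_eeprom hook: only supported on fiber ports; reads the
 * module EEPROM in chunks of up to HCLGE_SFP_INFO_MAX_LEN bytes until the
 * requested length has been filled.
 */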
12800 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12801 				   u32 len, u8 *data)
12802 {
12803 	struct hclge_vport *vport = hclge_get_vport(handle);
12804 	struct hclge_dev *hdev = vport->back;
12805 	u32 read_len = 0;
12806 	u16 data_len;
12807 
12808 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12809 		return -EOPNOTSUPP;
12810 
12811 	if (!hclge_module_existed(hdev))
12812 		return -ENXIO;
12813 
12814 	while (read_len < len) {
12815 		data_len = hclge_get_sfp_eeprom_info(hdev,
12816 						     offset + read_len,
12817 						     len - read_len,
12818 						     data + read_len);
12819 		if (!data_len)
12820 			return -EIO;
12821 
12822 		read_len += data_len;
12823 	}
12824 
12825 	return 0;
12826 }
12827 
12828 static const struct hnae3_ae_ops hclge_ops = {
12829 	.init_ae_dev = hclge_init_ae_dev,
12830 	.uninit_ae_dev = hclge_uninit_ae_dev,
12831 	.reset_prepare = hclge_reset_prepare_general,
12832 	.reset_done = hclge_reset_done,
12833 	.init_client_instance = hclge_init_client_instance,
12834 	.uninit_client_instance = hclge_uninit_client_instance,
12835 	.map_ring_to_vector = hclge_map_ring_to_vector,
12836 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12837 	.get_vector = hclge_get_vector,
12838 	.put_vector = hclge_put_vector,
12839 	.set_promisc_mode = hclge_set_promisc_mode,
12840 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
12841 	.set_loopback = hclge_set_loopback,
12842 	.start = hclge_ae_start,
12843 	.stop = hclge_ae_stop,
12844 	.client_start = hclge_client_start,
12845 	.client_stop = hclge_client_stop,
12846 	.get_status = hclge_get_status,
12847 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
12848 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12849 	.get_media_type = hclge_get_media_type,
12850 	.check_port_speed = hclge_check_port_speed,
12851 	.get_fec = hclge_get_fec,
12852 	.set_fec = hclge_set_fec,
12853 	.get_rss_key_size = hclge_get_rss_key_size,
12854 	.get_rss = hclge_get_rss,
12855 	.set_rss = hclge_set_rss,
12856 	.set_rss_tuple = hclge_set_rss_tuple,
12857 	.get_rss_tuple = hclge_get_rss_tuple,
12858 	.get_tc_size = hclge_get_tc_size,
12859 	.get_mac_addr = hclge_get_mac_addr,
12860 	.set_mac_addr = hclge_set_mac_addr,
12861 	.do_ioctl = hclge_do_ioctl,
12862 	.add_uc_addr = hclge_add_uc_addr,
12863 	.rm_uc_addr = hclge_rm_uc_addr,
12864 	.add_mc_addr = hclge_add_mc_addr,
12865 	.rm_mc_addr = hclge_rm_mc_addr,
12866 	.set_autoneg = hclge_set_autoneg,
12867 	.get_autoneg = hclge_get_autoneg,
12868 	.restart_autoneg = hclge_restart_autoneg,
12869 	.halt_autoneg = hclge_halt_autoneg,
12870 	.get_pauseparam = hclge_get_pauseparam,
12871 	.set_pauseparam = hclge_set_pauseparam,
12872 	.set_mtu = hclge_set_mtu,
12873 	.reset_queue = hclge_reset_tqp,
12874 	.get_stats = hclge_get_stats,
12875 	.get_mac_stats = hclge_get_mac_stat,
12876 	.update_stats = hclge_update_stats,
12877 	.get_strings = hclge_get_strings,
12878 	.get_sset_count = hclge_get_sset_count,
12879 	.get_fw_version = hclge_get_fw_version,
12880 	.get_mdix_mode = hclge_get_mdix_mode,
12881 	.enable_vlan_filter = hclge_enable_vlan_filter,
12882 	.set_vlan_filter = hclge_set_vlan_filter,
12883 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12884 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12885 	.reset_event = hclge_reset_event,
12886 	.get_reset_level = hclge_get_reset_level,
12887 	.set_default_reset_request = hclge_set_def_reset_request,
12888 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12889 	.set_channels = hclge_set_channels,
12890 	.get_channels = hclge_get_channels,
12891 	.get_regs_len = hclge_get_regs_len,
12892 	.get_regs = hclge_get_regs,
12893 	.set_led_id = hclge_set_led_id,
12894 	.get_link_mode = hclge_get_link_mode,
12895 	.add_fd_entry = hclge_add_fd_entry,
12896 	.del_fd_entry = hclge_del_fd_entry,
12897 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12898 	.get_fd_rule_info = hclge_get_fd_rule_info,
12899 	.get_fd_all_rules = hclge_get_all_rules,
12900 	.enable_fd = hclge_enable_fd,
12901 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
12902 	.dbg_read_cmd = hclge_dbg_read_cmd,
12903 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
12904 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
12905 	.ae_dev_resetting = hclge_ae_dev_resetting,
12906 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12907 	.set_gro_en = hclge_gro_en,
12908 	.get_global_queue_id = hclge_covert_handle_qid_global,
12909 	.set_timer_task = hclge_set_timer_task,
12910 	.mac_connect_phy = hclge_mac_connect_phy,
12911 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
12912 	.get_vf_config = hclge_get_vf_config,
12913 	.set_vf_link_state = hclge_set_vf_link_state,
12914 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
12915 	.set_vf_trust = hclge_set_vf_trust,
12916 	.set_vf_rate = hclge_set_vf_rate,
12917 	.set_vf_mac = hclge_set_vf_mac,
12918 	.get_module_eeprom = hclge_get_module_eeprom,
12919 	.get_cmdq_stat = hclge_get_cmdq_stat,
12920 	.add_cls_flower = hclge_add_cls_flower,
12921 	.del_cls_flower = hclge_del_cls_flower,
12922 	.cls_flower_active = hclge_is_cls_flower_active,
12923 	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12924 	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12925 	.set_tx_hwts_info = hclge_ptp_set_tx_info,
12926 	.get_rx_hwts = hclge_ptp_get_rx_hwts,
12927 	.get_ts_info = hclge_ptp_get_ts_info,
12928 };
12929 
12930 static struct hnae3_ae_algo ae_algo = {
12931 	.ops = &hclge_ops,
12932 	.pdev_id_table = ae_algo_pci_tbl,
12933 };
12934 
12935 static int hclge_init(void)
12936 {
12937 	pr_info("%s is initializing\n", HCLGE_NAME);
12938 
12939 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
12940 	if (!hclge_wq) {
12941 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12942 		return -ENOMEM;
12943 	}
12944 
12945 	hnae3_register_ae_algo(&ae_algo);
12946 
12947 	return 0;
12948 }
12949 
12950 static void hclge_exit(void)
12951 {
12952 	hnae3_unregister_ae_algo(&ae_algo);
12953 	destroy_workqueue(hclge_wq);
12954 }
12955 module_init(hclge_init);
12956 module_exit(hclge_exit);
12957 
12958 MODULE_LICENSE("GPL");
12959 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12960 MODULE_DESCRIPTION("HCLGE Driver");
12961 MODULE_VERSION(HCLGE_MOD_VERSION);
12962