1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
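/* helpers to read a 64-bit counter at a byte offset within a stats struct */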
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66 			       u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70 						   unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72 
73 static struct hnae3_ae_algo ae_algo;
74 
75 static struct workqueue_struct *hclge_wq;
76 
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85 	/* required last entry */
86 	{0, }
87 };
88 
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90 
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92 					 HCLGE_CMDQ_TX_ADDR_H_REG,
93 					 HCLGE_CMDQ_TX_DEPTH_REG,
94 					 HCLGE_CMDQ_TX_TAIL_REG,
95 					 HCLGE_CMDQ_TX_HEAD_REG,
96 					 HCLGE_CMDQ_RX_ADDR_L_REG,
97 					 HCLGE_CMDQ_RX_ADDR_H_REG,
98 					 HCLGE_CMDQ_RX_DEPTH_REG,
99 					 HCLGE_CMDQ_RX_TAIL_REG,
100 					 HCLGE_CMDQ_RX_HEAD_REG,
101 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
102 					 HCLGE_CMDQ_INTR_STS_REG,
103 					 HCLGE_CMDQ_INTR_EN_REG,
104 					 HCLGE_CMDQ_INTR_GEN_REG};
105 
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107 					   HCLGE_VECTOR0_OTER_EN_REG,
108 					   HCLGE_MISC_RESET_STS_REG,
109 					   HCLGE_MISC_VECTOR_INT_STS,
110 					   HCLGE_GLOBAL_RESET_REG,
111 					   HCLGE_FUN_RST_ING,
112 					   HCLGE_GRO_EN_REG};
113 
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115 					 HCLGE_RING_RX_ADDR_H_REG,
116 					 HCLGE_RING_RX_BD_NUM_REG,
117 					 HCLGE_RING_RX_BD_LENGTH_REG,
118 					 HCLGE_RING_RX_MERGE_EN_REG,
119 					 HCLGE_RING_RX_TAIL_REG,
120 					 HCLGE_RING_RX_HEAD_REG,
121 					 HCLGE_RING_RX_FBD_NUM_REG,
122 					 HCLGE_RING_RX_OFFSET_REG,
123 					 HCLGE_RING_RX_FBD_OFFSET_REG,
124 					 HCLGE_RING_RX_STASH_REG,
125 					 HCLGE_RING_RX_BD_ERR_REG,
126 					 HCLGE_RING_TX_ADDR_L_REG,
127 					 HCLGE_RING_TX_ADDR_H_REG,
128 					 HCLGE_RING_TX_BD_NUM_REG,
129 					 HCLGE_RING_TX_PRIORITY_REG,
130 					 HCLGE_RING_TX_TC_REG,
131 					 HCLGE_RING_TX_MERGE_EN_REG,
132 					 HCLGE_RING_TX_TAIL_REG,
133 					 HCLGE_RING_TX_HEAD_REG,
134 					 HCLGE_RING_TX_FBD_NUM_REG,
135 					 HCLGE_RING_TX_OFFSET_REG,
136 					 HCLGE_RING_TX_EBD_NUM_REG,
137 					 HCLGE_RING_TX_EBD_OFFSET_REG,
138 					 HCLGE_RING_TX_BD_ERR_REG,
139 					 HCLGE_RING_EN_REG};
140 
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142 					     HCLGE_TQP_INTR_GL0_REG,
143 					     HCLGE_TQP_INTR_GL1_REG,
144 					     HCLGE_TQP_INTR_GL2_REG,
145 					     HCLGE_TQP_INTR_RL_REG};
146 
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148 	"App    Loopback test",
149 	"Serdes serial Loopback test",
150 	"Serdes parallel Loopback test",
151 	"Phy    Loopback test"
152 };
153 
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155 	{"mac_tx_mac_pause_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157 	{"mac_rx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159 	{"mac_tx_control_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161 	{"mac_rx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163 	{"mac_tx_pfc_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165 	{"mac_tx_pfc_pri0_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167 	{"mac_tx_pfc_pri1_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169 	{"mac_tx_pfc_pri2_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171 	{"mac_tx_pfc_pri3_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173 	{"mac_tx_pfc_pri4_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175 	{"mac_tx_pfc_pri5_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177 	{"mac_tx_pfc_pri6_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179 	{"mac_tx_pfc_pri7_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181 	{"mac_rx_pfc_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183 	{"mac_rx_pfc_pri0_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185 	{"mac_rx_pfc_pri1_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187 	{"mac_rx_pfc_pri2_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189 	{"mac_rx_pfc_pri3_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191 	{"mac_rx_pfc_pri4_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193 	{"mac_rx_pfc_pri5_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195 	{"mac_rx_pfc_pri6_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197 	{"mac_rx_pfc_pri7_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199 	{"mac_tx_total_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201 	{"mac_tx_total_oct_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203 	{"mac_tx_good_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205 	{"mac_tx_bad_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207 	{"mac_tx_good_oct_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209 	{"mac_tx_bad_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211 	{"mac_tx_uni_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213 	{"mac_tx_multi_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215 	{"mac_tx_broad_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217 	{"mac_tx_undersize_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219 	{"mac_tx_oversize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221 	{"mac_tx_64_oct_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223 	{"mac_tx_65_127_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225 	{"mac_tx_128_255_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227 	{"mac_tx_256_511_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229 	{"mac_tx_512_1023_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231 	{"mac_tx_1024_1518_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233 	{"mac_tx_1519_2047_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235 	{"mac_tx_2048_4095_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237 	{"mac_tx_4096_8191_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239 	{"mac_tx_8192_9216_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241 	{"mac_tx_9217_12287_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243 	{"mac_tx_12288_16383_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245 	{"mac_tx_1519_max_good_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247 	{"mac_tx_1519_max_bad_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249 	{"mac_rx_total_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251 	{"mac_rx_total_oct_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253 	{"mac_rx_good_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255 	{"mac_rx_bad_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257 	{"mac_rx_good_oct_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259 	{"mac_rx_bad_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261 	{"mac_rx_uni_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263 	{"mac_rx_multi_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265 	{"mac_rx_broad_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267 	{"mac_rx_undersize_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269 	{"mac_rx_oversize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271 	{"mac_rx_64_oct_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273 	{"mac_rx_65_127_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275 	{"mac_rx_128_255_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277 	{"mac_rx_256_511_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279 	{"mac_rx_512_1023_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281 	{"mac_rx_1024_1518_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283 	{"mac_rx_1519_2047_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285 	{"mac_rx_2048_4095_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287 	{"mac_rx_4096_8191_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289 	{"mac_rx_8192_9216_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291 	{"mac_rx_9217_12287_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293 	{"mac_rx_12288_16383_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295 	{"mac_rx_1519_max_good_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297 	{"mac_rx_1519_max_bad_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299 
300 	{"mac_tx_fragment_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302 	{"mac_tx_undermin_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304 	{"mac_tx_jabber_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306 	{"mac_tx_err_all_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308 	{"mac_tx_from_app_good_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310 	{"mac_tx_from_app_bad_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312 	{"mac_rx_fragment_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314 	{"mac_rx_undermin_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316 	{"mac_rx_jabber_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318 	{"mac_rx_fcs_err_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320 	{"mac_rx_send_app_good_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322 	{"mac_rx_send_app_bad_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325 
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327 	{
328 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
330 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331 		.i_port_bitmap = 0x1,
332 	},
333 };
334 
335 static const u8 hclge_hash_key[] = {
336 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342 
343 static const u32 hclge_dfx_bd_offset_list[] = {
344 	HCLGE_DFX_BIOS_BD_OFFSET,
345 	HCLGE_DFX_SSU_0_BD_OFFSET,
346 	HCLGE_DFX_SSU_1_BD_OFFSET,
347 	HCLGE_DFX_IGU_BD_OFFSET,
348 	HCLGE_DFX_RPU_0_BD_OFFSET,
349 	HCLGE_DFX_RPU_1_BD_OFFSET,
350 	HCLGE_DFX_NCSI_BD_OFFSET,
351 	HCLGE_DFX_RTC_BD_OFFSET,
352 	HCLGE_DFX_PPP_BD_OFFSET,
353 	HCLGE_DFX_RCB_BD_OFFSET,
354 	HCLGE_DFX_TQP_BD_OFFSET,
355 	HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357 
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
360 	HCLGE_OPC_DFX_SSU_REG_0,
361 	HCLGE_OPC_DFX_SSU_REG_1,
362 	HCLGE_OPC_DFX_IGU_EGU_REG,
363 	HCLGE_OPC_DFX_RPU_REG_0,
364 	HCLGE_OPC_DFX_RPU_REG_1,
365 	HCLGE_OPC_DFX_NCSI_REG,
366 	HCLGE_OPC_DFX_RTC_REG,
367 	HCLGE_OPC_DFX_PPP_REG,
368 	HCLGE_OPC_DFX_RCB_REG,
369 	HCLGE_OPC_DFX_TQP_REG,
370 	HCLGE_OPC_DFX_SSU_REG_2
371 };
372 
373 static const struct key_info meta_data_key_info[] = {
374 	{ PACKET_TYPE_ID, 6},
375 	{ IP_FRAGEMENT, 1},
376 	{ ROCE_TYPE, 1},
377 	{ NEXT_KEY, 5},
378 	{ VLAN_NUMBER, 2},
379 	{ SRC_VPORT, 12},
380 	{ DST_VPORT, 12},
381 	{ TUNNEL_PACKET, 1},
382 };
383 
384 static const struct key_info tuple_key_info[] = {
385 	{ OUTER_DST_MAC, 48},
386 	{ OUTER_SRC_MAC, 48},
387 	{ OUTER_VLAN_TAG_FST, 16},
388 	{ OUTER_VLAN_TAG_SEC, 16},
389 	{ OUTER_ETH_TYPE, 16},
390 	{ OUTER_L2_RSV, 16},
391 	{ OUTER_IP_TOS, 8},
392 	{ OUTER_IP_PROTO, 8},
393 	{ OUTER_SRC_IP, 32},
394 	{ OUTER_DST_IP, 32},
395 	{ OUTER_L3_RSV, 16},
396 	{ OUTER_SRC_PORT, 16},
397 	{ OUTER_DST_PORT, 16},
398 	{ OUTER_L4_RSV, 32},
399 	{ OUTER_TUN_VNI, 24},
400 	{ OUTER_TUN_FLOW_ID, 8},
401 	{ INNER_DST_MAC, 48},
402 	{ INNER_SRC_MAC, 48},
403 	{ INNER_VLAN_TAG_FST, 16},
404 	{ INNER_VLAN_TAG_SEC, 16},
405 	{ INNER_ETH_TYPE, 16},
406 	{ INNER_L2_RSV, 16},
407 	{ INNER_IP_TOS, 8},
408 	{ INNER_IP_PROTO, 8},
409 	{ INNER_SRC_IP, 32},
410 	{ INNER_DST_IP, 32},
411 	{ INNER_L3_RSV, 16},
412 	{ INNER_SRC_PORT, 16},
413 	{ INNER_DST_PORT, 16},
414 	{ INNER_L4_RSV, 32},
415 };
416 
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420 
421 	u64 *data = (u64 *)(&hdev->mac_stats);
422 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423 	__le64 *desc_data;
424 	int i, k, n;
425 	int ret;
426 
427 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429 	if (ret) {
430 		dev_err(&hdev->pdev->dev,
431 			"Get MAC pkt stats fail, status = %d.\n", ret);
432 
433 		return ret;
434 	}
435 
436 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437 		/* for special opcode 0032, only the first desc has the head */
438 		if (unlikely(i == 0)) {
439 			desc_data = (__le64 *)(&desc[i].data[0]);
440 			n = HCLGE_RD_FIRST_STATS_NUM;
441 		} else {
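			/* subsequent descs carry stats data starting from the
			 * very beginning of the descriptor
			 */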
442 			desc_data = (__le64 *)(&desc[i]);
443 			n = HCLGE_RD_OTHER_STATS_NUM;
444 		}
445 
446 		for (k = 0; k < n; k++) {
447 			*data += le64_to_cpu(*desc_data);
448 			data++;
449 			desc_data++;
450 		}
451 	}
452 
453 	return 0;
454 }
455 
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458 	u64 *data = (u64 *)(&hdev->mac_stats);
459 	struct hclge_desc *desc;
460 	__le64 *desc_data;
461 	u16 i, k, n;
462 	int ret;
463 
464 	/* This may be called inside atomic sections,
465 	 * so GFP_ATOMIC is more suitable here
466 	 */
467 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468 	if (!desc)
469 		return -ENOMEM;
470 
471 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473 	if (ret) {
474 		kfree(desc);
475 		return ret;
476 	}
477 
478 	for (i = 0; i < desc_num; i++) {
479 		/* for special opcode 0034, only the first desc has the head */
480 		if (i == 0) {
481 			desc_data = (__le64 *)(&desc[i].data[0]);
482 			n = HCLGE_RD_FIRST_STATS_NUM;
483 		} else {
484 			desc_data = (__le64 *)(&desc[i]);
485 			n = HCLGE_RD_OTHER_STATS_NUM;
486 		}
487 
488 		for (k = 0; k < n; k++) {
489 			*data += le64_to_cpu(*desc_data);
490 			data++;
491 			desc_data++;
492 		}
493 	}
494 
495 	kfree(desc);
496 
497 	return 0;
498 }
499 
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502 	struct hclge_desc desc;
503 	__le32 *desc_data;
504 	u32 reg_num;
505 	int ret;
506 
507 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509 	if (ret)
510 		return ret;
511 
512 	desc_data = (__le32 *)(&desc.data[0]);
513 	reg_num = le32_to_cpu(*desc_data);
514 
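	/* the first descriptor covers 3 registers; every further descriptor
	 * covers 4 registers, hence the round-up below
	 */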
515 	*desc_num = 1 + ((reg_num - 3) >> 2) +
516 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517 
518 	return 0;
519 }
520 
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523 	u32 desc_num;
524 	int ret;
525 
526 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
527 
528 	/* The firmware supports the new statistics acquisition method */
529 	if (!ret)
530 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
531 	else if (ret == -EOPNOTSUPP)
532 		ret = hclge_mac_update_stats_defective(hdev);
533 	else
534 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535 
536 	return ret;
537 }
538 
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542 	struct hclge_vport *vport = hclge_get_vport(handle);
543 	struct hclge_dev *hdev = vport->back;
544 	struct hnae3_queue *queue;
545 	struct hclge_desc desc[1];
546 	struct hclge_tqp *tqp;
547 	int ret, i;
548 
549 	for (i = 0; i < kinfo->num_tqps; i++) {
550 		queue = handle->kinfo.tqp[i];
551 		tqp = container_of(queue, struct hclge_tqp, q);
552 		/* command: HCLGE_OPC_QUERY_RX_STATUS */
553 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554 					   true);
555 
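		/* only the low 9 bits of the queue index are used */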
556 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
558 		if (ret) {
559 			dev_err(&hdev->pdev->dev,
560 				"Query tqp stat fail, status = %d, queue = %d\n",
561 				ret, i);
562 			return ret;
563 		}
564 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565 			le32_to_cpu(desc[0].data[1]);
566 	}
567 
568 	for (i = 0; i < kinfo->num_tqps; i++) {
569 		queue = handle->kinfo.tqp[i];
570 		tqp = container_of(queue, struct hclge_tqp, q);
571 		/* command: HCLGE_OPC_QUERY_TX_STATUS */
572 		hclge_cmd_setup_basic_desc(&desc[0],
573 					   HCLGE_OPC_QUERY_TX_STATUS,
574 					   true);
575 
576 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
578 		if (ret) {
579 			dev_err(&hdev->pdev->dev,
580 				"Query tqp stat fail, status = %d, queue = %d\n",
581 				ret, i);
582 			return ret;
583 		}
584 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585 			le32_to_cpu(desc[0].data[1]);
586 	}
587 
588 	return 0;
589 }
590 
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594 	struct hclge_tqp *tqp;
595 	u64 *buff = data;
596 	int i;
597 
598 	for (i = 0; i < kinfo->num_tqps; i++) {
599 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601 	}
602 
603 	for (i = 0; i < kinfo->num_tqps; i++) {
604 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606 	}
607 
608 	return buff;
609 }
610 
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614 
615 	/* each tqp has both a TX and a RX queue */
616 	return kinfo->num_tqps * 2;
617 }
618 
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	u8 *buff = data;
623 	int i = 0;
624 
625 	for (i = 0; i < kinfo->num_tqps; i++) {
626 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627 			struct hclge_tqp, q);
628 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629 			 tqp->index);
630 		buff = buff + ETH_GSTRING_LEN;
631 	}
632 
633 	for (i = 0; i < kinfo->num_tqps; i++) {
634 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635 			struct hclge_tqp, q);
636 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637 			 tqp->index);
638 		buff = buff + ETH_GSTRING_LEN;
639 	}
640 
641 	return buff;
642 }
643 
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645 				 const struct hclge_comm_stats_str strs[],
646 				 int size, u64 *data)
647 {
648 	u64 *buf = data;
649 	u32 i;
650 
651 	for (i = 0; i < size; i++)
652 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653 
654 	return buf + size;
655 }
656 
657 static u8 *hclge_comm_get_strings(u32 stringset,
658 				  const struct hclge_comm_stats_str strs[],
659 				  int size, u8 *data)
660 {
661 	char *buff = (char *)data;
662 	u32 i;
663 
664 	if (stringset != ETH_SS_STATS)
665 		return buff;
666 
667 	for (i = 0; i < size; i++) {
668 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669 		buff = buff + ETH_GSTRING_LEN;
670 	}
671 
672 	return (u8 *)buff;
673 }
674 
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677 	struct hnae3_handle *handle;
678 	int status;
679 
680 	handle = &hdev->vport[0].nic;
681 	if (handle->client) {
682 		status = hclge_tqps_update_stats(handle);
683 		if (status) {
684 			dev_err(&hdev->pdev->dev,
685 				"Update TQPS stats fail, status = %d.\n",
686 				status);
687 		}
688 	}
689 
690 	status = hclge_mac_update_stats(hdev);
691 	if (status)
692 		dev_err(&hdev->pdev->dev,
693 			"Update MAC stats fail, status = %d.\n", status);
694 }
695 
696 static void hclge_update_stats(struct hnae3_handle *handle,
697 			       struct net_device_stats *net_stats)
698 {
699 	struct hclge_vport *vport = hclge_get_vport(handle);
700 	struct hclge_dev *hdev = vport->back;
701 	int status;
702 
703 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704 		return;
705 
706 	status = hclge_mac_update_stats(hdev);
707 	if (status)
708 		dev_err(&hdev->pdev->dev,
709 			"Update MAC stats fail, status = %d.\n",
710 			status);
711 
712 	status = hclge_tqps_update_stats(handle);
713 	if (status)
714 		dev_err(&hdev->pdev->dev,
715 			"Update TQPS stats fail, status = %d.\n",
716 			status);
717 
718 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720 
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724 		HNAE3_SUPPORT_PHY_LOOPBACK |\
725 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727 
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int count = 0;
731 
732 	/* Loopback test support rules:
733 	 * mac: supported only in GE mode
734 	 * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
735 	 * phy: supported only when a phy device exists on the board
736 	 */
737 	if (stringset == ETH_SS_TEST) {
738 		/* clear loopback bit flags at first */
739 		handle->flags &= ~HCLGE_LOOPBACK_TEST_FLAGS;
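		/* app loopback is reported on revision 0x21 and later, or on
		 * older hardware only at GE speeds (10M/100M/1G)
		 */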
740 		if (hdev->pdev->revision >= 0x21 ||
741 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744 			count += 1;
745 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746 		}
747 
748 		count += 2;
749 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751 
752 		if (hdev->hw.mac.phydev) {
753 			count += 1;
754 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755 		}
756 
757 	} else if (stringset == ETH_SS_STATS) {
758 		count = ARRAY_SIZE(g_mac_stats_string) +
759 			hclge_tqps_get_sset_count(handle, stringset);
760 	}
761 
762 	return count;
763 }
764 
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766 			      u8 *data)
767 {
768 	u8 *p = data;
769 	int size;
770 
771 	if (stringset == ETH_SS_STATS) {
772 		size = ARRAY_SIZE(g_mac_stats_string);
773 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774 					   size, p);
775 		p = hclge_tqps_get_strings(handle, p);
776 	} else if (stringset == ETH_SS_TEST) {
777 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779 			       ETH_GSTRING_LEN);
780 			p += ETH_GSTRING_LEN;
781 		}
782 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784 			       ETH_GSTRING_LEN);
785 			p += ETH_GSTRING_LEN;
786 		}
787 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788 			memcpy(p,
789 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790 			       ETH_GSTRING_LEN);
791 			p += ETH_GSTRING_LEN;
792 		}
793 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795 			       ETH_GSTRING_LEN);
796 			p += ETH_GSTRING_LEN;
797 		}
798 	}
799 }
800 
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803 	struct hclge_vport *vport = hclge_get_vport(handle);
804 	struct hclge_dev *hdev = vport->back;
805 	u64 *p;
806 
807 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808 				 ARRAY_SIZE(g_mac_stats_string), data);
809 	p = hclge_tqps_get_stats(handle, p);
810 }
811 
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813 			       struct hns3_mac_stats *mac_stats)
814 {
815 	struct hclge_vport *vport = hclge_get_vport(handle);
816 	struct hclge_dev *hdev = vport->back;
817 
818 	hclge_update_stats(handle, NULL);
819 
820 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823 
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825 				   struct hclge_func_status_cmd *status)
826 {
827 #define HCLGE_MAC_ID_MASK	0xF
828 
829 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
830 		return -EINVAL;
831 
832 	/* Record whether this pf is the main pf */
833 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
834 		hdev->flag |= HCLGE_FLAG_MAIN;
835 	else
836 		hdev->flag &= ~HCLGE_FLAG_MAIN;
837 
838 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
839 	return 0;
840 }
841 
842 static int hclge_query_function_status(struct hclge_dev *hdev)
843 {
844 #define HCLGE_QUERY_MAX_CNT	5
845 
846 	struct hclge_func_status_cmd *req;
847 	struct hclge_desc desc;
848 	int timeout = 0;
849 	int ret;
850 
851 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
852 	req = (struct hclge_func_status_cmd *)desc.data;
853 
854 	do {
855 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
856 		if (ret) {
857 			dev_err(&hdev->pdev->dev,
858 				"query function status failed %d.\n", ret);
859 			return ret;
860 		}
861 
862 		/* Check whether pf reset is done */
863 		if (req->pf_state)
864 			break;
865 		usleep_range(1000, 2000);
866 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
867 
868 	return hclge_parse_func_status(hdev, req);
869 }
870 
871 static int hclge_query_pf_resource(struct hclge_dev *hdev)
872 {
873 	struct hclge_pf_res_cmd *req;
874 	struct hclge_desc desc;
875 	int ret;
876 
877 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
878 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
879 	if (ret) {
880 		dev_err(&hdev->pdev->dev,
881 			"query pf resource failed %d.\n", ret);
882 		return ret;
883 	}
884 
885 	req = (struct hclge_pf_res_cmd *)desc.data;
886 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
887 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
888 
889 	if (req->tx_buf_size)
890 		hdev->tx_buf_size =
891 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
892 	else
893 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
894 
895 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
896 
897 	if (req->dv_buf_size)
898 		hdev->dv_buf_size =
899 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
900 	else
901 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
902 
903 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
904 
905 	if (hnae3_dev_roce_supported(hdev)) {
906 		hdev->roce_base_msix_offset =
907 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
908 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
909 		hdev->num_roce_msi =
910 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
911 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
912 
913 		/* the nic's msix number is always equal to the roce's. */
914 		hdev->num_nic_msi = hdev->num_roce_msi;
915 
916 		/* The PF should have both NIC vectors and Roce vectors;
917 		 * NIC vectors are laid out before Roce vectors.
918 		 */
919 		hdev->num_msi = hdev->num_roce_msi +
920 				hdev->roce_base_msix_offset;
921 	} else {
922 		hdev->num_msi =
923 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
924 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
925 
926 		hdev->num_nic_msi = hdev->num_msi;
927 	}
928 
929 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
930 		dev_err(&hdev->pdev->dev,
931 			"Just %u msi resources, not enough for pf(min:2).\n",
932 			hdev->num_nic_msi);
933 		return -EINVAL;
934 	}
935 
936 	return 0;
937 }
938 
939 static int hclge_parse_speed(int speed_cmd, int *speed)
940 {
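	/* map the speed code from the configuration to a MAC speed value */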
941 	switch (speed_cmd) {
942 	case 6:
943 		*speed = HCLGE_MAC_SPEED_10M;
944 		break;
945 	case 7:
946 		*speed = HCLGE_MAC_SPEED_100M;
947 		break;
948 	case 0:
949 		*speed = HCLGE_MAC_SPEED_1G;
950 		break;
951 	case 1:
952 		*speed = HCLGE_MAC_SPEED_10G;
953 		break;
954 	case 2:
955 		*speed = HCLGE_MAC_SPEED_25G;
956 		break;
957 	case 3:
958 		*speed = HCLGE_MAC_SPEED_40G;
959 		break;
960 	case 4:
961 		*speed = HCLGE_MAC_SPEED_50G;
962 		break;
963 	case 5:
964 		*speed = HCLGE_MAC_SPEED_100G;
965 		break;
966 	default:
967 		return -EINVAL;
968 	}
969 
970 	return 0;
971 }
972 
973 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
974 {
975 	struct hclge_vport *vport = hclge_get_vport(handle);
976 	struct hclge_dev *hdev = vport->back;
977 	u32 speed_ability = hdev->hw.mac.speed_ability;
978 	u32 speed_bit = 0;
979 
980 	switch (speed) {
981 	case HCLGE_MAC_SPEED_10M:
982 		speed_bit = HCLGE_SUPPORT_10M_BIT;
983 		break;
984 	case HCLGE_MAC_SPEED_100M:
985 		speed_bit = HCLGE_SUPPORT_100M_BIT;
986 		break;
987 	case HCLGE_MAC_SPEED_1G:
988 		speed_bit = HCLGE_SUPPORT_1G_BIT;
989 		break;
990 	case HCLGE_MAC_SPEED_10G:
991 		speed_bit = HCLGE_SUPPORT_10G_BIT;
992 		break;
993 	case HCLGE_MAC_SPEED_25G:
994 		speed_bit = HCLGE_SUPPORT_25G_BIT;
995 		break;
996 	case HCLGE_MAC_SPEED_40G:
997 		speed_bit = HCLGE_SUPPORT_40G_BIT;
998 		break;
999 	case HCLGE_MAC_SPEED_50G:
1000 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1001 		break;
1002 	case HCLGE_MAC_SPEED_100G:
1003 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1004 		break;
1005 	default:
1006 		return -EINVAL;
1007 	}
1008 
1009 	if (speed_bit & speed_ability)
1010 		return 0;
1011 
1012 	return -EINVAL;
1013 }
1014 
1015 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1016 {
1017 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1018 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1019 				 mac->supported);
1020 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1021 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1022 				 mac->supported);
1023 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1024 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1025 				 mac->supported);
1026 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1027 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1028 				 mac->supported);
1029 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1030 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1031 				 mac->supported);
1032 }
1033 
1034 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1035 {
1036 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1037 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1038 				 mac->supported);
1039 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1040 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1041 				 mac->supported);
1042 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1050 				 mac->supported);
1051 }
1052 
1053 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1054 {
1055 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1056 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1057 				 mac->supported);
1058 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1059 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1060 				 mac->supported);
1061 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1069 				 mac->supported);
1070 }
1071 
1072 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1073 {
1074 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1075 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1076 				 mac->supported);
1077 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1078 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1079 				 mac->supported);
1080 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1081 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1082 				 mac->supported);
1083 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1088 				 mac->supported);
1089 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1091 				 mac->supported);
1092 }
1093 
1094 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1095 {
1096 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1097 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1098 
1099 	switch (mac->speed) {
1100 	case HCLGE_MAC_SPEED_10G:
1101 	case HCLGE_MAC_SPEED_40G:
1102 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1103 				 mac->supported);
1104 		mac->fec_ability =
1105 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1106 		break;
1107 	case HCLGE_MAC_SPEED_25G:
1108 	case HCLGE_MAC_SPEED_50G:
1109 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1110 				 mac->supported);
1111 		mac->fec_ability =
1112 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1113 			BIT(HNAE3_FEC_AUTO);
1114 		break;
1115 	case HCLGE_MAC_SPEED_100G:
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1117 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1118 		break;
1119 	default:
1120 		mac->fec_ability = 0;
1121 		break;
1122 	}
1123 }
1124 
1125 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1126 					u8 speed_ability)
1127 {
1128 	struct hclge_mac *mac = &hdev->hw.mac;
1129 
1130 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1131 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1132 				 mac->supported);
1133 
1134 	hclge_convert_setting_sr(mac, speed_ability);
1135 	hclge_convert_setting_lr(mac, speed_ability);
1136 	hclge_convert_setting_cr(mac, speed_ability);
1137 	if (hdev->pdev->revision >= 0x21)
1138 		hclge_convert_setting_fec(mac);
1139 
1140 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1141 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1142 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1143 }
1144 
1145 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1146 					    u8 speed_ability)
1147 {
1148 	struct hclge_mac *mac = &hdev->hw.mac;
1149 
1150 	hclge_convert_setting_kr(mac, speed_ability);
1151 	if (hdev->pdev->revision >= 0x21)
1152 		hclge_convert_setting_fec(mac);
1153 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1154 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1156 }
1157 
1158 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1159 					 u8 speed_ability)
1160 {
1161 	unsigned long *supported = hdev->hw.mac.supported;
1162 
1163 	/* default to supporting all speeds for a GE port */
1164 	if (!speed_ability)
1165 		speed_ability = HCLGE_SUPPORT_GE;
1166 
1167 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1168 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1169 				 supported);
1170 
1171 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1172 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1173 				 supported);
1174 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1175 				 supported);
1176 	}
1177 
1178 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1179 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1181 	}
1182 
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1184 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1185 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1186 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1187 }
1188 
1189 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1190 {
1191 	u8 media_type = hdev->hw.mac.media_type;
1192 
1193 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1194 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1195 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1196 		hclge_parse_copper_link_mode(hdev, speed_ability);
1197 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1198 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1199 }
1200 
1201 static u32 hclge_get_max_speed(u8 speed_ability)
1202 {
1203 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1204 		return HCLGE_MAC_SPEED_100G;
1205 
1206 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1207 		return HCLGE_MAC_SPEED_50G;
1208 
1209 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1210 		return HCLGE_MAC_SPEED_40G;
1211 
1212 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1213 		return HCLGE_MAC_SPEED_25G;
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1216 		return HCLGE_MAC_SPEED_10G;
1217 
1218 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1219 		return HCLGE_MAC_SPEED_1G;
1220 
1221 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1222 		return HCLGE_MAC_SPEED_100M;
1223 
1224 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1225 		return HCLGE_MAC_SPEED_10M;
1226 
1227 	return HCLGE_MAC_SPEED_1G;
1228 }
1229 
1230 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1231 {
1232 	struct hclge_cfg_param_cmd *req;
1233 	u64 mac_addr_tmp_high;
1234 	u64 mac_addr_tmp;
1235 	unsigned int i;
1236 
1237 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1238 
1239 	/* get the configuration */
1240 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241 					      HCLGE_CFG_VMDQ_M,
1242 					      HCLGE_CFG_VMDQ_S);
1243 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1244 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1245 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 					    HCLGE_CFG_TQP_DESC_N_M,
1247 					    HCLGE_CFG_TQP_DESC_N_S);
1248 
1249 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 					HCLGE_CFG_PHY_ADDR_M,
1251 					HCLGE_CFG_PHY_ADDR_S);
1252 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253 					  HCLGE_CFG_MEDIA_TP_M,
1254 					  HCLGE_CFG_MEDIA_TP_S);
1255 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1256 					  HCLGE_CFG_RX_BUF_LEN_M,
1257 					  HCLGE_CFG_RX_BUF_LEN_S);
1258 	/* get mac_address */
1259 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1260 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1261 					    HCLGE_CFG_MAC_ADDR_H_M,
1262 					    HCLGE_CFG_MAC_ADDR_H_S);
1263 
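	/* place the upper bits of the MAC address above the low 32 bits;
	 * the shift is split as (<< 31) << 1, presumably to sidestep a
	 * shift-count warning
	 */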
1264 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1265 
1266 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267 					     HCLGE_CFG_DEFAULT_SPEED_M,
1268 					     HCLGE_CFG_DEFAULT_SPEED_S);
1269 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1270 					    HCLGE_CFG_RSS_SIZE_M,
1271 					    HCLGE_CFG_RSS_SIZE_S);
1272 
1273 	for (i = 0; i < ETH_ALEN; i++)
1274 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1275 
1276 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1277 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1278 
1279 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 					     HCLGE_CFG_SPEED_ABILITY_M,
1281 					     HCLGE_CFG_SPEED_ABILITY_S);
1282 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1284 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1285 	if (!cfg->umv_space)
1286 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1287 }
1288 
1289 /* hclge_get_cfg: query the static parameters from flash
1290  * @hdev: pointer to struct hclge_dev
1291  * @hcfg: the config structure to be filled out
1292  */
1293 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1294 {
1295 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1296 	struct hclge_cfg_param_cmd *req;
1297 	unsigned int i;
1298 	int ret;
1299 
1300 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1301 		u32 offset = 0;
1302 
1303 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1304 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1305 					   true);
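		/* the offset word packs both the byte offset and the read
		 * length of this slice of the configuration
		 */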
1306 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1307 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1308 		/* The length must be in units of 4 bytes when sent to hardware */
1309 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1310 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1311 		req->offset = cpu_to_le32(offset);
1312 	}
1313 
1314 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1315 	if (ret) {
1316 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1317 		return ret;
1318 	}
1319 
1320 	hclge_parse_cfg(hcfg, desc);
1321 
1322 	return 0;
1323 }
1324 
1325 static int hclge_get_cap(struct hclge_dev *hdev)
1326 {
1327 	int ret;
1328 
1329 	ret = hclge_query_function_status(hdev);
1330 	if (ret) {
1331 		dev_err(&hdev->pdev->dev,
1332 			"query function status error %d.\n", ret);
1333 		return ret;
1334 	}
1335 
1336 	/* get pf resource */
1337 	return hclge_query_pf_resource(hdev);
1338 }
1339 
1340 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1341 {
1342 #define HCLGE_MIN_TX_DESC	64
1343 #define HCLGE_MIN_RX_DESC	64
1344 
1345 	if (!is_kdump_kernel())
1346 		return;
1347 
1348 	dev_info(&hdev->pdev->dev,
1349 		 "Running kdump kernel. Using minimal resources\n");
1350 
1351 	/* the minimal number of queue pairs equals the number of vports */
1352 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1353 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1354 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1355 }
1356 
1357 static int hclge_configure(struct hclge_dev *hdev)
1358 {
1359 	struct hclge_cfg cfg;
1360 	unsigned int i;
1361 	int ret;
1362 
1363 	ret = hclge_get_cfg(hdev, &cfg);
1364 	if (ret) {
1365 		dev_err(&hdev->pdev->dev, "failed to get config, ret = %d.\n", ret);
1366 		return ret;
1367 	}
1368 
1369 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 	hdev->base_tqp_pid = 0;
1371 	hdev->rss_size_max = cfg.rss_size_max;
1372 	hdev->rx_buf_len = cfg.rx_buf_len;
1373 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1374 	hdev->hw.mac.media_type = cfg.media_type;
1375 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 	hdev->num_tx_desc = cfg.tqp_desc_num;
1377 	hdev->num_rx_desc = cfg.tqp_desc_num;
1378 	hdev->tm_info.num_pg = 1;
1379 	hdev->tc_max = cfg.tc_num;
1380 	hdev->tm_info.hw_pfc_map = 0;
1381 	hdev->wanted_umv_size = cfg.umv_space;
1382 
1383 	if (hnae3_dev_fd_supported(hdev)) {
1384 		hdev->fd_en = true;
1385 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 	}
1387 
1388 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1391 		return ret;
1392 	}
1393 
1394 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1395 
1396 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1397 
1398 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1399 	    (hdev->tc_max < 1)) {
1400 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1401 			 hdev->tc_max);
1402 		hdev->tc_max = 1;
1403 	}
1404 
1405 	/* Dev does not support DCB */
1406 	if (!hnae3_dev_dcb_supported(hdev)) {
1407 		hdev->tc_max = 1;
1408 		hdev->pfc_max = 0;
1409 	} else {
1410 		hdev->pfc_max = hdev->tc_max;
1411 	}
1412 
1413 	hdev->tm_info.num_tc = 1;
1414 
1415 	/* Non-contiguous TCs are not currently supported */
1416 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1417 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1418 
1419 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1420 
1421 	hclge_init_kdump_kernel_config(hdev);
1422 
1423 	/* Set the init affinity based on pci func number */
1424 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1425 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1426 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1427 			&hdev->affinity_mask);
1428 
1429 	return ret;
1430 }
1431 
1432 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1433 			    unsigned int tso_mss_max)
1434 {
1435 	struct hclge_cfg_tso_status_cmd *req;
1436 	struct hclge_desc desc;
1437 	u16 tso_mss;
1438 
1439 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440 
1441 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442 
1443 	tso_mss = 0;
1444 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1445 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1446 	req->tso_mss_min = cpu_to_le16(tso_mss);
1447 
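	/* note: the MIN mask/shift is reused for the max MSS field below,
	 * which presumably has the same bit layout
	 */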
1448 	tso_mss = 0;
1449 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1450 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1451 	req->tso_mss_max = cpu_to_le16(tso_mss);
1452 
1453 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1454 }
1455 
1456 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1457 {
1458 	struct hclge_cfg_gro_status_cmd *req;
1459 	struct hclge_desc desc;
1460 	int ret;
1461 
1462 	if (!hnae3_dev_gro_supported(hdev))
1463 		return 0;
1464 
1465 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1466 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1467 
1468 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1469 
1470 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1471 	if (ret)
1472 		dev_err(&hdev->pdev->dev,
1473 			"GRO hardware config cmd failed, ret = %d\n", ret);
1474 
1475 	return ret;
1476 }
1477 
1478 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1479 {
1480 	struct hclge_tqp *tqp;
1481 	int i;
1482 
1483 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1484 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1485 	if (!hdev->htqp)
1486 		return -ENOMEM;
1487 
1488 	tqp = hdev->htqp;
1489 
1490 	for (i = 0; i < hdev->num_tqps; i++) {
1491 		tqp->dev = &hdev->pdev->dev;
1492 		tqp->index = i;
1493 
1494 		tqp->q.ae_algo = &ae_algo;
1495 		tqp->q.buf_size = hdev->rx_buf_len;
1496 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1497 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1498 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1499 			i * HCLGE_TQP_REG_SIZE;
1500 
1501 		tqp++;
1502 	}
1503 
1504 	return 0;
1505 }
1506 
1507 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1508 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1509 {
1510 	struct hclge_tqp_map_cmd *req;
1511 	struct hclge_desc desc;
1512 	int ret;
1513 
1514 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1515 
1516 	req = (struct hclge_tqp_map_cmd *)desc.data;
1517 	req->tqp_id = cpu_to_le16(tqp_pid);
1518 	req->tqp_vf = func_id;
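	/* enable the mapping; mark it as a VF mapping when not the PF */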
1519 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1520 	if (!is_pf)
1521 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1522 	req->tqp_vid = cpu_to_le16(tqp_vid);
1523 
1524 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1525 	if (ret)
1526 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1527 
1528 	return ret;
1529 }
1530 
1531 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1532 {
1533 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1534 	struct hclge_dev *hdev = vport->back;
1535 	int i, alloced;
1536 
1537 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1538 	     alloced < num_tqps; i++) {
1539 		if (!hdev->htqp[i].alloced) {
1540 			hdev->htqp[i].q.handle = &vport->nic;
1541 			hdev->htqp[i].q.tqp_index = alloced;
1542 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1543 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1544 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1545 			hdev->htqp[i].alloced = true;
1546 			alloced++;
1547 		}
1548 	}
1549 	vport->alloc_tqps = alloced;
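	/* rss_size is limited by the hardware maximum and by the number of
	 * queues available per TC
	 */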
1550 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1551 				vport->alloc_tqps / hdev->tm_info.num_tc);
1552 
1553 	/* ensure a one-to-one mapping between irq and queue by default */
1554 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1555 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1556 
1557 	return 0;
1558 }
1559 
1560 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1561 			    u16 num_tx_desc, u16 num_rx_desc)
1563 {
1564 	struct hnae3_handle *nic = &vport->nic;
1565 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1566 	struct hclge_dev *hdev = vport->back;
1567 	int ret;
1568 
1569 	kinfo->num_tx_desc = num_tx_desc;
1570 	kinfo->num_rx_desc = num_rx_desc;
1571 
1572 	kinfo->rx_buf_len = hdev->rx_buf_len;
1573 
1574 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1575 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1576 	if (!kinfo->tqp)
1577 		return -ENOMEM;
1578 
1579 	ret = hclge_assign_tqp(vport, num_tqps);
1580 	if (ret)
1581 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1582 
1583 	return ret;
1584 }
1585 
1586 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1587 				  struct hclge_vport *vport)
1588 {
1589 	struct hnae3_handle *nic = &vport->nic;
1590 	struct hnae3_knic_private_info *kinfo;
1591 	u16 i;
1592 
1593 	kinfo = &nic->kinfo;
1594 	for (i = 0; i < vport->alloc_tqps; i++) {
1595 		struct hclge_tqp *q =
1596 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1597 		bool is_pf;
1598 		int ret;
1599 
1600 		is_pf = !(vport->vport_id);
1601 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1602 					     i, is_pf);
1603 		if (ret)
1604 			return ret;
1605 	}
1606 
1607 	return 0;
1608 }
1609 
1610 static int hclge_map_tqp(struct hclge_dev *hdev)
1611 {
1612 	struct hclge_vport *vport = hdev->vport;
1613 	u16 i, num_vport;
1614 
1615 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1616 	for (i = 0; i < num_vport; i++)	{
1617 		int ret;
1618 
1619 		ret = hclge_map_tqp_to_vport(hdev, vport);
1620 		if (ret)
1621 			return ret;
1622 
1623 		vport++;
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1630 {
1631 	struct hnae3_handle *nic = &vport->nic;
1632 	struct hclge_dev *hdev = vport->back;
1633 	int ret;
1634 
1635 	nic->pdev = hdev->pdev;
1636 	nic->ae_algo = &ae_algo;
1637 	nic->numa_node_mask = hdev->numa_node_mask;
1638 
1639 	ret = hclge_knic_setup(vport, num_tqps,
1640 			       hdev->num_tx_desc, hdev->num_rx_desc);
1641 	if (ret)
1642 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1643 
1644 	return ret;
1645 }
1646 
1647 static int hclge_alloc_vport(struct hclge_dev *hdev)
1648 {
1649 	struct pci_dev *pdev = hdev->pdev;
1650 	struct hclge_vport *vport;
1651 	u32 tqp_main_vport;
1652 	u32 tqp_per_vport;
1653 	int num_vport, i;
1654 	int ret;
1655 
1656 	/* We need to alloc a vport for the main NIC of the PF */
1657 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1658 
1659 	if (hdev->num_tqps < num_vport) {
1660 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1661 			hdev->num_tqps, num_vport);
1662 		return -EINVAL;
1663 	}
1664 
1665 	/* Alloc the same number of TQPs per vport; the main vport takes the remainder */
1666 	tqp_per_vport = hdev->num_tqps / num_vport;
1667 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1668 
1669 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1670 			     GFP_KERNEL);
1671 	if (!vport)
1672 		return -ENOMEM;
1673 
1674 	hdev->vport = vport;
1675 	hdev->num_alloc_vport = num_vport;
1676 
1677 	if (IS_ENABLED(CONFIG_PCI_IOV))
1678 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1679 
1680 	for (i = 0; i < num_vport; i++) {
1681 		vport->back = hdev;
1682 		vport->vport_id = i;
1683 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1684 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1685 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1686 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1687 		INIT_LIST_HEAD(&vport->vlan_list);
1688 		INIT_LIST_HEAD(&vport->uc_mac_list);
1689 		INIT_LIST_HEAD(&vport->mc_mac_list);
1690 
1691 		if (i == 0)
1692 			ret = hclge_vport_setup(vport, tqp_main_vport);
1693 		else
1694 			ret = hclge_vport_setup(vport, tqp_per_vport);
1695 		if (ret) {
1696 			dev_err(&pdev->dev,
1697 				"vport setup failed for vport %d, %d\n",
1698 				i, ret);
1699 			return ret;
1700 		}
1701 
1702 		vport++;
1703 	}
1704 
1705 	return 0;
1706 }
1707 
1708 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1709 				    struct hclge_pkt_buf_alloc *buf_alloc)
1710 {
1711 /* TX buffer size is in units of 128 bytes */
1712 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1713 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1714 	struct hclge_tx_buff_alloc_cmd *req;
1715 	struct hclge_desc desc;
1716 	int ret;
1717 	u8 i;
1718 
1719 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1720 
1721 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1722 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1723 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1724 
1725 		req->tx_pkt_buff[i] =
1726 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1727 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1728 	}
1729 
1730 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1731 	if (ret)
1732 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1733 			ret);
1734 
1735 	return ret;
1736 }
1737 
1738 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1739 				 struct hclge_pkt_buf_alloc *buf_alloc)
1740 {
1741 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1742 
1743 	if (ret)
1744 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1745 
1746 	return ret;
1747 }
1748 
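/* Return the number of TCs enabled in hdev->hw_tc_map */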
1749 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1750 {
1751 	unsigned int i;
1752 	u32 cnt = 0;
1753 
1754 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1755 		if (hdev->hw_tc_map & BIT(i))
1756 			cnt++;
1757 	return cnt;
1758 }
1759 
1760 /* Get the number of pfc enabled TCs, which have private buffer */
1761 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1762 				  struct hclge_pkt_buf_alloc *buf_alloc)
1763 {
1764 	struct hclge_priv_buf *priv;
1765 	unsigned int i;
1766 	int cnt = 0;
1767 
1768 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1769 		priv = &buf_alloc->priv_buf[i];
1770 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1771 		    priv->enable)
1772 			cnt++;
1773 	}
1774 
1775 	return cnt;
1776 }
1777 
1778 /* Get the number of pfc disabled TCs, which have private buffer */
1779 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1780 				     struct hclge_pkt_buf_alloc *buf_alloc)
1781 {
1782 	struct hclge_priv_buf *priv;
1783 	unsigned int i;
1784 	int cnt = 0;
1785 
1786 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1787 		priv = &buf_alloc->priv_buf[i];
1788 		if (hdev->hw_tc_map & BIT(i) &&
1789 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1790 		    priv->enable)
1791 			cnt++;
1792 	}
1793 
1794 	return cnt;
1795 }
1796 
1797 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1798 {
1799 	struct hclge_priv_buf *priv;
1800 	u32 rx_priv = 0;
1801 	int i;
1802 
1803 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1804 		priv = &buf_alloc->priv_buf[i];
1805 		if (priv->enable)
1806 			rx_priv += priv->buf_size;
1807 	}
1808 	return rx_priv;
1809 }
1810 
1811 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1812 {
1813 	u32 i, total_tx_size = 0;
1814 
1815 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1816 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1817 
1818 	return total_tx_size;
1819 }
1820 
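/* Check whether the rx buffer remaining after the private buffers is big
 * enough for the shared buffer; if so, set up the shared buffer size and
 * its waterlines and per-TC thresholds.
 */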
1821 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1822 				struct hclge_pkt_buf_alloc *buf_alloc,
1823 				u32 rx_all)
1824 {
1825 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1826 	u32 tc_num = hclge_get_tc_num(hdev);
1827 	u32 shared_buf, aligned_mps;
1828 	u32 rx_priv;
1829 	int i;
1830 
1831 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1832 
1833 	if (hnae3_dev_dcb_supported(hdev))
1834 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1835 					hdev->dv_buf_size;
1836 	else
1837 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1838 					+ hdev->dv_buf_size;
1839 
1840 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1841 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1842 			     HCLGE_BUF_SIZE_UNIT);
1843 
1844 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1845 	if (rx_all < rx_priv + shared_std)
1846 		return false;
1847 
1848 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1849 	buf_alloc->s_buf.buf_size = shared_buf;
1850 	if (hnae3_dev_dcb_supported(hdev)) {
1851 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1852 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1853 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1854 				  HCLGE_BUF_SIZE_UNIT);
1855 	} else {
1856 		buf_alloc->s_buf.self.high = aligned_mps +
1857 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1858 		buf_alloc->s_buf.self.low = aligned_mps;
1859 	}
1860 
1861 	if (hnae3_dev_dcb_supported(hdev)) {
1862 		hi_thrd = shared_buf - hdev->dv_buf_size;
1863 
1864 		if (tc_num <= NEED_RESERVE_TC_NUM)
1865 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1866 					/ BUF_MAX_PERCENT;
1867 
1868 		if (tc_num)
1869 			hi_thrd = hi_thrd / tc_num;
1870 
1871 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1872 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1873 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1874 	} else {
1875 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1876 		lo_thrd = aligned_mps;
1877 	}
1878 
1879 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1880 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1881 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1882 	}
1883 
1884 	return true;
1885 }
1886 
1887 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1888 				struct hclge_pkt_buf_alloc *buf_alloc)
1889 {
1890 	u32 i, total_size;
1891 
1892 	total_size = hdev->pkt_buf_size;
1893 
1894 	/* alloc tx buffer for all enabled tc */
1895 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1896 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1897 
1898 		if (hdev->hw_tc_map & BIT(i)) {
1899 			if (total_size < hdev->tx_buf_size)
1900 				return -ENOMEM;
1901 
1902 			priv->tx_buf_size = hdev->tx_buf_size;
1903 		} else {
1904 			priv->tx_buf_size = 0;
1905 		}
1906 
1907 		total_size -= priv->tx_buf_size;
1908 	}
1909 
1910 	return 0;
1911 }
1912 
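/* Assign the rx private buffer size and waterlines for each enabled TC,
 * using the larger waterlines when @max is true, then check whether the
 * remaining buffer can hold the shared buffer.
 */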
1913 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1914 				  struct hclge_pkt_buf_alloc *buf_alloc)
1915 {
1916 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1917 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1918 	unsigned int i;
1919 
1920 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1921 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1922 
1923 		priv->enable = 0;
1924 		priv->wl.low = 0;
1925 		priv->wl.high = 0;
1926 		priv->buf_size = 0;
1927 
1928 		if (!(hdev->hw_tc_map & BIT(i)))
1929 			continue;
1930 
1931 		priv->enable = 1;
1932 
1933 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1934 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1935 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1936 						HCLGE_BUF_SIZE_UNIT);
1937 		} else {
1938 			priv->wl.low = 0;
1939 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1940 					aligned_mps;
1941 		}
1942 
1943 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1944 	}
1945 
1946 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1947 }
1948 
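/* Release the private buffers of the TCs without PFC enabled, starting
 * from the last TC, until the shared buffer fits or no such TC is left.
 */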
1949 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1950 					  struct hclge_pkt_buf_alloc *buf_alloc)
1951 {
1952 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1953 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1954 	int i;
1955 
	/* let the last one be cleared first */
1957 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1958 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1959 		unsigned int mask = BIT((unsigned int)i);
1960 
1961 		if (hdev->hw_tc_map & mask &&
1962 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1963 			/* Clear the no pfc TC private buffer */
1964 			priv->wl.low = 0;
1965 			priv->wl.high = 0;
1966 			priv->buf_size = 0;
1967 			priv->enable = 0;
1968 			no_pfc_priv_num--;
1969 		}
1970 
1971 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1972 		    no_pfc_priv_num == 0)
1973 			break;
1974 	}
1975 
1976 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1977 }
1978 
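/* Release the private buffers of the PFC enabled TCs, starting from the
 * last TC, until the shared buffer fits or no such TC is left.
 */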
1979 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1980 					struct hclge_pkt_buf_alloc *buf_alloc)
1981 {
1982 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1983 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1984 	int i;
1985 
	/* let the last one be cleared first */
1987 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1988 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1989 		unsigned int mask = BIT((unsigned int)i);
1990 
1991 		if (hdev->hw_tc_map & mask &&
1992 		    hdev->tm_info.hw_pfc_map & mask) {
1993 			/* Reduce the number of pfc TC with private buffer */
1994 			priv->wl.low = 0;
1995 			priv->enable = 0;
1996 			priv->wl.high = 0;
1997 			priv->buf_size = 0;
1998 			pfc_priv_num--;
1999 		}
2000 
2001 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2002 		    pfc_priv_num == 0)
2003 			break;
2004 	}
2005 
2006 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2007 }
2008 
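/* Divide the rx buffer left after the tx buffers evenly among the enabled
 * TCs as private buffer and leave no shared buffer; return false if the
 * per-TC share is below the minimum required size.
 */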
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2011 {
2012 #define COMPENSATE_BUFFER	0x3C00
2013 #define COMPENSATE_HALF_MPS_NUM	5
2014 #define PRIV_WL_GAP		0x1800
2015 
2016 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2017 	u32 tc_num = hclge_get_tc_num(hdev);
2018 	u32 half_mps = hdev->mps >> 1;
2019 	u32 min_rx_priv;
2020 	unsigned int i;
2021 
2022 	if (tc_num)
2023 		rx_priv = rx_priv / tc_num;
2024 
2025 	if (tc_num <= NEED_RESERVE_TC_NUM)
2026 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2027 
2028 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2029 			COMPENSATE_HALF_MPS_NUM * half_mps;
2030 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2031 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2032 
2033 	if (rx_priv < min_rx_priv)
2034 		return false;
2035 
2036 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2037 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2038 
2039 		priv->enable = 0;
2040 		priv->wl.low = 0;
2041 		priv->wl.high = 0;
2042 		priv->buf_size = 0;
2043 
2044 		if (!(hdev->hw_tc_map & BIT(i)))
2045 			continue;
2046 
2047 		priv->enable = 1;
2048 		priv->buf_size = rx_priv;
2049 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2050 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2051 	}
2052 
2053 	buf_alloc->s_buf.buf_size = 0;
2054 
2055 	return true;
2056 }
2057 
2058 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2059  * @hdev: pointer to struct hclge_dev
2060  * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
2062  */
2063 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2064 				struct hclge_pkt_buf_alloc *buf_alloc)
2065 {
2066 	/* When DCB is not supported, rx private buffer is not allocated. */
2067 	if (!hnae3_dev_dcb_supported(hdev)) {
2068 		u32 rx_all = hdev->pkt_buf_size;
2069 
2070 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2071 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2072 			return -ENOMEM;
2073 
2074 		return 0;
2075 	}
2076 
2077 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2078 		return 0;
2079 
2080 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2081 		return 0;
2082 
2083 	/* try to decrease the buffer size */
2084 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2085 		return 0;
2086 
2087 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2088 		return 0;
2089 
2090 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2091 		return 0;
2092 
2093 	return -ENOMEM;
2094 }
2095 
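/* Program the calculated per-TC private buffer sizes and the shared buffer
 * size into hardware.
 */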
2096 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2097 				   struct hclge_pkt_buf_alloc *buf_alloc)
2098 {
2099 	struct hclge_rx_priv_buff_cmd *req;
2100 	struct hclge_desc desc;
2101 	int ret;
2102 	int i;
2103 
2104 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2105 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2106 
2107 	/* Alloc private buffer TCs */
2108 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2109 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2110 
2111 		req->buf_num[i] =
2112 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2113 		req->buf_num[i] |=
2114 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2115 	}
2116 
2117 	req->shared_buf =
2118 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2119 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2120 
2121 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2122 	if (ret)
2123 		dev_err(&hdev->pdev->dev,
2124 			"rx private buffer alloc cmd failed %d\n", ret);
2125 
2126 	return ret;
2127 }
2128 
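/* Configure the rx private buffer high/low waterlines for all TCs. Two
 * linked descriptors are used, each carrying HCLGE_TC_NUM_ONE_DESC TCs.
 */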
2129 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2130 				   struct hclge_pkt_buf_alloc *buf_alloc)
2131 {
2132 	struct hclge_rx_priv_wl_buf *req;
2133 	struct hclge_priv_buf *priv;
2134 	struct hclge_desc desc[2];
2135 	int i, j;
2136 	int ret;
2137 
2138 	for (i = 0; i < 2; i++) {
2139 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2140 					   false);
2141 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2142 
		/* The first descriptor sets the NEXT bit to 1 */
2144 		if (i == 0)
2145 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2146 		else
2147 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2148 
2149 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2150 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2151 
2152 			priv = &buf_alloc->priv_buf[idx];
2153 			req->tc_wl[j].high =
2154 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2155 			req->tc_wl[j].high |=
2156 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2157 			req->tc_wl[j].low =
2158 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2159 			req->tc_wl[j].low |=
2160 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2161 		}
2162 	}
2163 
	/* Send 2 descriptors at one time */
2165 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2166 	if (ret)
2167 		dev_err(&hdev->pdev->dev,
2168 			"rx private waterline config cmd failed %d\n",
2169 			ret);
2170 	return ret;
2171 }
2172 
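/* Configure the per-TC high/low thresholds of the shared rx buffer, also
 * using two linked descriptors.
 */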
2173 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2174 				    struct hclge_pkt_buf_alloc *buf_alloc)
2175 {
2176 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2177 	struct hclge_rx_com_thrd *req;
2178 	struct hclge_desc desc[2];
2179 	struct hclge_tc_thrd *tc;
2180 	int i, j;
2181 	int ret;
2182 
2183 	for (i = 0; i < 2; i++) {
2184 		hclge_cmd_setup_basic_desc(&desc[i],
2185 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2186 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2187 
		/* The first descriptor sets the NEXT bit to 1 */
2189 		if (i == 0)
2190 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2191 		else
2192 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2193 
2194 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2195 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2196 
2197 			req->com_thrd[j].high =
2198 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2199 			req->com_thrd[j].high |=
2200 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2201 			req->com_thrd[j].low =
2202 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2203 			req->com_thrd[j].low |=
2204 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2205 		}
2206 	}
2207 
2208 	/* Send 2 descriptors at one time */
2209 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2210 	if (ret)
2211 		dev_err(&hdev->pdev->dev,
2212 			"common threshold config cmd failed %d\n", ret);
2213 	return ret;
2214 }
2215 
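/* Configure the high/low waterlines of the shared rx buffer itself */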
2216 static int hclge_common_wl_config(struct hclge_dev *hdev,
2217 				  struct hclge_pkt_buf_alloc *buf_alloc)
2218 {
2219 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2220 	struct hclge_rx_com_wl *req;
2221 	struct hclge_desc desc;
2222 	int ret;
2223 
2224 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2225 
2226 	req = (struct hclge_rx_com_wl *)desc.data;
2227 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2228 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2229 
2230 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2231 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232 
2233 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2234 	if (ret)
2235 		dev_err(&hdev->pdev->dev,
2236 			"common waterline config cmd failed %d\n", ret);
2237 
2238 	return ret;
2239 }
2240 
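/* Calculate and configure the whole packet buffer: per-TC tx buffers, rx
 * private buffers and the shared rx buffer. The rx private waterlines and
 * the shared buffer thresholds are only configured when DCB is supported.
 */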
2241 int hclge_buffer_alloc(struct hclge_dev *hdev)
2242 {
2243 	struct hclge_pkt_buf_alloc *pkt_buf;
2244 	int ret;
2245 
2246 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2247 	if (!pkt_buf)
2248 		return -ENOMEM;
2249 
2250 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2251 	if (ret) {
2252 		dev_err(&hdev->pdev->dev,
2253 			"could not calc tx buffer size for all TCs %d\n", ret);
2254 		goto out;
2255 	}
2256 
2257 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2258 	if (ret) {
2259 		dev_err(&hdev->pdev->dev,
2260 			"could not alloc tx buffers %d\n", ret);
2261 		goto out;
2262 	}
2263 
2264 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2265 	if (ret) {
2266 		dev_err(&hdev->pdev->dev,
2267 			"could not calc rx priv buffer size for all TCs %d\n",
2268 			ret);
2269 		goto out;
2270 	}
2271 
2272 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2273 	if (ret) {
2274 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2275 			ret);
2276 		goto out;
2277 	}
2278 
2279 	if (hnae3_dev_dcb_supported(hdev)) {
2280 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2281 		if (ret) {
2282 			dev_err(&hdev->pdev->dev,
2283 				"could not configure rx private waterline %d\n",
2284 				ret);
2285 			goto out;
2286 		}
2287 
2288 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2289 		if (ret) {
2290 			dev_err(&hdev->pdev->dev,
2291 				"could not configure common threshold %d\n",
2292 				ret);
2293 			goto out;
2294 		}
2295 	}
2296 
2297 	ret = hclge_common_wl_config(hdev, pkt_buf);
2298 	if (ret)
2299 		dev_err(&hdev->pdev->dev,
2300 			"could not configure common waterline %d\n", ret);
2301 
2302 out:
2303 	kfree(pkt_buf);
2304 	return ret;
2305 }
2306 
2307 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2308 {
2309 	struct hnae3_handle *roce = &vport->roce;
2310 	struct hnae3_handle *nic = &vport->nic;
2311 
2312 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2313 
2314 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2315 	    vport->back->num_msi_left == 0)
2316 		return -EINVAL;
2317 
2318 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2319 
2320 	roce->rinfo.netdev = nic->kinfo.netdev;
2321 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2322 
2323 	roce->pdev = nic->pdev;
2324 	roce->ae_algo = nic->ae_algo;
2325 	roce->numa_node_mask = nic->numa_node_mask;
2326 
2327 	return 0;
2328 }
2329 
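/* Allocate MSI/MSI-X vectors and the arrays used to map vectors to vports
 * and IRQ numbers.
 */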
2330 static int hclge_init_msi(struct hclge_dev *hdev)
2331 {
2332 	struct pci_dev *pdev = hdev->pdev;
2333 	int vectors;
2334 	int i;
2335 
2336 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2337 					hdev->num_msi,
2338 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2339 	if (vectors < 0) {
2340 		dev_err(&pdev->dev,
2341 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2342 			vectors);
2343 		return vectors;
2344 	}
2345 	if (vectors < hdev->num_msi)
2346 		dev_warn(&hdev->pdev->dev,
2347 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2348 			 hdev->num_msi, vectors);
2349 
2350 	hdev->num_msi = vectors;
2351 	hdev->num_msi_left = vectors;
2352 
2353 	hdev->base_msi_vector = pdev->irq;
2354 	hdev->roce_base_vector = hdev->base_msi_vector +
2355 				hdev->roce_base_msix_offset;
2356 
2357 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2358 					   sizeof(u16), GFP_KERNEL);
2359 	if (!hdev->vector_status) {
2360 		pci_free_irq_vectors(pdev);
2361 		return -ENOMEM;
2362 	}
2363 
2364 	for (i = 0; i < hdev->num_msi; i++)
2365 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2366 
2367 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2368 					sizeof(int), GFP_KERNEL);
2369 	if (!hdev->vector_irq) {
2370 		pci_free_irq_vectors(pdev);
2371 		return -ENOMEM;
2372 	}
2373 
2374 	return 0;
2375 }
2376 
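/* Only 10M and 100M can use half duplex; force full duplex otherwise */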
2377 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2378 {
2379 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2380 		duplex = HCLGE_MAC_FULL;
2381 
2382 	return duplex;
2383 }
2384 
2385 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2386 				      u8 duplex)
2387 {
2388 	struct hclge_config_mac_speed_dup_cmd *req;
2389 	struct hclge_desc desc;
2390 	int ret;
2391 
2392 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2393 
2394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2395 
2396 	if (duplex)
2397 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2398 
2399 	switch (speed) {
2400 	case HCLGE_MAC_SPEED_10M:
2401 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2402 				HCLGE_CFG_SPEED_S, 6);
2403 		break;
2404 	case HCLGE_MAC_SPEED_100M:
2405 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2406 				HCLGE_CFG_SPEED_S, 7);
2407 		break;
2408 	case HCLGE_MAC_SPEED_1G:
2409 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2410 				HCLGE_CFG_SPEED_S, 0);
2411 		break;
2412 	case HCLGE_MAC_SPEED_10G:
2413 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2414 				HCLGE_CFG_SPEED_S, 1);
2415 		break;
2416 	case HCLGE_MAC_SPEED_25G:
2417 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2418 				HCLGE_CFG_SPEED_S, 2);
2419 		break;
2420 	case HCLGE_MAC_SPEED_40G:
2421 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2422 				HCLGE_CFG_SPEED_S, 3);
2423 		break;
2424 	case HCLGE_MAC_SPEED_50G:
2425 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2426 				HCLGE_CFG_SPEED_S, 4);
2427 		break;
2428 	case HCLGE_MAC_SPEED_100G:
2429 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2430 				HCLGE_CFG_SPEED_S, 5);
2431 		break;
2432 	default:
2433 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2434 		return -EINVAL;
2435 	}
2436 
2437 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2438 		      1);
2439 
2440 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2441 	if (ret) {
2442 		dev_err(&hdev->pdev->dev,
2443 			"mac speed/duplex config cmd failed %d.\n", ret);
2444 		return ret;
2445 	}
2446 
2447 	return 0;
2448 }
2449 
2450 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2451 {
2452 	int ret;
2453 
2454 	duplex = hclge_check_speed_dup(duplex, speed);
2455 	if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2456 		return 0;
2457 
2458 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2459 	if (ret)
2460 		return ret;
2461 
2462 	hdev->hw.mac.speed = speed;
2463 	hdev->hw.mac.duplex = duplex;
2464 
2465 	return 0;
2466 }
2467 
2468 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2469 				     u8 duplex)
2470 {
2471 	struct hclge_vport *vport = hclge_get_vport(handle);
2472 	struct hclge_dev *hdev = vport->back;
2473 
2474 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2475 }
2476 
2477 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2478 {
2479 	struct hclge_config_auto_neg_cmd *req;
2480 	struct hclge_desc desc;
2481 	u32 flag = 0;
2482 	int ret;
2483 
2484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2485 
2486 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2487 	if (enable)
2488 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2489 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2490 
2491 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2492 	if (ret)
2493 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2494 			ret);
2495 
2496 	return ret;
2497 }
2498 
2499 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2500 {
2501 	struct hclge_vport *vport = hclge_get_vport(handle);
2502 	struct hclge_dev *hdev = vport->back;
2503 
2504 	if (!hdev->hw.mac.support_autoneg) {
2505 		if (enable) {
2506 			dev_err(&hdev->pdev->dev,
2507 				"autoneg is not supported by current port\n");
2508 			return -EOPNOTSUPP;
2509 		} else {
2510 			return 0;
2511 		}
2512 	}
2513 
2514 	return hclge_set_autoneg_en(hdev, enable);
2515 }
2516 
2517 static int hclge_get_autoneg(struct hnae3_handle *handle)
2518 {
2519 	struct hclge_vport *vport = hclge_get_vport(handle);
2520 	struct hclge_dev *hdev = vport->back;
2521 	struct phy_device *phydev = hdev->hw.mac.phydev;
2522 
2523 	if (phydev)
2524 		return phydev->autoneg;
2525 
2526 	return hdev->hw.mac.autoneg;
2527 }
2528 
2529 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2530 {
2531 	struct hclge_vport *vport = hclge_get_vport(handle);
2532 	struct hclge_dev *hdev = vport->back;
2533 	int ret;
2534 
2535 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2536 
2537 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2538 	if (ret)
2539 		return ret;
2540 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2541 }
2542 
2543 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2544 {
2545 	struct hclge_vport *vport = hclge_get_vport(handle);
2546 	struct hclge_dev *hdev = vport->back;
2547 
2548 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2549 		return hclge_set_autoneg_en(hdev, !halt);
2550 
2551 	return 0;
2552 }
2553 
2554 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2555 {
2556 	struct hclge_config_fec_cmd *req;
2557 	struct hclge_desc desc;
2558 	int ret;
2559 
2560 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2561 
2562 	req = (struct hclge_config_fec_cmd *)desc.data;
2563 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2564 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2565 	if (fec_mode & BIT(HNAE3_FEC_RS))
2566 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2567 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2568 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2569 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2571 
2572 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2573 	if (ret)
2574 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2575 
2576 	return ret;
2577 }
2578 
2579 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2580 {
2581 	struct hclge_vport *vport = hclge_get_vport(handle);
2582 	struct hclge_dev *hdev = vport->back;
2583 	struct hclge_mac *mac = &hdev->hw.mac;
2584 	int ret;
2585 
2586 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2587 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2588 		return -EINVAL;
2589 	}
2590 
2591 	ret = hclge_set_fec_hw(hdev, fec_mode);
2592 	if (ret)
2593 		return ret;
2594 
2595 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2596 	return 0;
2597 }
2598 
2599 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2600 			  u8 *fec_mode)
2601 {
2602 	struct hclge_vport *vport = hclge_get_vport(handle);
2603 	struct hclge_dev *hdev = vport->back;
2604 	struct hclge_mac *mac = &hdev->hw.mac;
2605 
2606 	if (fec_ability)
2607 		*fec_ability = mac->fec_ability;
2608 	if (fec_mode)
2609 		*fec_mode = mac->fec_mode;
2610 }
2611 
2612 static int hclge_mac_init(struct hclge_dev *hdev)
2613 {
2614 	struct hclge_mac *mac = &hdev->hw.mac;
2615 	int ret;
2616 
2617 	hdev->support_sfp_query = true;
2618 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2619 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2620 					 hdev->hw.mac.duplex);
2621 	if (ret)
2622 		return ret;
2623 
2624 	if (hdev->hw.mac.support_autoneg) {
2625 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2626 		if (ret)
2627 			return ret;
2628 	}
2629 
2630 	mac->link = 0;
2631 
2632 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2633 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2634 		if (ret)
2635 			return ret;
2636 	}
2637 
2638 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2639 	if (ret) {
2640 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2641 		return ret;
2642 	}
2643 
2644 	ret = hclge_set_default_loopback(hdev);
2645 	if (ret)
2646 		return ret;
2647 
2648 	ret = hclge_buffer_alloc(hdev);
2649 	if (ret)
2650 		dev_err(&hdev->pdev->dev,
2651 			"allocate buffer fail, ret=%d\n", ret);
2652 
2653 	return ret;
2654 }
2655 
2656 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2657 {
2658 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2659 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2660 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2661 				    hclge_wq, &hdev->service_task, 0);
2662 }
2663 
2664 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2665 {
2666 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2667 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2668 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2669 				    hclge_wq, &hdev->service_task, 0);
2670 }
2671 
2672 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2673 {
2674 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2675 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2676 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2677 				    hclge_wq, &hdev->service_task,
2678 				    delay_time);
2679 }
2680 
2681 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2682 {
2683 	struct hclge_link_status_cmd *req;
2684 	struct hclge_desc desc;
2685 	int link_status;
2686 	int ret;
2687 
2688 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2689 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2690 	if (ret) {
2691 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2692 			ret);
2693 		return ret;
2694 	}
2695 
2696 	req = (struct hclge_link_status_cmd *)desc.data;
2697 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2698 
2699 	return !!link_status;
2700 }
2701 
2702 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2703 {
2704 	unsigned int mac_state;
2705 	int link_stat;
2706 
2707 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2708 		return 0;
2709 
2710 	mac_state = hclge_get_mac_link_status(hdev);
2711 
2712 	if (hdev->hw.mac.phydev) {
2713 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2714 			link_stat = mac_state &
2715 				hdev->hw.mac.phydev->link;
2716 		else
2717 			link_stat = 0;
2718 
2719 	} else {
2720 		link_stat = mac_state;
2721 	}
2722 
2723 	return !!link_stat;
2724 }
2725 
2726 static void hclge_update_link_status(struct hclge_dev *hdev)
2727 {
2728 	struct hnae3_client *rclient = hdev->roce_client;
2729 	struct hnae3_client *client = hdev->nic_client;
2730 	struct hnae3_handle *rhandle;
2731 	struct hnae3_handle *handle;
2732 	int state;
2733 	int i;
2734 
2735 	if (!client)
2736 		return;
2737 
2738 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2739 		return;
2740 
2741 	state = hclge_get_mac_phy_link(hdev);
2742 	if (state != hdev->hw.mac.link) {
2743 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2744 			handle = &hdev->vport[i].nic;
2745 			client->ops->link_status_change(handle, state);
2746 			hclge_config_mac_tnl_int(hdev, state);
2747 			rhandle = &hdev->vport[i].roce;
2748 			if (rclient && rclient->ops->link_status_change)
2749 				rclient->ops->link_status_change(rhandle,
2750 								 state);
2751 		}
2752 		hdev->hw.mac.link = state;
2753 	}
2754 
2755 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2756 }
2757 
2758 static void hclge_update_port_capability(struct hclge_mac *mac)
2759 {
2760 	/* update fec ability by speed */
2761 	hclge_convert_setting_fec(mac);
2762 
	/* firmware can not identify the backplane type, the media type
	 * read from configuration can help deal with it
	 */
2766 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2767 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2768 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2769 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2770 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2771 
2772 	if (mac->support_autoneg) {
2773 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2774 		linkmode_copy(mac->advertising, mac->supported);
2775 	} else {
2776 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2777 				   mac->supported);
2778 		linkmode_zero(mac->advertising);
2779 	}
2780 }
2781 
2782 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2783 {
2784 	struct hclge_sfp_info_cmd *resp;
2785 	struct hclge_desc desc;
2786 	int ret;
2787 
2788 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2789 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2790 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2791 	if (ret == -EOPNOTSUPP) {
2792 		dev_warn(&hdev->pdev->dev,
			 "IMP does not support get SFP speed %d\n", ret);
2794 		return ret;
2795 	} else if (ret) {
2796 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2797 		return ret;
2798 	}
2799 
2800 	*speed = le32_to_cpu(resp->speed);
2801 
2802 	return 0;
2803 }
2804 
2805 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2806 {
2807 	struct hclge_sfp_info_cmd *resp;
2808 	struct hclge_desc desc;
2809 	int ret;
2810 
2811 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2812 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2813 
2814 	resp->query_type = QUERY_ACTIVE_SPEED;
2815 
2816 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2817 	if (ret == -EOPNOTSUPP) {
2818 		dev_warn(&hdev->pdev->dev,
2819 			 "IMP does not support get SFP info %d\n", ret);
2820 		return ret;
2821 	} else if (ret) {
2822 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2823 		return ret;
2824 	}
2825 
	/* In some cases, the mac speed got from IMP may be 0, it shouldn't
	 * be set to mac->speed.
	 */
2829 	if (!le32_to_cpu(resp->speed))
2830 		return 0;
2831 
2832 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means the firmware is an old
	 * version, so do not update these params
	 */
2836 	if (resp->speed_ability) {
2837 		mac->module_type = le32_to_cpu(resp->module_type);
2838 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2839 		mac->autoneg = resp->autoneg;
2840 		mac->support_autoneg = resp->autoneg_ability;
2841 		mac->speed_type = QUERY_ACTIVE_SPEED;
2842 		if (!resp->active_fec)
2843 			mac->fec_mode = 0;
2844 		else
2845 			mac->fec_mode = BIT(resp->active_fec);
2846 	} else {
2847 		mac->speed_type = QUERY_SFP_SPEED;
2848 	}
2849 
2850 	return 0;
2851 }
2852 
2853 static int hclge_update_port_info(struct hclge_dev *hdev)
2854 {
2855 	struct hclge_mac *mac = &hdev->hw.mac;
2856 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2857 	int ret;
2858 
2859 	/* get the port info from SFP cmd if not copper port */
2860 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2861 		return 0;
2862 
2863 	/* if IMP does not support get SFP/qSFP info, return directly */
2864 	if (!hdev->support_sfp_query)
2865 		return 0;
2866 
2867 	if (hdev->pdev->revision >= 0x21)
2868 		ret = hclge_get_sfp_info(hdev, mac);
2869 	else
2870 		ret = hclge_get_sfp_speed(hdev, &speed);
2871 
2872 	if (ret == -EOPNOTSUPP) {
2873 		hdev->support_sfp_query = false;
2874 		return ret;
2875 	} else if (ret) {
2876 		return ret;
2877 	}
2878 
2879 	if (hdev->pdev->revision >= 0x21) {
2880 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2881 			hclge_update_port_capability(mac);
2882 			return 0;
2883 		}
2884 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2885 					       HCLGE_MAC_FULL);
2886 	} else {
2887 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2888 			return 0; /* do nothing if no SFP */
2889 
		/* must configure full duplex for SFP */
2891 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2892 	}
2893 }
2894 
2895 static int hclge_get_status(struct hnae3_handle *handle)
2896 {
2897 	struct hclge_vport *vport = hclge_get_vport(handle);
2898 	struct hclge_dev *hdev = vport->back;
2899 
2900 	hclge_update_link_status(hdev);
2901 
2902 	return hdev->hw.mac.link;
2903 }
2904 
2905 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2906 {
2907 	if (!pci_num_vf(hdev->pdev)) {
2908 		dev_err(&hdev->pdev->dev,
2909 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
2910 		return NULL;
2911 	}
2912 
2913 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2914 		dev_err(&hdev->pdev->dev,
2915 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
2916 			vf, pci_num_vf(hdev->pdev));
2917 		return NULL;
2918 	}
2919 
	/* VFs start from 1 in vport */
2921 	vf += HCLGE_VF_VPORT_START_NUM;
2922 	return &hdev->vport[vf];
2923 }
2924 
2925 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2926 			       struct ifla_vf_info *ivf)
2927 {
2928 	struct hclge_vport *vport = hclge_get_vport(handle);
2929 	struct hclge_dev *hdev = vport->back;
2930 
2931 	vport = hclge_get_vf_vport(hdev, vf);
2932 	if (!vport)
2933 		return -EINVAL;
2934 
2935 	ivf->vf = vf;
2936 	ivf->linkstate = vport->vf_info.link_state;
2937 	ivf->spoofchk = vport->vf_info.spoofchk;
2938 	ivf->trusted = vport->vf_info.trusted;
2939 	ivf->min_tx_rate = 0;
2940 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2941 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2942 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2943 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2944 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
2945 
2946 	return 0;
2947 }
2948 
2949 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2950 				   int link_state)
2951 {
2952 	struct hclge_vport *vport = hclge_get_vport(handle);
2953 	struct hclge_dev *hdev = vport->back;
2954 
2955 	vport = hclge_get_vf_vport(hdev, vf);
2956 	if (!vport)
2957 		return -EINVAL;
2958 
2959 	vport->vf_info.link_state = link_state;
2960 
2961 	return 0;
2962 }
2963 
2964 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2965 {
2966 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2967 
2968 	/* fetch the events from their corresponding regs */
2969 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2970 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2971 	msix_src_reg = hclge_read_dev(&hdev->hw,
2972 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2973 
2974 	/* Assumption: If by any chance reset and mailbox events are reported
2975 	 * together then we will only process reset event in this go and will
	 * defer the processing of the mailbox events. Since we would not have
	 * cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
2979 	 *
2980 	 * check for vector0 reset event sources
2981 	 */
2982 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2983 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2984 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2985 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2986 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2987 		hdev->rst_stats.imp_rst_cnt++;
2988 		return HCLGE_VECTOR0_EVENT_RST;
2989 	}
2990 
2991 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2992 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2993 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2994 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2995 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2996 		hdev->rst_stats.global_rst_cnt++;
2997 		return HCLGE_VECTOR0_EVENT_RST;
2998 	}
2999 
3000 	/* check for vector0 msix event source */
3001 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3002 		*clearval = msix_src_reg;
3003 		return HCLGE_VECTOR0_EVENT_ERR;
3004 	}
3005 
3006 	/* check for vector0 mailbox(=CMDQ RX) event source */
3007 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3008 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3009 		*clearval = cmdq_src_reg;
3010 		return HCLGE_VECTOR0_EVENT_MBX;
3011 	}
3012 
3013 	/* print other vector0 event source */
3014 	dev_info(&hdev->pdev->dev,
3015 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3016 		 cmdq_src_reg, msix_src_reg);
3017 	*clearval = msix_src_reg;
3018 
3019 	return HCLGE_VECTOR0_EVENT_OTHER;
3020 }
3021 
3022 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3023 				    u32 regclr)
3024 {
3025 	switch (event_type) {
3026 	case HCLGE_VECTOR0_EVENT_RST:
3027 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3028 		break;
3029 	case HCLGE_VECTOR0_EVENT_MBX:
3030 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3031 		break;
3032 	default:
3033 		break;
3034 	}
3035 }
3036 
3037 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3038 {
3039 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3040 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3041 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3042 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3043 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3044 }
3045 
3046 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3047 {
3048 	writel(enable ? 1 : 0, vector->addr);
3049 }
3050 
3051 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3052 {
3053 	struct hclge_dev *hdev = data;
3054 	u32 clearval = 0;
3055 	u32 event_cause;
3056 
3057 	hclge_enable_vector(&hdev->misc_vector, false);
3058 	event_cause = hclge_check_event_cause(hdev, &clearval);
3059 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3061 	switch (event_cause) {
3062 	case HCLGE_VECTOR0_EVENT_ERR:
3063 		/* we do not know what type of reset is required now. This could
3064 		 * only be decided after we fetch the type of errors which
3065 		 * caused this event. Therefore, we will do below for now:
3066 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred the type of reset to be used.
		 * 2. Schedule the reset service task.
		 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
		 *    will fetch the correct type of reset. This would be done
		 *    by first decoding the types of errors.
3072 		 */
3073 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3074 		/* fall through */
3075 	case HCLGE_VECTOR0_EVENT_RST:
3076 		hclge_reset_task_schedule(hdev);
3077 		break;
3078 	case HCLGE_VECTOR0_EVENT_MBX:
3079 		/* If we are here then,
3080 		 * 1. Either we are not handling any mbx task and we are not
3081 		 *    scheduled as well
3082 		 *                        OR
3083 		 * 2. We could be handling a mbx task but nothing more is
3084 		 *    scheduled.
3085 		 * In both cases, we should schedule mbx task as there are more
3086 		 * mbx messages reported by this interrupt.
3087 		 */
3088 		hclge_mbx_task_schedule(hdev);
3089 		break;
3090 	default:
3091 		dev_warn(&hdev->pdev->dev,
3092 			 "received unknown or unhandled event of vector0\n");
3093 		break;
3094 	}
3095 
3096 	hclge_clear_event_cause(hdev, event_cause, clearval);
3097 
	/* Enable the interrupt if it is not caused by reset. When clearval
	 * equals 0, it means the interrupt status may have been cleared by
	 * hardware before the driver read the status register. In this
	 * case, the vector0 interrupt should also be enabled.
	 */
	if (!clearval || event_cause == HCLGE_VECTOR0_EVENT_MBX)
		hclge_enable_vector(&hdev->misc_vector, true);
3107 
3108 	return IRQ_HANDLED;
3109 }
3110 
3111 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3112 {
3113 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3114 		dev_warn(&hdev->pdev->dev,
3115 			 "vector(vector_id %d) has been freed.\n", vector_id);
3116 		return;
3117 	}
3118 
3119 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3120 	hdev->num_msi_left += 1;
3121 	hdev->num_msi_used -= 1;
3122 }
3123 
3124 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3125 {
3126 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3127 
3128 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3129 
3130 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3131 	hdev->vector_status[0] = 0;
3132 
3133 	hdev->num_msi_left -= 1;
3134 	hdev->num_msi_used += 1;
3135 }
3136 
3137 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3138 				      const cpumask_t *mask)
3139 {
3140 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3141 					      affinity_notify);
3142 
3143 	cpumask_copy(&hdev->affinity_mask, mask);
3144 }
3145 
3146 static void hclge_irq_affinity_release(struct kref *ref)
3147 {
3148 }
3149 
3150 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3151 {
3152 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3153 			      &hdev->affinity_mask);
3154 
3155 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3156 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3157 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3158 				  &hdev->affinity_notify);
3159 }
3160 
3161 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3162 {
3163 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3164 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3165 }
3166 
3167 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3168 {
3169 	int ret;
3170 
3171 	hclge_get_misc_vector(hdev);
3172 
3173 	/* this would be explicitly freed in the end */
3174 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3175 		 HCLGE_NAME, pci_name(hdev->pdev));
3176 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3177 			  0, hdev->misc_vector.name, hdev);
3178 	if (ret) {
3179 		hclge_free_vector(hdev, 0);
3180 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3181 			hdev->misc_vector.vector_irq);
3182 	}
3183 
3184 	return ret;
3185 }
3186 
3187 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3188 {
3189 	free_irq(hdev->misc_vector.vector_irq, hdev);
3190 	hclge_free_vector(hdev, 0);
3191 }
3192 
3193 int hclge_notify_client(struct hclge_dev *hdev,
3194 			enum hnae3_reset_notify_type type)
3195 {
3196 	struct hnae3_client *client = hdev->nic_client;
3197 	u16 i;
3198 
3199 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3200 		return 0;
3201 
3202 	if (!client->ops->reset_notify)
3203 		return -EOPNOTSUPP;
3204 
3205 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3206 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3207 		int ret;
3208 
3209 		ret = client->ops->reset_notify(handle, type);
3210 		if (ret) {
3211 			dev_err(&hdev->pdev->dev,
3212 				"notify nic client failed %d(%d)\n", type, ret);
3213 			return ret;
3214 		}
3215 	}
3216 
3217 	return 0;
3218 }
3219 
3220 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3221 				    enum hnae3_reset_notify_type type)
3222 {
3223 	struct hnae3_client *client = hdev->roce_client;
3224 	int ret = 0;
3225 	u16 i;
3226 
3227 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3228 		return 0;
3229 
3230 	if (!client->ops->reset_notify)
3231 		return -EOPNOTSUPP;
3232 
3233 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3234 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3235 
3236 		ret = client->ops->reset_notify(handle, type);
3237 		if (ret) {
3238 			dev_err(&hdev->pdev->dev,
3239 				"notify roce client failed %d(%d)",
3240 				type, ret);
3241 			return ret;
3242 		}
3243 	}
3244 
3245 	return ret;
3246 }
3247 
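/* Poll the reset status register until hardware clears the reset bit, or
 * give up with -EBUSY after HCLGE_RESET_WAIT_CNT polls.
 */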
3248 static int hclge_reset_wait(struct hclge_dev *hdev)
3249 {
#define HCLGE_RESET_WAIT_MS	100
3251 #define HCLGE_RESET_WAIT_CNT	350
3252 
3253 	u32 val, reg, reg_bit;
3254 	u32 cnt = 0;
3255 
3256 	switch (hdev->reset_type) {
3257 	case HNAE3_IMP_RESET:
3258 		reg = HCLGE_GLOBAL_RESET_REG;
3259 		reg_bit = HCLGE_IMP_RESET_BIT;
3260 		break;
3261 	case HNAE3_GLOBAL_RESET:
3262 		reg = HCLGE_GLOBAL_RESET_REG;
3263 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3264 		break;
3265 	case HNAE3_FUNC_RESET:
3266 		reg = HCLGE_FUN_RST_ING;
3267 		reg_bit = HCLGE_FUN_RST_ING_B;
3268 		break;
3269 	default:
3270 		dev_err(&hdev->pdev->dev,
3271 			"Wait for unsupported reset type: %d\n",
3272 			hdev->reset_type);
3273 		return -EINVAL;
3274 	}
3275 
3276 	val = hclge_read_dev(&hdev->hw, reg);
3277 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3279 		val = hclge_read_dev(&hdev->hw, reg);
3280 		cnt++;
3281 	}
3282 
3283 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3284 		dev_warn(&hdev->pdev->dev,
3285 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3286 		return -EBUSY;
3287 	}
3288 
3289 	return 0;
3290 }
3291 
3292 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3293 {
3294 	struct hclge_vf_rst_cmd *req;
3295 	struct hclge_desc desc;
3296 
3297 	req = (struct hclge_vf_rst_cmd *)desc.data;
3298 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3299 	req->dest_vfid = func_id;
3300 
3301 	if (reset)
3302 		req->vf_rst = 0x1;
3303 
3304 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3305 }
3306 
3307 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3308 {
3309 	int i;
3310 
3311 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3312 		struct hclge_vport *vport = &hdev->vport[i];
3313 		int ret;
3314 
3315 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3316 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3317 		if (ret) {
3318 			dev_err(&hdev->pdev->dev,
3319 				"set vf(%u) rst failed %d!\n",
3320 				vport->vport_id, ret);
3321 			return ret;
3322 		}
3323 
3324 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3325 			continue;
3326 
3327 		/* Inform VF to process the reset.
3328 		 * hclge_inform_reset_assert_to_vf may fail if VF
3329 		 * driver is not loaded.
3330 		 */
3331 		ret = hclge_inform_reset_assert_to_vf(vport);
3332 		if (ret)
3333 			dev_warn(&hdev->pdev->dev,
3334 				 "inform reset to vf(%u) failed %d!\n",
3335 				 vport->vport_id, ret);
3336 	}
3337 
3338 	return 0;
3339 }
3340 
3341 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3342 {
3343 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3344 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3345 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3346 		return;
3347 
3348 	hclge_mbx_handler(hdev);
3349 
3350 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3351 }
3352 
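/* Wait for all VFs to report that they have stopped IO before a PF or FLR
 * reset proceeds, handling pending mailbox messages while polling.
 */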
3353 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3354 {
3355 	struct hclge_pf_rst_sync_cmd *req;
3356 	struct hclge_desc desc;
3357 	int cnt = 0;
3358 	int ret;
3359 
3360 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3361 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3362 
3363 	do {
		/* VF needs to down its netdev by mbx during PF or FLR reset */
3365 		hclge_mailbox_service_task(hdev);
3366 
3367 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
3369 		 * 100 ms for VF to stop IO
3370 		 */
3371 		if (ret == -EOPNOTSUPP) {
3372 			msleep(HCLGE_RESET_SYNC_TIME);
3373 			return;
3374 		} else if (ret) {
3375 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3376 				 ret);
3377 			return;
3378 		} else if (req->all_vf_ready) {
3379 			return;
3380 		}
3381 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3382 		hclge_cmd_reuse_desc(&desc, true);
3383 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3384 
3385 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3386 }
3387 
3388 void hclge_report_hw_error(struct hclge_dev *hdev,
3389 			   enum hnae3_hw_error_type type)
3390 {
3391 	struct hnae3_client *client = hdev->nic_client;
3392 	u16 i;
3393 
3394 	if (!client || !client->ops->process_hw_error ||
3395 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3396 		return;
3397 
3398 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3399 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3400 }
3401 
3402 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3403 {
3404 	u32 reg_val;
3405 
3406 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3407 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3408 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3409 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3410 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3411 	}
3412 
3413 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3414 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3415 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3416 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3417 	}
3418 }
3419 
3420 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3421 {
3422 	struct hclge_desc desc;
3423 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3424 	int ret;
3425 
3426 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3427 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3428 	req->fun_reset_vfid = func_id;
3429 
3430 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3431 	if (ret)
3432 		dev_err(&hdev->pdev->dev,
3433 			"send function reset cmd fail, status =%d\n", ret);
3434 
3435 	return ret;
3436 }
3437 
3438 static void hclge_do_reset(struct hclge_dev *hdev)
3439 {
3440 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3441 	struct pci_dev *pdev = hdev->pdev;
3442 	u32 val;
3443 
3444 	if (hclge_get_hw_reset_stat(handle)) {
3445 		dev_info(&pdev->dev, "hardware reset not finish\n");
3446 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3447 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3448 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3449 		return;
3450 	}
3451 
3452 	switch (hdev->reset_type) {
3453 	case HNAE3_GLOBAL_RESET:
3454 		dev_info(&pdev->dev, "global reset requested\n");
3455 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3456 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3457 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3458 		break;
3459 	case HNAE3_FUNC_RESET:
3460 		dev_info(&pdev->dev, "PF reset requested\n");
3461 		/* schedule again to check later */
3462 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3463 		hclge_reset_task_schedule(hdev);
3464 		break;
3465 	default:
3466 		dev_warn(&pdev->dev,
3467 			 "unsupported reset type: %d\n", hdev->reset_type);
3468 		break;
3469 	}
3470 }
3471 
3472 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3473 						   unsigned long *addr)
3474 {
3475 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3476 	struct hclge_dev *hdev = ae_dev->priv;
3477 
3478 	/* first, resolve any unknown reset type to the known type(s) */
3479 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3480 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3481 					HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3482 		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
3484 		 */
3485 		if (hclge_handle_hw_msix_error(hdev, addr))
3486 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3487 				 msix_sts_reg);
3488 
3489 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused the
		 * interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced a
		 * new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
3497 		hclge_enable_vector(&hdev->misc_vector, true);
3498 	}
3499 
3500 	/* return the highest priority reset level amongst all */
3501 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3502 		rst_level = HNAE3_IMP_RESET;
3503 		clear_bit(HNAE3_IMP_RESET, addr);
3504 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3505 		clear_bit(HNAE3_FUNC_RESET, addr);
3506 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3507 		rst_level = HNAE3_GLOBAL_RESET;
3508 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3509 		clear_bit(HNAE3_FUNC_RESET, addr);
3510 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3511 		rst_level = HNAE3_FUNC_RESET;
3512 		clear_bit(HNAE3_FUNC_RESET, addr);
3513 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3514 		rst_level = HNAE3_FLR_RESET;
3515 		clear_bit(HNAE3_FLR_RESET, addr);
3516 	}
3517 
3518 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3519 	    rst_level < hdev->reset_type)
3520 		return HNAE3_NONE_RESET;
3521 
3522 	return rst_level;
3523 }
3524 
3525 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3526 {
3527 	u32 clearval = 0;
3528 
3529 	switch (hdev->reset_type) {
3530 	case HNAE3_IMP_RESET:
3531 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3532 		break;
3533 	case HNAE3_GLOBAL_RESET:
3534 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3535 		break;
3536 	default:
3537 		break;
3538 	}
3539 
3540 	if (!clearval)
3541 		return;
3542 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3546 	if (hdev->pdev->revision == 0x20)
3547 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3548 				clearval);
3549 
3550 	hclge_enable_vector(&hdev->misc_vector, true);
3551 }
3552 
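/* Toggle the software reset ready bit in the NIC CSQ depth register. This is
 * the handshake used to tell hardware whether the driver has finished its
 * preparatory work and is ready for the reset to proceed.
 */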
3553 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3554 {
3555 	u32 reg_val;
3556 
3557 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3558 	if (enable)
3559 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3560 	else
3561 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3562 
3563 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3564 }
3565 
3566 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3567 {
3568 	int ret;
3569 
3570 	ret = hclge_set_all_vf_rst(hdev, true);
3571 	if (ret)
3572 		return ret;
3573 
3574 	hclge_func_reset_sync_vf(hdev);
3575 
3576 	return 0;
3577 }
3578 
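/* Do the reset-type specific preparation (notify VFs, assert the function
 * reset command, or acknowledge an IMP reset), then perform the handshake to
 * tell hardware that the preparatory work is done.
 */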
3579 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3580 {
3581 	u32 reg_val;
3582 	int ret = 0;
3583 
3584 	switch (hdev->reset_type) {
3585 	case HNAE3_FUNC_RESET:
3586 		ret = hclge_func_reset_notify_vf(hdev);
3587 		if (ret)
3588 			return ret;
3589 
3590 		ret = hclge_func_reset_cmd(hdev, 0);
3591 		if (ret) {
3592 			dev_err(&hdev->pdev->dev,
3593 				"asserting function reset fail %d!\n", ret);
3594 			return ret;
3595 		}
3596 
		/* After performing the PF reset, it is not necessary to do any
		 * mailbox handling or send any command to the firmware,
		 * because any mailbox handling or firmware command is only
		 * valid after hclge_cmd_init is called.
		 */
3602 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3603 		hdev->rst_stats.pf_rst_cnt++;
3604 		break;
3605 	case HNAE3_FLR_RESET:
3606 		ret = hclge_func_reset_notify_vf(hdev);
3607 		if (ret)
3608 			return ret;
3609 		break;
3610 	case HNAE3_IMP_RESET:
3611 		hclge_handle_imp_error(hdev);
3612 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3613 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3614 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3615 		break;
3616 	default:
3617 		break;
3618 	}
3619 
3620 	/* inform hardware that preparatory work is done */
3621 	msleep(HCLGE_RESET_SYNC_TIME);
3622 	hclge_reset_handshake(hdev, true);
3623 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3624 
3625 	return ret;
3626 }
3627 
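/* Decide how to proceed after a failed reset attempt. Returns true if the
 * reset task should be rescheduled (a reset is still pending or the retry
 * limit has not been reached), false if the failure is final or a new reset
 * interrupt will take over.
 */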
3628 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3629 {
3630 #define MAX_RESET_FAIL_CNT 5
3631 
3632 	if (hdev->reset_pending) {
3633 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3634 			 hdev->reset_pending);
3635 		return true;
3636 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3637 		   HCLGE_RESET_INT_M) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because of a new reset interrupt\n");
3640 		hclge_clear_reset_cause(hdev);
3641 		return false;
3642 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3643 		hdev->rst_stats.reset_fail_cnt++;
3644 		set_bit(hdev->reset_type, &hdev->reset_pending);
3645 		dev_info(&hdev->pdev->dev,
3646 			 "re-schedule reset task(%u)\n",
3647 			 hdev->rst_stats.reset_fail_cnt);
3648 		return true;
3649 	}
3650 
3651 	hclge_clear_reset_cause(hdev);
3652 
3653 	/* recover the handshake status when reset fail */
3654 	hclge_reset_handshake(hdev, true);
3655 
3656 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3657 
3658 	hclge_dbg_dump_rst_info(hdev);
3659 
3660 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3661 
3662 	return false;
3663 }
3664 
3665 static int hclge_set_rst_done(struct hclge_dev *hdev)
3666 {
3667 	struct hclge_pf_rst_done_cmd *req;
3668 	struct hclge_desc desc;
3669 	int ret;
3670 
3671 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3672 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3673 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3674 
3675 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3676 	/* To be compatible with the old firmware, which does not support
3677 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3678 	 * return success
3679 	 */
3680 	if (ret == -EOPNOTSUPP) {
3681 		dev_warn(&hdev->pdev->dev,
3682 			 "current firmware does not support command(0x%x)!\n",
3683 			 HCLGE_OPC_PF_RST_DONE);
3684 		return 0;
3685 	} else if (ret) {
3686 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3687 			ret);
3688 	}
3689 
3690 	return ret;
3691 }
3692 
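/* Undo the reset preparation before bringing the device back up: clear the
 * VF reset state for function/FLR resets, or report PF reset done to the
 * firmware for global/IMP resets, then clear the handshake status.
 */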
3693 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3694 {
3695 	int ret = 0;
3696 
3697 	switch (hdev->reset_type) {
3698 	case HNAE3_FUNC_RESET:
3699 		/* fall through */
3700 	case HNAE3_FLR_RESET:
3701 		ret = hclge_set_all_vf_rst(hdev, false);
3702 		break;
3703 	case HNAE3_GLOBAL_RESET:
3704 		/* fall through */
3705 	case HNAE3_IMP_RESET:
3706 		ret = hclge_set_rst_done(hdev);
3707 		break;
3708 	default:
3709 		break;
3710 	}
3711 
3712 	/* clear up the handshake status after re-initialize done */
3713 	hclge_reset_handshake(hdev, false);
3714 
3715 	return ret;
3716 }
3717 
3718 static int hclge_reset_stack(struct hclge_dev *hdev)
3719 {
3720 	int ret;
3721 
3722 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3723 	if (ret)
3724 		return ret;
3725 
3726 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3727 	if (ret)
3728 		return ret;
3729 
3730 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3731 	if (ret)
3732 		return ret;
3733 
3734 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3735 }
3736 
3737 static int hclge_reset_prepare(struct hclge_dev *hdev)
3738 {
3739 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3740 	int ret;
3741 
3742 	/* Initialize ae_dev reset status as well, in case enet layer wants to
3743 	 * know if device is undergoing reset
3744 	 */
3745 	ae_dev->reset_type = hdev->reset_type;
3746 	hdev->rst_stats.reset_cnt++;
3747 	/* perform reset of the stack & ae device for a client */
3748 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3749 	if (ret)
3750 		return ret;
3751 
3752 	rtnl_lock();
3753 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3754 	rtnl_unlock();
3755 	if (ret)
3756 		return ret;
3757 
3758 	return hclge_reset_prepare_wait(hdev);
3759 }
3760 
3761 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3762 {
3763 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3764 	enum hnae3_reset_type reset_level;
3765 	int ret;
3766 
3767 	hdev->rst_stats.hw_reset_done_cnt++;
3768 
3769 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3770 	if (ret)
3771 		return ret;
3772 
3773 	rtnl_lock();
3774 	ret = hclge_reset_stack(hdev);
3775 	rtnl_unlock();
3776 	if (ret)
3777 		return ret;
3778 
3779 	hclge_clear_reset_cause(hdev);
3780 
3781 	ret = hclge_reset_prepare_up(hdev);
3782 	if (ret)
3783 		return ret;
3784 
3786 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3787 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3788 	 * times
3789 	 */
3790 	if (ret &&
3791 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3792 		return ret;
3793 
3794 	rtnl_lock();
3795 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3796 	rtnl_unlock();
3797 	if (ret)
3798 		return ret;
3799 
3800 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3801 	if (ret)
3802 		return ret;
3803 
3804 	hdev->last_reset_time = jiffies;
3805 	hdev->rst_stats.reset_fail_cnt = 0;
3806 	hdev->rst_stats.reset_done_cnt++;
3807 	ae_dev->reset_type = HNAE3_NONE_RESET;
3808 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3809 
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to be fixed.
	 */
3814 	reset_level = hclge_get_reset_level(ae_dev,
3815 					    &hdev->default_reset_request);
3816 	if (reset_level != HNAE3_NONE_RESET)
3817 		set_bit(reset_level, &hdev->reset_request);
3818 
3819 	return 0;
3820 }
3821 
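/* Perform a full reset in three phases: prepare (notify clients and assert
 * the reset), wait for hardware to finish, and rebuild (re-initialize the ae
 * device and clients). On failure, hclge_reset_err_handle() decides whether
 * to reschedule the reset task.
 */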
3822 static void hclge_reset(struct hclge_dev *hdev)
3823 {
3824 	if (hclge_reset_prepare(hdev))
3825 		goto err_reset;
3826 
3827 	if (hclge_reset_wait(hdev))
3828 		goto err_reset;
3829 
3830 	if (hclge_reset_rebuild(hdev))
3831 		goto err_reset;
3832 
3833 	return;
3834 
3835 err_reset:
3836 	if (hclge_reset_err_handle(hdev))
3837 		hclge_reset_task_schedule(hdev);
3838 }
3839 
3840 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3841 {
3842 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3843 	struct hclge_dev *hdev = ae_dev->priv;
3844 
	/* We might end up getting called broadly because of 2 below cases:
	 * 1. A recoverable error was conveyed through APEI and the only way
	 *    to bring back normalcy is to reset.
	 * 2. A new reset request from the stack due to a timeout.
	 *
	 * For the first case, the error event might not have an ae handle
	 * available. Check if this is a new reset request and we are not
	 * here just because the last reset attempt did not succeed and the
	 * watchdog hit us again. We will know this if the last reset request
	 * did not occur very recently (watchdog timer = 5*HZ, so check after
	 * a sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. If it is a repeat of the most
	 * recent request, we want to make sure we throttle it, so another
	 * reset is not allowed before HCLGE_RESET_INTERVAL has elapsed.
	 */
3860 	if (!handle)
3861 		handle = &hdev->vport[0].nic;
3862 
3863 	if (time_before(jiffies, (hdev->last_reset_time +
3864 				  HCLGE_RESET_INTERVAL))) {
3865 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3866 		return;
3867 	} else if (hdev->default_reset_request) {
3868 		hdev->reset_level =
3869 			hclge_get_reset_level(ae_dev,
3870 					      &hdev->default_reset_request);
3871 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3872 		hdev->reset_level = HNAE3_FUNC_RESET;
3873 	}
3874 
3875 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3876 		 hdev->reset_level);
3877 
3878 	/* request reset & schedule reset task */
3879 	set_bit(hdev->reset_level, &hdev->reset_request);
3880 	hclge_reset_task_schedule(hdev);
3881 
3882 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3883 		hdev->reset_level++;
3884 }
3885 
3886 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3887 					enum hnae3_reset_type rst_type)
3888 {
3889 	struct hclge_dev *hdev = ae_dev->priv;
3890 
3891 	set_bit(rst_type, &hdev->default_reset_request);
3892 }
3893 
3894 static void hclge_reset_timer(struct timer_list *t)
3895 {
3896 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3897 
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
3901 	if (!hdev->default_reset_request)
3902 		return;
3903 
3904 	dev_info(&hdev->pdev->dev,
3905 		 "triggering reset in reset timer\n");
3906 	hclge_reset_event(hdev->pdev, NULL);
3907 }
3908 
3909 static void hclge_reset_subtask(struct hclge_dev *hdev)
3910 {
3911 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3912 
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, we need to wait for the
	 * hardware to complete the reset.
	 *    a. If we are able to figure out in reasonable time that the
	 *       hardware has fully reset, we can proceed with the driver and
	 *       client reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
3922 	hdev->last_reset_time = jiffies;
3923 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3924 	if (hdev->reset_type != HNAE3_NONE_RESET)
3925 		hclge_reset(hdev);
3926 
3927 	/* check if we got any *new* reset requests to be honored */
3928 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3929 	if (hdev->reset_type != HNAE3_NONE_RESET)
3930 		hclge_do_reset(hdev);
3931 
3932 	hdev->reset_type = HNAE3_NONE_RESET;
3933 }
3934 
3935 static void hclge_reset_service_task(struct hclge_dev *hdev)
3936 {
3937 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3938 		return;
3939 
3940 	down(&hdev->reset_sem);
3941 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3942 
3943 	hclge_reset_subtask(hdev);
3944 
3945 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3946 	up(&hdev->reset_sem);
3947 }
3948 
3949 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3950 {
3951 	int i;
3952 
	/* start from vport 1; the PF (vport 0) is always alive */
3954 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3955 		struct hclge_vport *vport = &hdev->vport[i];
3956 
3957 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3958 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3959 
3960 		/* If vf is not alive, set to default value */
3961 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3962 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3963 	}
3964 }
3965 
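/* Periodic work: the link status is always refreshed, while the remaining
 * housekeeping (vport alive, statistics, port info, VLAN filter sync and
 * aRFS expiry) is throttled to run at most once per second.
 */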
3966 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3967 {
3968 	unsigned long delta = round_jiffies_relative(HZ);
3969 
3970 	/* Always handle the link updating to make sure link state is
3971 	 * updated when it is triggered by mbx.
3972 	 */
3973 	hclge_update_link_status(hdev);
3974 
3975 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3976 		delta = jiffies - hdev->last_serv_processed;
3977 
3978 		if (delta < round_jiffies_relative(HZ)) {
3979 			delta = round_jiffies_relative(HZ) - delta;
3980 			goto out;
3981 		}
3982 	}
3983 
3984 	hdev->serv_processed_cnt++;
3985 	hclge_update_vport_alive(hdev);
3986 
3987 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3988 		hdev->last_serv_processed = jiffies;
3989 		goto out;
3990 	}
3991 
3992 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3993 		hclge_update_stats_for_all(hdev);
3994 
3995 	hclge_update_port_info(hdev);
3996 	hclge_sync_vlan_filter(hdev);
3997 
3998 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3999 		hclge_rfs_filter_expire(hdev);
4000 
4001 	hdev->last_serv_processed = jiffies;
4002 
4003 out:
4004 	hclge_task_schedule(hdev, delta);
4005 }
4006 
4007 static void hclge_service_task(struct work_struct *work)
4008 {
4009 	struct hclge_dev *hdev =
4010 		container_of(work, struct hclge_dev, service_task.work);
4011 
4012 	hclge_reset_service_task(hdev);
4013 	hclge_mailbox_service_task(hdev);
4014 	hclge_periodic_service_task(hdev);
4015 
4016 	/* Handle reset and mbx again in case periodical task delays the
4017 	 * handling by calling hclge_task_schedule() in
4018 	 * hclge_periodic_service_task().
4019 	 */
4020 	hclge_reset_service_task(hdev);
4021 	hclge_mailbox_service_task(hdev);
4022 }
4023 
4024 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4025 {
4026 	/* VF handle has no client */
4027 	if (!handle->client)
4028 		return container_of(handle, struct hclge_vport, nic);
4029 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4030 		return container_of(handle, struct hclge_vport, roce);
4031 	else
4032 		return container_of(handle, struct hclge_vport, nic);
4033 }
4034 
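/* Allocate up to @vector_num unused MSI-X vectors to the vport and fill in
 * the irq number and the io address of its control registers for each. The
 * request is capped by the NIC vectors and the vectors still left. Returns
 * the number of vectors actually allocated.
 */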
4035 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4036 			    struct hnae3_vector_info *vector_info)
4037 {
4038 	struct hclge_vport *vport = hclge_get_vport(handle);
4039 	struct hnae3_vector_info *vector = vector_info;
4040 	struct hclge_dev *hdev = vport->back;
4041 	int alloc = 0;
4042 	int i, j;
4043 
4044 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4045 	vector_num = min(hdev->num_msi_left, vector_num);
4046 
4047 	for (j = 0; j < vector_num; j++) {
4048 		for (i = 1; i < hdev->num_msi; i++) {
4049 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4050 				vector->vector = pci_irq_vector(hdev->pdev, i);
4051 				vector->io_addr = hdev->hw.io_base +
4052 					HCLGE_VECTOR_REG_BASE +
4053 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4054 					vport->vport_id *
4055 					HCLGE_VECTOR_VF_OFFSET;
4056 				hdev->vector_status[i] = vport->vport_id;
4057 				hdev->vector_irq[i] = vector->vector;
4058 
4059 				vector++;
4060 				alloc++;
4061 
4062 				break;
4063 			}
4064 		}
4065 	}
4066 	hdev->num_msi_left -= alloc;
4067 	hdev->num_msi_used += alloc;
4068 
4069 	return alloc;
4070 }
4071 
4072 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4073 {
4074 	int i;
4075 
4076 	for (i = 0; i < hdev->num_msi; i++)
4077 		if (vector == hdev->vector_irq[i])
4078 			return i;
4079 
4080 	return -EINVAL;
4081 }
4082 
4083 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4084 {
4085 	struct hclge_vport *vport = hclge_get_vport(handle);
4086 	struct hclge_dev *hdev = vport->back;
4087 	int vector_id;
4088 
4089 	vector_id = hclge_get_vector_index(hdev, vector);
4090 	if (vector_id < 0) {
4091 		dev_err(&hdev->pdev->dev,
4092 			"Get vector index fail. vector = %d\n", vector);
4093 		return vector_id;
4094 	}
4095 
4096 	hclge_free_vector(hdev, vector_id);
4097 
4098 	return 0;
4099 }
4100 
4101 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4102 {
4103 	return HCLGE_RSS_KEY_SIZE;
4104 }
4105 
4106 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4107 {
4108 	return HCLGE_RSS_IND_TBL_SIZE;
4109 }
4110 
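/* Program the RSS hash algorithm and hash key to hardware. The key is
 * written in chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor
 * per chunk.
 */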
4111 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4112 				  const u8 hfunc, const u8 *key)
4113 {
4114 	struct hclge_rss_config_cmd *req;
4115 	unsigned int key_offset = 0;
4116 	struct hclge_desc desc;
4117 	int key_counts;
4118 	int key_size;
4119 	int ret;
4120 
4121 	key_counts = HCLGE_RSS_KEY_SIZE;
4122 	req = (struct hclge_rss_config_cmd *)desc.data;
4123 
4124 	while (key_counts) {
4125 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4126 					   false);
4127 
4128 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4129 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4130 
4131 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4132 		memcpy(req->hash_key,
4133 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4134 
4135 		key_counts -= key_size;
4136 		key_offset++;
4137 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4138 		if (ret) {
4139 			dev_err(&hdev->pdev->dev,
4140 				"Configure RSS config fail, status = %d\n",
4141 				ret);
4142 			return ret;
4143 		}
4144 	}
4145 	return 0;
4146 }
4147 
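/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE
 * entries per command descriptor.
 */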
4148 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4149 {
4150 	struct hclge_rss_indirection_table_cmd *req;
4151 	struct hclge_desc desc;
4152 	int i, j;
4153 	int ret;
4154 
4155 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4156 
4157 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4158 		hclge_cmd_setup_basic_desc
4159 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4160 
4161 		req->start_table_index =
4162 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4163 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4164 
4165 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4166 			req->rss_result[j] =
4167 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4168 
4169 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4170 		if (ret) {
4171 			dev_err(&hdev->pdev->dev,
4172 				"Configure rss indir table fail,status = %d\n",
4173 				ret);
4174 			return ret;
4175 		}
4176 	}
4177 	return 0;
4178 }
4179 
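/* Configure the per-TC RSS mode: the valid bit, tc_size and tc_offset for
 * each TC.
 */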
4180 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4181 				 u16 *tc_size, u16 *tc_offset)
4182 {
4183 	struct hclge_rss_tc_mode_cmd *req;
4184 	struct hclge_desc desc;
4185 	int ret;
4186 	int i;
4187 
4188 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4189 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4190 
4191 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4192 		u16 mode = 0;
4193 
4194 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4195 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4196 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4197 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4198 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4199 
4200 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4201 	}
4202 
4203 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4204 	if (ret)
4205 		dev_err(&hdev->pdev->dev,
4206 			"Configure rss tc mode fail, status = %d\n", ret);
4207 
4208 	return ret;
4209 }
4210 
4211 static void hclge_get_rss_type(struct hclge_vport *vport)
4212 {
4213 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4214 	    vport->rss_tuple_sets.ipv4_udp_en ||
4215 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4216 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4217 	    vport->rss_tuple_sets.ipv6_udp_en ||
4218 	    vport->rss_tuple_sets.ipv6_sctp_en)
4219 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4220 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4221 		 vport->rss_tuple_sets.ipv6_fragment_en)
4222 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4223 	else
4224 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4225 }
4226 
4227 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4228 {
4229 	struct hclge_rss_input_tuple_cmd *req;
4230 	struct hclge_desc desc;
4231 	int ret;
4232 
4233 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4234 
4235 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4236 
4237 	/* Get the tuple cfg from pf */
4238 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4239 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4240 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4241 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4242 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4243 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4244 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4245 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4246 	hclge_get_rss_type(&hdev->vport[0]);
4247 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4248 	if (ret)
4249 		dev_err(&hdev->pdev->dev,
4250 			"Configure rss input fail, status = %d\n", ret);
4251 	return ret;
4252 }
4253 
4254 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4255 			 u8 *key, u8 *hfunc)
4256 {
4257 	struct hclge_vport *vport = hclge_get_vport(handle);
4258 	int i;
4259 
4260 	/* Get hash algorithm */
4261 	if (hfunc) {
4262 		switch (vport->rss_algo) {
4263 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4264 			*hfunc = ETH_RSS_HASH_TOP;
4265 			break;
4266 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4267 			*hfunc = ETH_RSS_HASH_XOR;
4268 			break;
4269 		default:
4270 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4271 			break;
4272 		}
4273 	}
4274 
4275 	/* Get the RSS Key required by the user */
4276 	if (key)
4277 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4278 
4279 	/* Get indirect table */
4280 	if (indir)
4281 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4282 			indir[i] =  vport->rss_indirection_tbl[i];
4283 
4284 	return 0;
4285 }
4286 
4287 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4288 			 const  u8 *key, const  u8 hfunc)
4289 {
4290 	struct hclge_vport *vport = hclge_get_vport(handle);
4291 	struct hclge_dev *hdev = vport->back;
4292 	u8 hash_algo;
4293 	int ret, i;
4294 
	/* Set the RSS Hash Key if specified by the user */
4296 	if (key) {
4297 		switch (hfunc) {
4298 		case ETH_RSS_HASH_TOP:
4299 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4300 			break;
4301 		case ETH_RSS_HASH_XOR:
4302 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4303 			break;
4304 		case ETH_RSS_HASH_NO_CHANGE:
4305 			hash_algo = vport->rss_algo;
4306 			break;
4307 		default:
4308 			return -EINVAL;
4309 		}
4310 
4311 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4312 		if (ret)
4313 			return ret;
4314 
		/* Update the shadow RSS key with the user specified key */
4316 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4317 		vport->rss_algo = hash_algo;
4318 	}
4319 
4320 	/* Update the shadow RSS table with user specified qids */
4321 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4322 		vport->rss_indirection_tbl[i] = indir[i];
4323 
4324 	/* Update the hardware */
4325 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4326 }
4327 
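/* Translate the ethtool RXH_* flags in @nfc into the driver's hash bit
 * flags: source/destination IP, source/destination L4 port, and the VLAN
 * tag bit for SCTP flows.
 */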
4328 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4329 {
4330 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4331 
4332 	if (nfc->data & RXH_L4_B_2_3)
4333 		hash_sets |= HCLGE_D_PORT_BIT;
4334 	else
4335 		hash_sets &= ~HCLGE_D_PORT_BIT;
4336 
4337 	if (nfc->data & RXH_IP_SRC)
4338 		hash_sets |= HCLGE_S_IP_BIT;
4339 	else
4340 		hash_sets &= ~HCLGE_S_IP_BIT;
4341 
4342 	if (nfc->data & RXH_IP_DST)
4343 		hash_sets |= HCLGE_D_IP_BIT;
4344 	else
4345 		hash_sets &= ~HCLGE_D_IP_BIT;
4346 
4347 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4348 		hash_sets |= HCLGE_V_TAG_BIT;
4349 
4350 	return hash_sets;
4351 }
4352 
4353 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4354 			       struct ethtool_rxnfc *nfc)
4355 {
4356 	struct hclge_vport *vport = hclge_get_vport(handle);
4357 	struct hclge_dev *hdev = vport->back;
4358 	struct hclge_rss_input_tuple_cmd *req;
4359 	struct hclge_desc desc;
4360 	u8 tuple_sets;
4361 	int ret;
4362 
4363 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4364 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4365 		return -EINVAL;
4366 
4367 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4368 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4369 
4370 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4371 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4372 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4373 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4374 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4375 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4376 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4377 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4378 
4379 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4380 	switch (nfc->flow_type) {
4381 	case TCP_V4_FLOW:
4382 		req->ipv4_tcp_en = tuple_sets;
4383 		break;
4384 	case TCP_V6_FLOW:
4385 		req->ipv6_tcp_en = tuple_sets;
4386 		break;
4387 	case UDP_V4_FLOW:
4388 		req->ipv4_udp_en = tuple_sets;
4389 		break;
4390 	case UDP_V6_FLOW:
4391 		req->ipv6_udp_en = tuple_sets;
4392 		break;
4393 	case SCTP_V4_FLOW:
4394 		req->ipv4_sctp_en = tuple_sets;
4395 		break;
4396 	case SCTP_V6_FLOW:
4397 		if ((nfc->data & RXH_L4_B_0_1) ||
4398 		    (nfc->data & RXH_L4_B_2_3))
4399 			return -EINVAL;
4400 
4401 		req->ipv6_sctp_en = tuple_sets;
4402 		break;
4403 	case IPV4_FLOW:
4404 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4405 		break;
4406 	case IPV6_FLOW:
4407 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4408 		break;
4409 	default:
4410 		return -EINVAL;
4411 	}
4412 
4413 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4414 	if (ret) {
4415 		dev_err(&hdev->pdev->dev,
4416 			"Set rss tuple fail, status = %d\n", ret);
4417 		return ret;
4418 	}
4419 
4420 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4421 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4422 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4423 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4424 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4425 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4426 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4427 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4428 	hclge_get_rss_type(vport);
4429 	return 0;
4430 }
4431 
4432 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4433 			       struct ethtool_rxnfc *nfc)
4434 {
4435 	struct hclge_vport *vport = hclge_get_vport(handle);
4436 	u8 tuple_sets;
4437 
4438 	nfc->data = 0;
4439 
4440 	switch (nfc->flow_type) {
4441 	case TCP_V4_FLOW:
4442 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4443 		break;
4444 	case UDP_V4_FLOW:
4445 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4446 		break;
4447 	case TCP_V6_FLOW:
4448 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4449 		break;
4450 	case UDP_V6_FLOW:
4451 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4452 		break;
4453 	case SCTP_V4_FLOW:
4454 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4455 		break;
4456 	case SCTP_V6_FLOW:
4457 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4458 		break;
4459 	case IPV4_FLOW:
4460 	case IPV6_FLOW:
4461 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4462 		break;
4463 	default:
4464 		return -EINVAL;
4465 	}
4466 
4467 	if (!tuple_sets)
4468 		return 0;
4469 
4470 	if (tuple_sets & HCLGE_D_PORT_BIT)
4471 		nfc->data |= RXH_L4_B_2_3;
4472 	if (tuple_sets & HCLGE_S_PORT_BIT)
4473 		nfc->data |= RXH_L4_B_0_1;
4474 	if (tuple_sets & HCLGE_D_IP_BIT)
4475 		nfc->data |= RXH_IP_DST;
4476 	if (tuple_sets & HCLGE_S_IP_BIT)
4477 		nfc->data |= RXH_IP_SRC;
4478 
4479 	return 0;
4480 }
4481 
4482 static int hclge_get_tc_size(struct hnae3_handle *handle)
4483 {
4484 	struct hclge_vport *vport = hclge_get_vport(handle);
4485 	struct hclge_dev *hdev = vport->back;
4486 
4487 	return hdev->rss_size_max;
4488 }
4489 
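/* Program the RSS configuration cached in vport 0 (indirection table, hash
 * key and algorithm, input tuples and per-TC mode) into hardware.
 */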
4490 int hclge_rss_init_hw(struct hclge_dev *hdev)
4491 {
4492 	struct hclge_vport *vport = hdev->vport;
4493 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4494 	u16 rss_size = vport[0].alloc_rss_size;
4495 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4496 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4497 	u8 *key = vport[0].rss_hash_key;
4498 	u8 hfunc = vport[0].rss_algo;
4499 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4500 	u16 roundup_size;
4501 	unsigned int i;
4502 	int ret;
4503 
4504 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4505 	if (ret)
4506 		return ret;
4507 
4508 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4509 	if (ret)
4510 		return ret;
4511 
4512 	ret = hclge_set_rss_input_tuple(hdev);
4513 	if (ret)
4514 		return ret;
4515 
	/* Each TC has the same queue size, and the tc_size set to hardware is
	 * the log2 of the roundup power of two of rss_size; the actual queue
	 * size is limited by the indirection table.
	 */
4520 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4521 		dev_err(&hdev->pdev->dev,
4522 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4523 			rss_size);
4524 		return -EINVAL;
4525 	}
4526 
4527 	roundup_size = roundup_pow_of_two(rss_size);
4528 	roundup_size = ilog2(roundup_size);
4529 
4530 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4531 		tc_valid[i] = 0;
4532 
4533 		if (!(hdev->hw_tc_map & BIT(i)))
4534 			continue;
4535 
4536 		tc_valid[i] = 1;
4537 		tc_size[i] = roundup_size;
4538 		tc_offset[i] = rss_size * i;
4539 	}
4540 
4541 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4542 }
4543 
4544 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4545 {
4546 	struct hclge_vport *vport = hdev->vport;
4547 	int i, j;
4548 
4549 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4550 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4551 			vport[j].rss_indirection_tbl[i] =
4552 				i % vport[j].alloc_rss_size;
4553 	}
4554 }
4555 
4556 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4557 {
4558 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4559 	struct hclge_vport *vport = hdev->vport;
4560 
4561 	if (hdev->pdev->revision >= 0x21)
4562 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4563 
4564 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4565 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4566 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4567 		vport[i].rss_tuple_sets.ipv4_udp_en =
4568 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4569 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4570 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4571 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4572 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4573 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4574 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4575 		vport[i].rss_tuple_sets.ipv6_udp_en =
4576 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4577 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4578 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4579 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4580 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4581 
4582 		vport[i].rss_algo = rss_algo;
4583 
4584 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4585 		       HCLGE_RSS_KEY_SIZE);
4586 	}
4587 
4588 	hclge_rss_indir_init_cfg(hdev);
4589 }
4590 
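/* Map (en == true) or unmap (en == false) the rings in @ring_chain to the
 * interrupt vector @vector_id. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring
 * entries are packed into each command sent to the firmware.
 */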
4591 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4592 				int vector_id, bool en,
4593 				struct hnae3_ring_chain_node *ring_chain)
4594 {
4595 	struct hclge_dev *hdev = vport->back;
4596 	struct hnae3_ring_chain_node *node;
4597 	struct hclge_desc desc;
4598 	struct hclge_ctrl_vector_chain_cmd *req =
4599 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4600 	enum hclge_cmd_status status;
4601 	enum hclge_opcode_type op;
4602 	u16 tqp_type_and_id;
4603 	int i;
4604 
4605 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4606 	hclge_cmd_setup_basic_desc(&desc, op, false);
4607 	req->int_vector_id = vector_id;
4608 
4609 	i = 0;
4610 	for (node = ring_chain; node; node = node->next) {
4611 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4612 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4613 				HCLGE_INT_TYPE_S,
4614 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4615 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4616 				HCLGE_TQP_ID_S, node->tqp_index);
4617 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4618 				HCLGE_INT_GL_IDX_S,
4619 				hnae3_get_field(node->int_gl_idx,
4620 						HNAE3_RING_GL_IDX_M,
4621 						HNAE3_RING_GL_IDX_S));
4622 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4623 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4624 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4625 			req->vfid = vport->vport_id;
4626 
4627 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4628 			if (status) {
4629 				dev_err(&hdev->pdev->dev,
4630 					"Map TQP fail, status is %d.\n",
4631 					status);
4632 				return -EIO;
4633 			}
4634 			i = 0;
4635 
4636 			hclge_cmd_setup_basic_desc(&desc,
4637 						   op,
4638 						   false);
4639 			req->int_vector_id = vector_id;
4640 		}
4641 	}
4642 
4643 	if (i > 0) {
4644 		req->int_cause_num = i;
4645 		req->vfid = vport->vport_id;
4646 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4647 		if (status) {
4648 			dev_err(&hdev->pdev->dev,
4649 				"Map TQP fail, status is %d.\n", status);
4650 			return -EIO;
4651 		}
4652 	}
4653 
4654 	return 0;
4655 }
4656 
4657 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4658 				    struct hnae3_ring_chain_node *ring_chain)
4659 {
4660 	struct hclge_vport *vport = hclge_get_vport(handle);
4661 	struct hclge_dev *hdev = vport->back;
4662 	int vector_id;
4663 
4664 	vector_id = hclge_get_vector_index(hdev, vector);
4665 	if (vector_id < 0) {
4666 		dev_err(&hdev->pdev->dev,
4667 			"failed to get vector index. vector=%d\n", vector);
4668 		return vector_id;
4669 	}
4670 
4671 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4672 }
4673 
4674 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4675 				       struct hnae3_ring_chain_node *ring_chain)
4676 {
4677 	struct hclge_vport *vport = hclge_get_vport(handle);
4678 	struct hclge_dev *hdev = vport->back;
4679 	int vector_id, ret;
4680 
4681 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4682 		return 0;
4683 
4684 	vector_id = hclge_get_vector_index(hdev, vector);
4685 	if (vector_id < 0) {
4686 		dev_err(&handle->pdev->dev,
4687 			"Get vector index fail. ret =%d\n", vector_id);
4688 		return vector_id;
4689 	}
4690 
4691 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4692 	if (ret)
4693 		dev_err(&handle->pdev->dev,
4694 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4695 			vector_id, ret);
4696 
4697 	return ret;
4698 }
4699 
4700 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4701 				      struct hclge_promisc_param *param)
4702 {
4703 	struct hclge_promisc_cfg_cmd *req;
4704 	struct hclge_desc desc;
4705 	int ret;
4706 
4707 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4708 
4709 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4710 	req->vf_id = param->vf_id;
4711 
	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
	 * pdev revision 0x20; newer revisions support them. Setting these two
	 * fields will not cause an error when the driver sends this command to
	 * the firmware on revision 0x20.
	 */
4717 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4718 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4719 
4720 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4721 	if (ret)
4722 		dev_err(&hdev->pdev->dev,
4723 			"Set promisc mode fail, status is %d.\n", ret);
4724 
4725 	return ret;
4726 }
4727 
4728 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4729 				     bool en_uc, bool en_mc, bool en_bc,
4730 				     int vport_id)
4731 {
4732 	if (!param)
4733 		return;
4734 
4735 	memset(param, 0, sizeof(struct hclge_promisc_param));
4736 	if (en_uc)
4737 		param->enable = HCLGE_PROMISC_EN_UC;
4738 	if (en_mc)
4739 		param->enable |= HCLGE_PROMISC_EN_MC;
4740 	if (en_bc)
4741 		param->enable |= HCLGE_PROMISC_EN_BC;
4742 	param->vf_id = vport_id;
4743 }
4744 
4745 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4746 				 bool en_mc_pmc, bool en_bc_pmc)
4747 {
4748 	struct hclge_dev *hdev = vport->back;
4749 	struct hclge_promisc_param param;
4750 
4751 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4752 				 vport->vport_id);
4753 	return hclge_cmd_set_promisc_mode(hdev, &param);
4754 }
4755 
4756 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4757 				  bool en_mc_pmc)
4758 {
4759 	struct hclge_vport *vport = hclge_get_vport(handle);
4760 	bool en_bc_pmc = true;
4761 
	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
	 * is always bypassed. So broadcast promisc should be disabled until
	 * the user enables promisc mode
	 */
4766 	if (handle->pdev->revision == 0x20)
4767 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4768 
4769 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4770 					    en_bc_pmc);
4771 }
4772 
4773 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4774 {
4775 	struct hclge_get_fd_mode_cmd *req;
4776 	struct hclge_desc desc;
4777 	int ret;
4778 
4779 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4780 
4781 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4782 
4783 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4784 	if (ret) {
4785 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4786 		return ret;
4787 	}
4788 
4789 	*fd_mode = req->mode;
4790 
4791 	return ret;
4792 }
4793 
4794 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4795 				   u32 *stage1_entry_num,
4796 				   u32 *stage2_entry_num,
4797 				   u16 *stage1_counter_num,
4798 				   u16 *stage2_counter_num)
4799 {
4800 	struct hclge_get_fd_allocation_cmd *req;
4801 	struct hclge_desc desc;
4802 	int ret;
4803 
4804 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4805 
4806 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4807 
4808 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4809 	if (ret) {
4810 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4811 			ret);
4812 		return ret;
4813 	}
4814 
4815 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4816 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4817 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4818 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4819 
4820 	return ret;
4821 }
4822 
4823 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4824 {
4825 	struct hclge_set_fd_key_config_cmd *req;
4826 	struct hclge_fd_key_cfg *stage;
4827 	struct hclge_desc desc;
4828 	int ret;
4829 
4830 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4831 
4832 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4833 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4834 	req->stage = stage_num;
4835 	req->key_select = stage->key_sel;
4836 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4837 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4838 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4839 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4840 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4841 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4842 
4843 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4844 	if (ret)
4845 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4846 
4847 	return ret;
4848 }
4849 
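/* Query the flow director mode and resource allocation from the firmware,
 * build the stage 1 key configuration (active tuples and meta data fields)
 * and program it into hardware.
 */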
4850 static int hclge_init_fd_config(struct hclge_dev *hdev)
4851 {
4852 #define LOW_2_WORDS		0x03
4853 	struct hclge_fd_key_cfg *key_cfg;
4854 	int ret;
4855 
4856 	if (!hnae3_dev_fd_supported(hdev))
4857 		return 0;
4858 
4859 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4860 	if (ret)
4861 		return ret;
4862 
4863 	switch (hdev->fd_cfg.fd_mode) {
4864 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4865 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4866 		break;
4867 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4868 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4869 		break;
4870 	default:
4871 		dev_err(&hdev->pdev->dev,
4872 			"Unsupported flow director mode %u\n",
4873 			hdev->fd_cfg.fd_mode);
4874 		return -EOPNOTSUPP;
4875 	}
4876 
4877 	hdev->fd_cfg.proto_support =
4878 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4879 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4880 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4882 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4883 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4884 	key_cfg->outer_sipv6_word_en = 0;
4885 	key_cfg->outer_dipv6_word_en = 0;
4886 
4887 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4888 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4889 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4890 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4891 
	/* when using the max 400 bit key, we can also support tuples for
	 * ether type
	 */
4893 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4894 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4895 		key_cfg->tuple_active |=
4896 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4897 	}
4898 
4899 	/* roce_type is used to filter roce frames
4900 	 * dst_vport is used to specify the rule
4901 	 */
4902 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4903 
4904 	ret = hclge_get_fd_allocation(hdev,
4905 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4906 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4907 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4908 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4909 	if (ret)
4910 		return ret;
4911 
4912 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4913 }
4914 
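/* Write one flow director TCAM entry at @loc. The key is split across three
 * command descriptors; @sel_x selects whether the x or y part of the entry
 * is written, and the entry valid bit follows @is_add for the x part.
 */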
4915 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4916 				int loc, u8 *key, bool is_add)
4917 {
4918 	struct hclge_fd_tcam_config_1_cmd *req1;
4919 	struct hclge_fd_tcam_config_2_cmd *req2;
4920 	struct hclge_fd_tcam_config_3_cmd *req3;
4921 	struct hclge_desc desc[3];
4922 	int ret;
4923 
4924 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4925 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4926 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4927 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4928 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4929 
4930 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4931 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4932 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4933 
4934 	req1->stage = stage;
4935 	req1->xy_sel = sel_x ? 1 : 0;
4936 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4937 	req1->index = cpu_to_le32(loc);
4938 	req1->entry_vld = sel_x ? is_add : 0;
4939 
4940 	if (key) {
4941 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4942 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4943 		       sizeof(req2->tcam_data));
4944 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4945 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4946 	}
4947 
4948 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4949 	if (ret)
4950 		dev_err(&hdev->pdev->dev,
4951 			"config tcam key fail, ret=%d\n",
4952 			ret);
4953 
4954 	return ret;
4955 }
4956 
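/* Configure the action data for the flow director rule at @loc: drop or
 * forward to a specific queue, optional counter, and write-back of the rule
 * id into the buffer descriptor.
 */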
4957 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4958 			      struct hclge_fd_ad_data *action)
4959 {
4960 	struct hclge_fd_ad_config_cmd *req;
4961 	struct hclge_desc desc;
4962 	u64 ad_data = 0;
4963 	int ret;
4964 
4965 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4966 
4967 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4968 	req->index = cpu_to_le32(loc);
4969 	req->stage = stage;
4970 
4971 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4972 		      action->write_rule_id_to_bd);
4973 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4974 			action->rule_id);
4975 	ad_data <<= 32;
4976 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4977 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4978 		      action->forward_to_direct_queue);
4979 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4980 			action->queue_id);
4981 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4982 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4983 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4984 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4985 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4986 			action->counter_id);
4987 
4988 	req->ad_data = cpu_to_le64(ad_data);
4989 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4990 	if (ret)
4991 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4992 
4993 	return ret;
4994 }
4995 
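/* Convert one tuple of @rule into the TCAM x/y key format using its value
 * and mask. Returns true if the tuple occupies space in the key (so the
 * caller advances the key cursors), false if the tuple bit is not active.
 */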
4996 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4997 				   struct hclge_fd_rule *rule)
4998 {
4999 	u16 tmp_x_s, tmp_y_s;
5000 	u32 tmp_x_l, tmp_y_l;
5001 	int i;
5002 
5003 	if (rule->unused_tuple & tuple_bit)
5004 		return true;
5005 
5006 	switch (tuple_bit) {
5007 	case 0:
5008 		return false;
5009 	case BIT(INNER_DST_MAC):
5010 		for (i = 0; i < ETH_ALEN; i++) {
5011 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5012 			       rule->tuples_mask.dst_mac[i]);
5013 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5014 			       rule->tuples_mask.dst_mac[i]);
5015 		}
5016 
5017 		return true;
5018 	case BIT(INNER_SRC_MAC):
5019 		for (i = 0; i < ETH_ALEN; i++) {
5020 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5021 			       rule->tuples.src_mac[i]);
5022 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5023 			       rule->tuples.src_mac[i]);
5024 		}
5025 
5026 		return true;
5027 	case BIT(INNER_VLAN_TAG_FST):
5028 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5029 		       rule->tuples_mask.vlan_tag1);
5030 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5031 		       rule->tuples_mask.vlan_tag1);
5032 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5033 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5034 
5035 		return true;
5036 	case BIT(INNER_ETH_TYPE):
5037 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5038 		       rule->tuples_mask.ether_proto);
5039 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5040 		       rule->tuples_mask.ether_proto);
5041 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5042 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5043 
5044 		return true;
5045 	case BIT(INNER_IP_TOS):
5046 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5047 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5048 
5049 		return true;
5050 	case BIT(INNER_IP_PROTO):
5051 		calc_x(*key_x, rule->tuples.ip_proto,
5052 		       rule->tuples_mask.ip_proto);
5053 		calc_y(*key_y, rule->tuples.ip_proto,
5054 		       rule->tuples_mask.ip_proto);
5055 
5056 		return true;
5057 	case BIT(INNER_SRC_IP):
5058 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5059 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5060 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5061 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5062 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5063 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5064 
5065 		return true;
5066 	case BIT(INNER_DST_IP):
5067 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5068 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5069 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5070 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5071 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5072 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5073 
5074 		return true;
5075 	case BIT(INNER_SRC_PORT):
5076 		calc_x(tmp_x_s, rule->tuples.src_port,
5077 		       rule->tuples_mask.src_port);
5078 		calc_y(tmp_y_s, rule->tuples.src_port,
5079 		       rule->tuples_mask.src_port);
5080 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5081 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5082 
5083 		return true;
5084 	case BIT(INNER_DST_PORT):
5085 		calc_x(tmp_x_s, rule->tuples.dst_port,
5086 		       rule->tuples_mask.dst_port);
5087 		calc_y(tmp_y_s, rule->tuples.dst_port,
5088 		       rule->tuples_mask.dst_port);
5089 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5090 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5091 
5092 		return true;
5093 	default:
5094 		return false;
5095 	}
5096 }
5097 
5098 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5099 				 u8 vf_id, u8 network_port_id)
5100 {
5101 	u32 port_number = 0;
5102 
5103 	if (port_type == HOST_PORT) {
5104 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5105 				pf_id);
5106 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5107 				vf_id);
5108 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5109 	} else {
5110 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5111 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5112 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5113 	}
5114 
5115 	return port_number;
5116 }
5117 
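/* Build the meta data part of the key (packet type and destination vport)
 * in x/y format and shift it so that it occupies the MSB region of the key.
 */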
5118 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5119 				       __le32 *key_x, __le32 *key_y,
5120 				       struct hclge_fd_rule *rule)
5121 {
5122 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5123 	u8 cur_pos = 0, tuple_size, shift_bits;
5124 	unsigned int i;
5125 
5126 	for (i = 0; i < MAX_META_DATA; i++) {
5127 		tuple_size = meta_data_key_info[i].key_length;
5128 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5129 
5130 		switch (tuple_bit) {
5131 		case BIT(ROCE_TYPE):
5132 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5133 			cur_pos += tuple_size;
5134 			break;
5135 		case BIT(DST_VPORT):
5136 			port_number = hclge_get_port_number(HOST_PORT, 0,
5137 							    rule->vf_id, 0);
5138 			hnae3_set_field(meta_data,
5139 					GENMASK(cur_pos + tuple_size, cur_pos),
5140 					cur_pos, port_number);
5141 			cur_pos += tuple_size;
5142 			break;
5143 		default:
5144 			break;
5145 		}
5146 	}
5147 
5148 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5149 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5150 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5151 
5152 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5153 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5154 }
5155 
/* A complete key is made up of a meta data key and a tuple key.
 * The meta data key is stored in the MSB region and the tuple key in the
 * LSB region; unused bits are filled with 0.
 */
5160 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5161 			    struct hclge_fd_rule *rule)
5162 {
5163 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5164 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5165 	u8 *cur_key_x, *cur_key_y;
5166 	unsigned int i;
5167 	int ret, tuple_size;
5168 	u8 meta_data_region;
5169 
5170 	memset(key_x, 0, sizeof(key_x));
5171 	memset(key_y, 0, sizeof(key_y));
5172 	cur_key_x = key_x;
5173 	cur_key_y = key_y;
5174 
	for (i = 0; i < MAX_TUPLE; i++) {
5176 		bool tuple_valid;
5177 		u32 check_tuple;
5178 
5179 		tuple_size = tuple_key_info[i].key_length / 8;
5180 		check_tuple = key_cfg->tuple_active & BIT(i);
5181 
5182 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5183 						     cur_key_y, rule);
5184 		if (tuple_valid) {
5185 			cur_key_x += tuple_size;
5186 			cur_key_y += tuple_size;
5187 		}
5188 	}
5189 
5190 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5191 			MAX_META_DATA_LENGTH / 8;
5192 
5193 	hclge_fd_convert_meta_data(key_cfg,
5194 				   (__le32 *)(key_x + meta_data_region),
5195 				   (__le32 *)(key_y + meta_data_region),
5196 				   rule);
5197 
5198 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5199 				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->location, ret);
		return ret;
	}
5206 
5207 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5208 				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->location, ret);
5213 	return ret;
5214 }
5215 
5216 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5217 			       struct hclge_fd_rule *rule)
5218 {
5219 	struct hclge_fd_ad_data ad_data;
5220 
5221 	ad_data.ad_id = rule->location;
5222 
5223 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5224 		ad_data.drop_packet = true;
5225 		ad_data.forward_to_direct_queue = false;
5226 		ad_data.queue_id = 0;
5227 	} else {
5228 		ad_data.drop_packet = false;
5229 		ad_data.forward_to_direct_queue = true;
5230 		ad_data.queue_id = rule->queue_id;
5231 	}
5232 
5233 	ad_data.use_counter = false;
5234 	ad_data.counter_id = 0;
5235 
5236 	ad_data.use_next_stage = false;
5237 	ad_data.next_input_key = 0;
5238 
5239 	ad_data.write_rule_id_to_bd = true;
5240 	ad_data.rule_id = rule->location;
5241 
5242 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5243 }
5244 
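/* Validate an ethtool flow spec against the flow director capabilities and
 * record in @unused the tuple bits that the rule does not use.
 */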
5245 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5246 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5247 {
5248 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5249 	struct ethtool_usrip4_spec *usr_ip4_spec;
5250 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5251 	struct ethtool_usrip6_spec *usr_ip6_spec;
5252 	struct ethhdr *ether_spec;
5253 
5254 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5255 		return -EINVAL;
5256 
5257 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5258 		return -EOPNOTSUPP;
5259 
5260 	if ((fs->flow_type & FLOW_EXT) &&
5261 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5262 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5263 		return -EOPNOTSUPP;
5264 	}
5265 
5266 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5267 	case SCTP_V4_FLOW:
5268 	case TCP_V4_FLOW:
5269 	case UDP_V4_FLOW:
5270 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5271 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5272 
5273 		if (!tcp_ip4_spec->ip4src)
5274 			*unused |= BIT(INNER_SRC_IP);
5275 
5276 		if (!tcp_ip4_spec->ip4dst)
5277 			*unused |= BIT(INNER_DST_IP);
5278 
5279 		if (!tcp_ip4_spec->psrc)
5280 			*unused |= BIT(INNER_SRC_PORT);
5281 
5282 		if (!tcp_ip4_spec->pdst)
5283 			*unused |= BIT(INNER_DST_PORT);
5284 
5285 		if (!tcp_ip4_spec->tos)
5286 			*unused |= BIT(INNER_IP_TOS);
5287 
5288 		break;
5289 	case IP_USER_FLOW:
5290 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5291 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5292 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5293 
5294 		if (!usr_ip4_spec->ip4src)
5295 			*unused |= BIT(INNER_SRC_IP);
5296 
5297 		if (!usr_ip4_spec->ip4dst)
5298 			*unused |= BIT(INNER_DST_IP);
5299 
5300 		if (!usr_ip4_spec->tos)
5301 			*unused |= BIT(INNER_IP_TOS);
5302 
5303 		if (!usr_ip4_spec->proto)
5304 			*unused |= BIT(INNER_IP_PROTO);
5305 
5306 		if (usr_ip4_spec->l4_4_bytes)
5307 			return -EOPNOTSUPP;
5308 
5309 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5310 			return -EOPNOTSUPP;
5311 
5312 		break;
5313 	case SCTP_V6_FLOW:
5314 	case TCP_V6_FLOW:
5315 	case UDP_V6_FLOW:
5316 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5317 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5318 			BIT(INNER_IP_TOS);
5319 
		/* check whether the src/dst ip addresses are used */
5321 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5322 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5323 			*unused |= BIT(INNER_SRC_IP);
5324 
5325 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5326 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5327 			*unused |= BIT(INNER_DST_IP);
5328 
5329 		if (!tcp_ip6_spec->psrc)
5330 			*unused |= BIT(INNER_SRC_PORT);
5331 
5332 		if (!tcp_ip6_spec->pdst)
5333 			*unused |= BIT(INNER_DST_PORT);
5334 
5335 		if (tcp_ip6_spec->tclass)
5336 			return -EOPNOTSUPP;
5337 
5338 		break;
5339 	case IPV6_USER_FLOW:
5340 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5341 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5342 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5343 			BIT(INNER_DST_PORT);
5344 
5345 		/* check whether src/dst ip address is used */
5346 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5347 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5348 			*unused |= BIT(INNER_SRC_IP);
5349 
5350 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5351 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5352 			*unused |= BIT(INNER_DST_IP);
5353 
5354 		if (!usr_ip6_spec->l4_proto)
5355 			*unused |= BIT(INNER_IP_PROTO);
5356 
5357 		if (usr_ip6_spec->tclass)
5358 			return -EOPNOTSUPP;
5359 
5360 		if (usr_ip6_spec->l4_4_bytes)
5361 			return -EOPNOTSUPP;
5362 
5363 		break;
5364 	case ETHER_FLOW:
5365 		ether_spec = &fs->h_u.ether_spec;
5366 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5367 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5368 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5369 
5370 		if (is_zero_ether_addr(ether_spec->h_source))
5371 			*unused |= BIT(INNER_SRC_MAC);
5372 
5373 		if (is_zero_ether_addr(ether_spec->h_dest))
5374 			*unused |= BIT(INNER_DST_MAC);
5375 
5376 		if (!ether_spec->h_proto)
5377 			*unused |= BIT(INNER_ETH_TYPE);
5378 
5379 		break;
5380 	default:
5381 		return -EOPNOTSUPP;
5382 	}
5383 
5384 	if (fs->flow_type & FLOW_EXT) {
5385 		if (fs->h_ext.vlan_etype)
5386 			return -EOPNOTSUPP;
5387 		if (!fs->h_ext.vlan_tci)
5388 			*unused |= BIT(INNER_VLAN_TAG_FST);
5389 
5390 		if (fs->m_ext.vlan_tci) {
5391 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5392 				return -EINVAL;
5393 		}
5394 	} else {
5395 		*unused |= BIT(INNER_VLAN_TAG_FST);
5396 	}
5397 
5398 	if (fs->flow_type & FLOW_MAC_EXT) {
5399 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5400 			return -EOPNOTSUPP;
5401 
5402 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5403 			*unused |= BIT(INNER_DST_MAC);
5404 		else
5405 			*unused &= ~(BIT(INNER_DST_MAC));
5406 	}
5407 
5408 	return 0;
5409 }
5410 
5411 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5412 {
5413 	struct hclge_fd_rule *rule = NULL;
5414 	struct hlist_node *node2;
5415 
5416 	spin_lock_bh(&hdev->fd_rule_lock);
5417 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5418 		if (rule->location >= location)
5419 			break;
5420 	}
5421 
5422 	spin_unlock_bh(&hdev->fd_rule_lock);
5423 
5424 	return rule && rule->location == location;
5425 }
5426 
5427 /* the caller must hold hdev->fd_rule_lock before calling this function */
5428 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5429 				     struct hclge_fd_rule *new_rule,
5430 				     u16 location,
5431 				     bool is_add)
5432 {
5433 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5434 	struct hlist_node *node2;
5435 
5436 	if (is_add && !new_rule)
5437 		return -EINVAL;
5438 
5439 	hlist_for_each_entry_safe(rule, node2,
5440 				  &hdev->fd_rule_list, rule_node) {
5441 		if (rule->location >= location)
5442 			break;
5443 		parent = rule;
5444 	}
5445 
5446 	if (rule && rule->location == location) {
5447 		hlist_del(&rule->rule_node);
5448 		kfree(rule);
5449 		hdev->hclge_fd_rule_num--;
5450 
5451 		if (!is_add) {
5452 			if (!hdev->hclge_fd_rule_num)
5453 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5454 			clear_bit(location, hdev->fd_bmap);
5455 
5456 			return 0;
5457 		}
5458 	} else if (!is_add) {
5459 		dev_err(&hdev->pdev->dev,
5460 			"delete fail, rule %u does not exist\n",
5461 			location);
5462 		return -EINVAL;
5463 	}
5464 
5465 	INIT_HLIST_NODE(&new_rule->rule_node);
5466 
5467 	if (parent)
5468 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5469 	else
5470 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5471 
5472 	set_bit(location, hdev->fd_bmap);
5473 	hdev->hclge_fd_rule_num++;
5474 	hdev->fd_active_type = new_rule->rule_type;
5475 
5476 	return 0;
5477 }
5478 
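/* Convert an ethtool flow spec into the tuple and mask fields of a
 * hclge_fd_rule, including the VLAN tag and destination MAC carried in
 * the FLOW_EXT / FLOW_MAC_EXT extensions.
 */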
5479 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5480 			      struct ethtool_rx_flow_spec *fs,
5481 			      struct hclge_fd_rule *rule)
5482 {
5483 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5484 
5485 	switch (flow_type) {
5486 	case SCTP_V4_FLOW:
5487 	case TCP_V4_FLOW:
5488 	case UDP_V4_FLOW:
5489 		rule->tuples.src_ip[IPV4_INDEX] =
5490 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5491 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5492 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5493 
5494 		rule->tuples.dst_ip[IPV4_INDEX] =
5495 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5496 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5497 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5498 
5499 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5500 		rule->tuples_mask.src_port =
5501 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5502 
5503 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5504 		rule->tuples_mask.dst_port =
5505 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5506 
5507 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5508 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5509 
5510 		rule->tuples.ether_proto = ETH_P_IP;
5511 		rule->tuples_mask.ether_proto = 0xFFFF;
5512 
5513 		break;
5514 	case IP_USER_FLOW:
5515 		rule->tuples.src_ip[IPV4_INDEX] =
5516 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5517 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5518 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5519 
5520 		rule->tuples.dst_ip[IPV4_INDEX] =
5521 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5522 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5523 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5524 
5525 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5526 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5527 
5528 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5529 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5530 
5531 		rule->tuples.ether_proto = ETH_P_IP;
5532 		rule->tuples_mask.ether_proto = 0xFFFF;
5533 
5534 		break;
5535 	case SCTP_V6_FLOW:
5536 	case TCP_V6_FLOW:
5537 	case UDP_V6_FLOW:
5538 		be32_to_cpu_array(rule->tuples.src_ip,
5539 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5540 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5541 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5542 
5543 		be32_to_cpu_array(rule->tuples.dst_ip,
5544 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5545 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5546 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5547 
5548 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5549 		rule->tuples_mask.src_port =
5550 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5551 
5552 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5553 		rule->tuples_mask.dst_port =
5554 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5555 
5556 		rule->tuples.ether_proto = ETH_P_IPV6;
5557 		rule->tuples_mask.ether_proto = 0xFFFF;
5558 
5559 		break;
5560 	case IPV6_USER_FLOW:
5561 		be32_to_cpu_array(rule->tuples.src_ip,
5562 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5563 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5564 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5565 
5566 		be32_to_cpu_array(rule->tuples.dst_ip,
5567 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5568 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5569 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5570 
5571 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5572 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5573 
5574 		rule->tuples.ether_proto = ETH_P_IPV6;
5575 		rule->tuples_mask.ether_proto = 0xFFFF;
5576 
5577 		break;
5578 	case ETHER_FLOW:
5579 		ether_addr_copy(rule->tuples.src_mac,
5580 				fs->h_u.ether_spec.h_source);
5581 		ether_addr_copy(rule->tuples_mask.src_mac,
5582 				fs->m_u.ether_spec.h_source);
5583 
5584 		ether_addr_copy(rule->tuples.dst_mac,
5585 				fs->h_u.ether_spec.h_dest);
5586 		ether_addr_copy(rule->tuples_mask.dst_mac,
5587 				fs->m_u.ether_spec.h_dest);
5588 
5589 		rule->tuples.ether_proto =
5590 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5591 		rule->tuples_mask.ether_proto =
5592 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5593 
5594 		break;
5595 	default:
5596 		return -EOPNOTSUPP;
5597 	}
5598 
5599 	switch (flow_type) {
5600 	case SCTP_V4_FLOW:
5601 	case SCTP_V6_FLOW:
5602 		rule->tuples.ip_proto = IPPROTO_SCTP;
5603 		rule->tuples_mask.ip_proto = 0xFF;
5604 		break;
5605 	case TCP_V4_FLOW:
5606 	case TCP_V6_FLOW:
5607 		rule->tuples.ip_proto = IPPROTO_TCP;
5608 		rule->tuples_mask.ip_proto = 0xFF;
5609 		break;
5610 	case UDP_V4_FLOW:
5611 	case UDP_V6_FLOW:
5612 		rule->tuples.ip_proto = IPPROTO_UDP;
5613 		rule->tuples_mask.ip_proto = 0xFF;
5614 		break;
5615 	default:
5616 		break;
5617 	}
5618 
5619 	if (fs->flow_type & FLOW_EXT) {
5620 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5621 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5622 	}
5623 
5624 	if (fs->flow_type & FLOW_MAC_EXT) {
5625 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5626 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5627 	}
5628 
5629 	return 0;
5630 }
5631 
5632 /* the caller must hold hdev->fd_rule_lock before calling this function */
5633 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5634 				struct hclge_fd_rule *rule)
5635 {
5636 	int ret;
5637 
5638 	if (!rule) {
5639 		dev_err(&hdev->pdev->dev,
5640 			"The flow director rule is NULL\n");
5641 		return -EINVAL;
5642 	}
5643 
5644 	/* it will never fail here, so there is no need to check the return value */
5645 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5646 
5647 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5648 	if (ret)
5649 		goto clear_rule;
5650 
5651 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5652 	if (ret)
5653 		goto clear_rule;
5654 
5655 	return 0;
5656 
5657 clear_rule:
5658 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5659 	return ret;
5660 }
5661 
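/* Add a flow director rule requested via ethtool: validate the spec,
 * resolve the destination vport and queue, then build the rule and
 * program it to hardware under fd_rule_lock.
 */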
5662 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5663 			      struct ethtool_rxnfc *cmd)
5664 {
5665 	struct hclge_vport *vport = hclge_get_vport(handle);
5666 	struct hclge_dev *hdev = vport->back;
5667 	u16 dst_vport_id = 0, q_index = 0;
5668 	struct ethtool_rx_flow_spec *fs;
5669 	struct hclge_fd_rule *rule;
5670 	u32 unused = 0;
5671 	u8 action;
5672 	int ret;
5673 
5674 	if (!hnae3_dev_fd_supported(hdev))
5675 		return -EOPNOTSUPP;
5676 
5677 	if (!hdev->fd_en) {
5678 		dev_warn(&hdev->pdev->dev,
5679 			 "Please enable flow director first\n");
5680 		return -EOPNOTSUPP;
5681 	}
5682 
5683 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5684 
5685 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5686 	if (ret) {
5687 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5688 		return ret;
5689 	}
5690 
5691 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5692 		action = HCLGE_FD_ACTION_DROP_PACKET;
5693 	} else {
5694 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5695 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5696 		u16 tqps;
5697 
5698 		if (vf > hdev->num_req_vfs) {
5699 			dev_err(&hdev->pdev->dev,
5700 				"Error: vf id (%u) > max vf num (%u)\n",
5701 				vf, hdev->num_req_vfs);
5702 			return -EINVAL;
5703 		}
5704 
5705 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5706 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5707 
5708 		if (ring >= tqps) {
5709 			dev_err(&hdev->pdev->dev,
5710 				"Error: queue id (%u) > max tqp num (%u)\n",
5711 				ring, tqps - 1);
5712 			return -EINVAL;
5713 		}
5714 
5715 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5716 		q_index = ring;
5717 	}
5718 
5719 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5720 	if (!rule)
5721 		return -ENOMEM;
5722 
5723 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5724 	if (ret) {
5725 		kfree(rule);
5726 		return ret;
5727 	}
5728 
5729 	rule->flow_type = fs->flow_type;
5730 
5731 	rule->location = fs->location;
5732 	rule->unused_tuple = unused;
5733 	rule->vf_id = dst_vport_id;
5734 	rule->queue_id = q_index;
5735 	rule->action = action;
5736 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5737 
5738 	/* to avoid rule conflicts, when the user configures a rule via ethtool,
5739 	 * we need to clear all arfs rules
5740 	 */
5741 	hclge_clear_arfs_rules(handle);
5742 
5743 	spin_lock_bh(&hdev->fd_rule_lock);
5744 	ret = hclge_fd_config_rule(hdev, rule);
5745 
5746 	spin_unlock_bh(&hdev->fd_rule_lock);
5747 
5748 	return ret;
5749 }
5750 
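/* Delete a flow director rule added via ethtool: clear its TCAM entry and
 * remove it from the software rule list.
 */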
5751 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5752 			      struct ethtool_rxnfc *cmd)
5753 {
5754 	struct hclge_vport *vport = hclge_get_vport(handle);
5755 	struct hclge_dev *hdev = vport->back;
5756 	struct ethtool_rx_flow_spec *fs;
5757 	int ret;
5758 
5759 	if (!hnae3_dev_fd_supported(hdev))
5760 		return -EOPNOTSUPP;
5761 
5762 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5763 
5764 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5765 		return -EINVAL;
5766 
5767 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5768 		dev_err(&hdev->pdev->dev,
5769 			"Delete fail, rule %u does not exist\n", fs->location);
5770 		return -ENOENT;
5771 	}
5772 
5773 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5774 				   NULL, false);
5775 	if (ret)
5776 		return ret;
5777 
5778 	spin_lock_bh(&hdev->fd_rule_lock);
5779 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5780 
5781 	spin_unlock_bh(&hdev->fd_rule_lock);
5782 
5783 	return ret;
5784 }
5785 
5786 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5787 				     bool clear_list)
5788 {
5789 	struct hclge_vport *vport = hclge_get_vport(handle);
5790 	struct hclge_dev *hdev = vport->back;
5791 	struct hclge_fd_rule *rule;
5792 	struct hlist_node *node;
5793 	u16 location;
5794 
5795 	if (!hnae3_dev_fd_supported(hdev))
5796 		return;
5797 
5798 	spin_lock_bh(&hdev->fd_rule_lock);
5799 	for_each_set_bit(location, hdev->fd_bmap,
5800 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5801 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5802 				     NULL, false);
5803 
5804 	if (clear_list) {
5805 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5806 					  rule_node) {
5807 			hlist_del(&rule->rule_node);
5808 			kfree(rule);
5809 		}
5810 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5811 		hdev->hclge_fd_rule_num = 0;
5812 		bitmap_zero(hdev->fd_bmap,
5813 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5814 	}
5815 
5816 	spin_unlock_bh(&hdev->fd_rule_lock);
5817 }
5818 
5819 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5820 {
5821 	struct hclge_vport *vport = hclge_get_vport(handle);
5822 	struct hclge_dev *hdev = vport->back;
5823 	struct hclge_fd_rule *rule;
5824 	struct hlist_node *node;
5825 	int ret;
5826 
5827 	/* Return ok here, because reset error handling will check this
5828 	 * return value. If error is returned here, the reset process will
5829 	 * fail.
5830 	 */
5831 	if (!hnae3_dev_fd_supported(hdev))
5832 		return 0;
5833 
5834 	/* if fd is disabled, the rules should not be restored during reset */
5835 	if (!hdev->fd_en)
5836 		return 0;
5837 
5838 	spin_lock_bh(&hdev->fd_rule_lock);
5839 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5840 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5841 		if (!ret)
5842 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5843 
5844 		if (ret) {
5845 			dev_warn(&hdev->pdev->dev,
5846 				 "Restore rule %u failed, remove it\n",
5847 				 rule->location);
5848 			clear_bit(rule->location, hdev->fd_bmap);
5849 			hlist_del(&rule->rule_node);
5850 			kfree(rule);
5851 			hdev->hclge_fd_rule_num--;
5852 		}
5853 	}
5854 
5855 	if (hdev->hclge_fd_rule_num)
5856 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5857 
5858 	spin_unlock_bh(&hdev->fd_rule_lock);
5859 
5860 	return 0;
5861 }
5862 
5863 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5864 				 struct ethtool_rxnfc *cmd)
5865 {
5866 	struct hclge_vport *vport = hclge_get_vport(handle);
5867 	struct hclge_dev *hdev = vport->back;
5868 
5869 	if (!hnae3_dev_fd_supported(hdev))
5870 		return -EOPNOTSUPP;
5871 
5872 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5873 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5874 
5875 	return 0;
5876 }
5877 
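/* Report the rule stored at fs->location back to ethtool, rebuilding the
 * tuple values and masks of the flow spec from the software rule.
 */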
5878 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5879 				  struct ethtool_rxnfc *cmd)
5880 {
5881 	struct hclge_vport *vport = hclge_get_vport(handle);
5882 	struct hclge_fd_rule *rule = NULL;
5883 	struct hclge_dev *hdev = vport->back;
5884 	struct ethtool_rx_flow_spec *fs;
5885 	struct hlist_node *node2;
5886 
5887 	if (!hnae3_dev_fd_supported(hdev))
5888 		return -EOPNOTSUPP;
5889 
5890 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5891 
5892 	spin_lock_bh(&hdev->fd_rule_lock);
5893 
5894 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5895 		if (rule->location >= fs->location)
5896 			break;
5897 	}
5898 
5899 	if (!rule || fs->location != rule->location) {
5900 		spin_unlock_bh(&hdev->fd_rule_lock);
5901 
5902 		return -ENOENT;
5903 	}
5904 
5905 	fs->flow_type = rule->flow_type;
5906 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5907 	case SCTP_V4_FLOW:
5908 	case TCP_V4_FLOW:
5909 	case UDP_V4_FLOW:
5910 		fs->h_u.tcp_ip4_spec.ip4src =
5911 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5912 		fs->m_u.tcp_ip4_spec.ip4src =
5913 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5914 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5915 
5916 		fs->h_u.tcp_ip4_spec.ip4dst =
5917 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5918 		fs->m_u.tcp_ip4_spec.ip4dst =
5919 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5920 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5921 
5922 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5923 		fs->m_u.tcp_ip4_spec.psrc =
5924 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5925 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5926 
5927 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5928 		fs->m_u.tcp_ip4_spec.pdst =
5929 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5930 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5931 
5932 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5933 		fs->m_u.tcp_ip4_spec.tos =
5934 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5935 				0 : rule->tuples_mask.ip_tos;
5936 
5937 		break;
5938 	case IP_USER_FLOW:
5939 		fs->h_u.usr_ip4_spec.ip4src =
5940 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5941 		fs->m_u.usr_ip4_spec.ip4src =
5942 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5943 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5944 
5945 		fs->h_u.usr_ip4_spec.ip4dst =
5946 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5947 		fs->m_u.usr_ip4_spec.ip4dst =
5948 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5949 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5950 
5951 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5952 		fs->m_u.usr_ip4_spec.tos =
5953 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5954 				0 : rule->tuples_mask.ip_tos;
5955 
5956 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5957 		fs->m_u.usr_ip4_spec.proto =
5958 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5959 				0 : rule->tuples_mask.ip_proto;
5960 
5961 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5962 
5963 		break;
5964 	case SCTP_V6_FLOW:
5965 	case TCP_V6_FLOW:
5966 	case UDP_V6_FLOW:
5967 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5968 				  rule->tuples.src_ip, IPV6_SIZE);
5969 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5970 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5971 			       sizeof(int) * IPV6_SIZE);
5972 		else
5973 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5974 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5975 
5976 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5977 				  rule->tuples.dst_ip, IPV6_SIZE);
5978 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5979 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5980 			       sizeof(int) * IPV6_SIZE);
5981 		else
5982 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5983 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5984 
5985 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5986 		fs->m_u.tcp_ip6_spec.psrc =
5987 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5988 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5989 
5990 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5991 		fs->m_u.tcp_ip6_spec.pdst =
5992 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5993 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5994 
5995 		break;
5996 	case IPV6_USER_FLOW:
5997 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5998 				  rule->tuples.src_ip, IPV6_SIZE);
5999 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
6000 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6001 			       sizeof(int) * IPV6_SIZE);
6002 		else
6003 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6004 					  rule->tuples_mask.src_ip, IPV6_SIZE);
6005 
6006 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6007 				  rule->tuples.dst_ip, IPV6_SIZE);
6008 		if (rule->unused_tuple & BIT(INNER_DST_IP))
6009 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6010 			       sizeof(int) * IPV6_SIZE);
6011 		else
6012 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6013 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
6014 
6015 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6016 		fs->m_u.usr_ip6_spec.l4_proto =
6017 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6018 				0 : rule->tuples_mask.ip_proto;
6019 
6020 		break;
6021 	case ETHER_FLOW:
6022 		ether_addr_copy(fs->h_u.ether_spec.h_source,
6023 				rule->tuples.src_mac);
6024 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6025 			eth_zero_addr(fs->m_u.ether_spec.h_source);
6026 		else
6027 			ether_addr_copy(fs->m_u.ether_spec.h_source,
6028 					rule->tuples_mask.src_mac);
6029 
6030 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
6031 				rule->tuples.dst_mac);
6032 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6033 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6034 		else
6035 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6036 					rule->tuples_mask.dst_mac);
6037 
6038 		fs->h_u.ether_spec.h_proto =
6039 				cpu_to_be16(rule->tuples.ether_proto);
6040 		fs->m_u.ether_spec.h_proto =
6041 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6042 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6043 
6044 		break;
6045 	default:
6046 		spin_unlock_bh(&hdev->fd_rule_lock);
6047 		return -EOPNOTSUPP;
6048 	}
6049 
6050 	if (fs->flow_type & FLOW_EXT) {
6051 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6052 		fs->m_ext.vlan_tci =
6053 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6054 				cpu_to_be16(VLAN_VID_MASK) :
6055 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6056 	}
6057 
6058 	if (fs->flow_type & FLOW_MAC_EXT) {
6059 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6060 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6061 			eth_zero_addr(fs->m_ext.h_dest);
6062 		else
6063 			ether_addr_copy(fs->m_ext.h_dest,
6064 					rule->tuples_mask.dst_mac);
6065 	}
6066 
6067 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6068 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6069 	} else {
6070 		u64 vf_id;
6071 
6072 		fs->ring_cookie = rule->queue_id;
6073 		vf_id = rule->vf_id;
6074 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6075 		fs->ring_cookie |= vf_id;
6076 	}
6077 
6078 	spin_unlock_bh(&hdev->fd_rule_lock);
6079 
6080 	return 0;
6081 }
6082 
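/* Report the locations of all configured flow director rules; fails with
 * -EMSGSIZE if there are more rules than cmd->rule_cnt allows.
 */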
6083 static int hclge_get_all_rules(struct hnae3_handle *handle,
6084 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6085 {
6086 	struct hclge_vport *vport = hclge_get_vport(handle);
6087 	struct hclge_dev *hdev = vport->back;
6088 	struct hclge_fd_rule *rule;
6089 	struct hlist_node *node2;
6090 	int cnt = 0;
6091 
6092 	if (!hnae3_dev_fd_supported(hdev))
6093 		return -EOPNOTSUPP;
6094 
6095 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6096 
6097 	spin_lock_bh(&hdev->fd_rule_lock);
6098 	hlist_for_each_entry_safe(rule, node2,
6099 				  &hdev->fd_rule_list, rule_node) {
6100 		if (cnt == cmd->rule_cnt) {
6101 			spin_unlock_bh(&hdev->fd_rule_lock);
6102 			return -EMSGSIZE;
6103 		}
6104 
6105 		rule_locs[cnt] = rule->location;
6106 		cnt++;
6107 	}
6108 
6109 	spin_unlock_bh(&hdev->fd_rule_lock);
6110 
6111 	cmd->rule_cnt = cnt;
6112 
6113 	return 0;
6114 }
6115 
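/* Extract the flow director tuples (ether proto, ip proto, destination
 * port and IP addresses) from the dissected flow keys of an aRFS request.
 */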
6116 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6117 				     struct hclge_fd_rule_tuples *tuples)
6118 {
6119 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6120 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6121 
6122 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6123 	tuples->ip_proto = fkeys->basic.ip_proto;
6124 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6125 
6126 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6127 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6128 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6129 	} else {
6130 		int i;
6131 
6132 		for (i = 0; i < IPV6_SIZE; i++) {
6133 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6134 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6135 		}
6136 	}
6137 }
6138 
6139 /* traverse all rules, check whether an existing rule has the same tuples */
6140 static struct hclge_fd_rule *
6141 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6142 			  const struct hclge_fd_rule_tuples *tuples)
6143 {
6144 	struct hclge_fd_rule *rule = NULL;
6145 	struct hlist_node *node;
6146 
6147 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6148 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6149 			return rule;
6150 	}
6151 
6152 	return NULL;
6153 }
6154 
6155 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6156 				     struct hclge_fd_rule *rule)
6157 {
6158 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6159 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6160 			     BIT(INNER_SRC_PORT);
6161 	rule->action = 0;
6162 	rule->vf_id = 0;
6163 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6164 	if (tuples->ether_proto == ETH_P_IP) {
6165 		if (tuples->ip_proto == IPPROTO_TCP)
6166 			rule->flow_type = TCP_V4_FLOW;
6167 		else
6168 			rule->flow_type = UDP_V4_FLOW;
6169 	} else {
6170 		if (tuples->ip_proto == IPPROTO_TCP)
6171 			rule->flow_type = TCP_V6_FLOW;
6172 		else
6173 			rule->flow_type = UDP_V6_FLOW;
6174 	}
6175 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6176 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6177 }
6178 
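/* aRFS callback: reuse an existing rule that matches the flow tuples when
 * possible, otherwise allocate a free location and program a new rule for
 * the flow; returns the rule location on success.
 */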
6179 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6180 				      u16 flow_id, struct flow_keys *fkeys)
6181 {
6182 	struct hclge_vport *vport = hclge_get_vport(handle);
6183 	struct hclge_fd_rule_tuples new_tuples;
6184 	struct hclge_dev *hdev = vport->back;
6185 	struct hclge_fd_rule *rule;
6186 	u16 tmp_queue_id;
6187 	u16 bit_id;
6188 	int ret;
6189 
6190 	if (!hnae3_dev_fd_supported(hdev))
6191 		return -EOPNOTSUPP;
6192 
6193 	memset(&new_tuples, 0, sizeof(new_tuples));
6194 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6195 
6196 	spin_lock_bh(&hdev->fd_rule_lock);
6197 
6198 	/* when there are fd rules already added by the user,
6199 	 * arfs should not work
6200 	 */
6201 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6202 		spin_unlock_bh(&hdev->fd_rule_lock);
6203 
6204 		return -EOPNOTSUPP;
6205 	}
6206 
6207 	/* check whether a flow director filter exists for this flow;
6208 	 * if not, create a new filter for it;
6209 	 * if a filter exists with a different queue id, modify the filter;
6210 	 * if a filter exists with the same queue id, do nothing
6211 	 */
6212 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6213 	if (!rule) {
6214 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6215 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6216 			spin_unlock_bh(&hdev->fd_rule_lock);
6217 
6218 			return -ENOSPC;
6219 		}
6220 
6221 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6222 		if (!rule) {
6223 			spin_unlock_bh(&hdev->fd_rule_lock);
6224 
6225 			return -ENOMEM;
6226 		}
6227 
6228 		set_bit(bit_id, hdev->fd_bmap);
6229 		rule->location = bit_id;
6230 		rule->flow_id = flow_id;
6231 		rule->queue_id = queue_id;
6232 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6233 		ret = hclge_fd_config_rule(hdev, rule);
6234 
6235 		spin_unlock_bh(&hdev->fd_rule_lock);
6236 
6237 		if (ret)
6238 			return ret;
6239 
6240 		return rule->location;
6241 	}
6242 
6243 	spin_unlock_bh(&hdev->fd_rule_lock);
6244 
6245 	if (rule->queue_id == queue_id)
6246 		return rule->location;
6247 
6248 	tmp_queue_id = rule->queue_id;
6249 	rule->queue_id = queue_id;
6250 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6251 	if (ret) {
6252 		rule->queue_id = tmp_queue_id;
6253 		return ret;
6254 	}
6255 
6256 	return rule->location;
6257 }
6258 
6259 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6260 {
6261 #ifdef CONFIG_RFS_ACCEL
6262 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6263 	struct hclge_fd_rule *rule;
6264 	struct hlist_node *node;
6265 	HLIST_HEAD(del_list);
6266 
6267 	spin_lock_bh(&hdev->fd_rule_lock);
6268 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6269 		spin_unlock_bh(&hdev->fd_rule_lock);
6270 		return;
6271 	}
6272 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6273 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6274 					rule->flow_id, rule->location)) {
6275 			hlist_del_init(&rule->rule_node);
6276 			hlist_add_head(&rule->rule_node, &del_list);
6277 			hdev->hclge_fd_rule_num--;
6278 			clear_bit(rule->location, hdev->fd_bmap);
6279 		}
6280 	}
6281 	spin_unlock_bh(&hdev->fd_rule_lock);
6282 
6283 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6284 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6285 				     rule->location, NULL, false);
6286 		kfree(rule);
6287 	}
6288 #endif
6289 }
6290 
6291 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6292 {
6293 #ifdef CONFIG_RFS_ACCEL
6294 	struct hclge_vport *vport = hclge_get_vport(handle);
6295 	struct hclge_dev *hdev = vport->back;
6296 
6297 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6298 		hclge_del_all_fd_entries(handle, true);
6299 #endif
6300 }
6301 
6302 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6303 {
6304 	struct hclge_vport *vport = hclge_get_vport(handle);
6305 	struct hclge_dev *hdev = vport->back;
6306 
6307 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6308 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6309 }
6310 
6311 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6312 {
6313 	struct hclge_vport *vport = hclge_get_vport(handle);
6314 	struct hclge_dev *hdev = vport->back;
6315 
6316 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6317 }
6318 
6319 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6320 {
6321 	struct hclge_vport *vport = hclge_get_vport(handle);
6322 	struct hclge_dev *hdev = vport->back;
6323 
6324 	return hdev->rst_stats.hw_reset_done_cnt;
6325 }
6326 
6327 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6328 {
6329 	struct hclge_vport *vport = hclge_get_vport(handle);
6330 	struct hclge_dev *hdev = vport->back;
6331 	bool clear;
6332 
6333 	hdev->fd_en = enable;
6334 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6335 	if (!enable)
6336 		hclge_del_all_fd_entries(handle, clear);
6337 	else
6338 		hclge_restore_fd_entries(handle);
6339 }
6340 
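/* Enable or disable MAC TX/RX along with padding, FCS and oversize
 * truncation handling via the CONFIG_MAC_MODE command.
 */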
6341 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6342 {
6343 	struct hclge_desc desc;
6344 	struct hclge_config_mac_mode_cmd *req =
6345 		(struct hclge_config_mac_mode_cmd *)desc.data;
6346 	u32 loop_en = 0;
6347 	int ret;
6348 
6349 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6350 
6351 	if (enable) {
6352 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6353 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6354 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6355 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6356 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6357 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6358 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6359 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6360 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6361 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6362 	}
6363 
6364 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6365 
6366 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6367 	if (ret)
6368 		dev_err(&hdev->pdev->dev,
6369 			"mac enable fail, ret =%d.\n", ret);
6370 }
6371 
6372 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6373 				     u8 switch_param, u8 param_mask)
6374 {
6375 	struct hclge_mac_vlan_switch_cmd *req;
6376 	struct hclge_desc desc;
6377 	u32 func_id;
6378 	int ret;
6379 
6380 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6381 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6382 
6383 	/* read current config parameter */
6384 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6385 				   true);
6386 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6387 	req->func_id = cpu_to_le32(func_id);
6388 
6389 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6390 	if (ret) {
6391 		dev_err(&hdev->pdev->dev,
6392 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6393 		return ret;
6394 	}
6395 
6396 	/* modify and write new config parameter */
6397 	hclge_cmd_reuse_desc(&desc, false);
6398 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6399 	req->param_mask = param_mask;
6400 
6401 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6402 	if (ret)
6403 		dev_err(&hdev->pdev->dev,
6404 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6405 	return ret;
6406 }
6407 
6408 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6409 				       int link_ret)
6410 {
6411 #define HCLGE_PHY_LINK_STATUS_NUM  200
6412 
6413 	struct phy_device *phydev = hdev->hw.mac.phydev;
6414 	int i = 0;
6415 	int ret;
6416 
6417 	do {
6418 		ret = phy_read_status(phydev);
6419 		if (ret) {
6420 			dev_err(&hdev->pdev->dev,
6421 				"phy update link status fail, ret = %d\n", ret);
6422 			return;
6423 		}
6424 
6425 		if (phydev->link == link_ret)
6426 			break;
6427 
6428 		msleep(HCLGE_LINK_STATUS_MS);
6429 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6430 }
6431 
6432 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6433 {
6434 #define HCLGE_MAC_LINK_STATUS_NUM  100
6435 
6436 	int i = 0;
6437 	int ret;
6438 
6439 	do {
6440 		ret = hclge_get_mac_link_status(hdev);
6441 		if (ret < 0)
6442 			return ret;
6443 		else if (ret == link_ret)
6444 			return 0;
6445 
6446 		msleep(HCLGE_LINK_STATUS_MS);
6447 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6448 	return -EBUSY;
6449 }
6450 
6451 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6452 					  bool is_phy)
6453 {
6454 #define HCLGE_LINK_STATUS_DOWN 0
6455 #define HCLGE_LINK_STATUS_UP   1
6456 
6457 	int link_ret;
6458 
6459 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6460 
6461 	if (is_phy)
6462 		hclge_phy_link_status_wait(hdev, link_ret);
6463 
6464 	return hclge_mac_link_status_wait(hdev, link_ret);
6465 }
6466 
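/* Configure MAC (app) loopback: read the current MAC mode, update the
 * loopback and TX/RX enable bits, then write the configuration back.
 */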
6467 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6468 {
6469 	struct hclge_config_mac_mode_cmd *req;
6470 	struct hclge_desc desc;
6471 	u32 loop_en;
6472 	int ret;
6473 
6474 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6475 	/* 1 Read out the MAC mode config at first */
6476 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6477 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6478 	if (ret) {
6479 		dev_err(&hdev->pdev->dev,
6480 			"mac loopback get fail, ret =%d.\n", ret);
6481 		return ret;
6482 	}
6483 
6484 	/* 2 Then setup the loopback flag */
6485 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6486 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6487 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6488 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6489 
6490 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6491 
6492 	/* 3 Config mac work mode with loopback flag
6493 	 * and its original configure parameters
6494 	 */
6495 	hclge_cmd_reuse_desc(&desc, false);
6496 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6497 	if (ret)
6498 		dev_err(&hdev->pdev->dev,
6499 			"mac loopback set fail, ret =%d.\n", ret);
6500 	return ret;
6501 }
6502 
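/* Configure serial or parallel serdes loopback and poll the command
 * result until the hardware reports completion or the retries expire.
 */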
6503 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6504 				     enum hnae3_loop loop_mode)
6505 {
6506 #define HCLGE_SERDES_RETRY_MS	10
6507 #define HCLGE_SERDES_RETRY_NUM	100
6508 
6509 	struct hclge_serdes_lb_cmd *req;
6510 	struct hclge_desc desc;
6511 	int ret, i = 0;
6512 	u8 loop_mode_b;
6513 
6514 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6515 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6516 
6517 	switch (loop_mode) {
6518 	case HNAE3_LOOP_SERIAL_SERDES:
6519 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6520 		break;
6521 	case HNAE3_LOOP_PARALLEL_SERDES:
6522 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6523 		break;
6524 	default:
6525 		dev_err(&hdev->pdev->dev,
6526 			"unsupported serdes loopback mode %d\n", loop_mode);
6527 		return -ENOTSUPP;
6528 	}
6529 
6530 	if (en) {
6531 		req->enable = loop_mode_b;
6532 		req->mask = loop_mode_b;
6533 	} else {
6534 		req->mask = loop_mode_b;
6535 	}
6536 
6537 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6538 	if (ret) {
6539 		dev_err(&hdev->pdev->dev,
6540 			"serdes loopback set fail, ret = %d\n", ret);
6541 		return ret;
6542 	}
6543 
6544 	do {
6545 		msleep(HCLGE_SERDES_RETRY_MS);
6546 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6547 					   true);
6548 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6549 		if (ret) {
6550 			dev_err(&hdev->pdev->dev,
6551 				"serdes loopback get fail, ret = %d\n", ret);
6552 			return ret;
6553 		}
6554 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6555 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6556 
6557 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6558 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6559 		return -EBUSY;
6560 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6561 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6562 		return -EIO;
6563 	}
6564 	return ret;
6565 }
6566 
6567 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6568 				     enum hnae3_loop loop_mode)
6569 {
6570 	int ret;
6571 
6572 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6573 	if (ret)
6574 		return ret;
6575 
6576 	hclge_cfg_mac_mode(hdev, en);
6577 
6578 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6579 	if (ret)
6580 		dev_err(&hdev->pdev->dev,
6581 			"serdes loopback config mac mode timeout\n");
6582 
6583 	return ret;
6584 }
6585 
6586 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6587 				     struct phy_device *phydev)
6588 {
6589 	int ret;
6590 
6591 	if (!phydev->suspended) {
6592 		ret = phy_suspend(phydev);
6593 		if (ret)
6594 			return ret;
6595 	}
6596 
6597 	ret = phy_resume(phydev);
6598 	if (ret)
6599 		return ret;
6600 
6601 	return phy_loopback(phydev, true);
6602 }
6603 
6604 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6605 				      struct phy_device *phydev)
6606 {
6607 	int ret;
6608 
6609 	ret = phy_loopback(phydev, false);
6610 	if (ret)
6611 		return ret;
6612 
6613 	return phy_suspend(phydev);
6614 }
6615 
6616 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6617 {
6618 	struct phy_device *phydev = hdev->hw.mac.phydev;
6619 	int ret;
6620 
6621 	if (!phydev)
6622 		return -ENOTSUPP;
6623 
6624 	if (en)
6625 		ret = hclge_enable_phy_loopback(hdev, phydev);
6626 	else
6627 		ret = hclge_disable_phy_loopback(hdev, phydev);
6628 	if (ret) {
6629 		dev_err(&hdev->pdev->dev,
6630 			"set phy loopback fail, ret = %d\n", ret);
6631 		return ret;
6632 	}
6633 
6634 	hclge_cfg_mac_mode(hdev, en);
6635 
6636 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6637 	if (ret)
6638 		dev_err(&hdev->pdev->dev,
6639 			"phy loopback config mac mode timeout\n");
6640 
6641 	return ret;
6642 }
6643 
6644 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6645 			    int stream_id, bool enable)
6646 {
6647 	struct hclge_desc desc;
6648 	struct hclge_cfg_com_tqp_queue_cmd *req =
6649 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6650 	int ret;
6651 
6652 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6653 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6654 	req->stream_id = cpu_to_le16(stream_id);
6655 	if (enable)
6656 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6657 
6658 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6659 	if (ret)
6660 		dev_err(&hdev->pdev->dev,
6661 			"Tqp enable fail, status =%d.\n", ret);
6662 	return ret;
6663 }
6664 
6665 static int hclge_set_loopback(struct hnae3_handle *handle,
6666 			      enum hnae3_loop loop_mode, bool en)
6667 {
6668 	struct hclge_vport *vport = hclge_get_vport(handle);
6669 	struct hnae3_knic_private_info *kinfo;
6670 	struct hclge_dev *hdev = vport->back;
6671 	int i, ret;
6672 
6673 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6674 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6675 	 * the same, the packets are looped back in the SSU. If SSU loopback
6676 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6677 	 */
6678 	if (hdev->pdev->revision >= 0x21) {
6679 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6680 
6681 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6682 						HCLGE_SWITCH_ALW_LPBK_MASK);
6683 		if (ret)
6684 			return ret;
6685 	}
6686 
6687 	switch (loop_mode) {
6688 	case HNAE3_LOOP_APP:
6689 		ret = hclge_set_app_loopback(hdev, en);
6690 		break;
6691 	case HNAE3_LOOP_SERIAL_SERDES:
6692 	case HNAE3_LOOP_PARALLEL_SERDES:
6693 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6694 		break;
6695 	case HNAE3_LOOP_PHY:
6696 		ret = hclge_set_phy_loopback(hdev, en);
6697 		break;
6698 	default:
6699 		ret = -ENOTSUPP;
6700 		dev_err(&hdev->pdev->dev,
6701 			"loop_mode %d is not supported\n", loop_mode);
6702 		break;
6703 	}
6704 
6705 	if (ret)
6706 		return ret;
6707 
6708 	kinfo = &vport->nic.kinfo;
6709 	for (i = 0; i < kinfo->num_tqps; i++) {
6710 		ret = hclge_tqp_enable(hdev, i, 0, en);
6711 		if (ret)
6712 			return ret;
6713 	}
6714 
6715 	return 0;
6716 }
6717 
6718 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6719 {
6720 	int ret;
6721 
6722 	ret = hclge_set_app_loopback(hdev, false);
6723 	if (ret)
6724 		return ret;
6725 
6726 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6727 	if (ret)
6728 		return ret;
6729 
6730 	return hclge_cfg_serdes_loopback(hdev, false,
6731 					 HNAE3_LOOP_PARALLEL_SERDES);
6732 }
6733 
6734 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6735 {
6736 	struct hclge_vport *vport = hclge_get_vport(handle);
6737 	struct hnae3_knic_private_info *kinfo;
6738 	struct hnae3_queue *queue;
6739 	struct hclge_tqp *tqp;
6740 	int i;
6741 
6742 	kinfo = &vport->nic.kinfo;
6743 	for (i = 0; i < kinfo->num_tqps; i++) {
6744 		queue = handle->kinfo.tqp[i];
6745 		tqp = container_of(queue, struct hclge_tqp, q);
6746 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6747 	}
6748 }
6749 
6750 static void hclge_flush_link_update(struct hclge_dev *hdev)
6751 {
6752 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6753 
6754 	unsigned long last = hdev->serv_processed_cnt;
6755 	int i = 0;
6756 
6757 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6758 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6759 	       last == hdev->serv_processed_cnt)
6760 		usleep_range(1, 1);
6761 }
6762 
6763 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6764 {
6765 	struct hclge_vport *vport = hclge_get_vport(handle);
6766 	struct hclge_dev *hdev = vport->back;
6767 
6768 	if (enable) {
6769 		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6770 	} else {
6771 		/* Set the DOWN flag here to disable link updating */
6772 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6773 
6774 		/* flush memory to make sure DOWN is seen by service task */
6775 		smp_mb__before_atomic();
6776 		hclge_flush_link_update(hdev);
6777 	}
6778 }
6779 
6780 static int hclge_ae_start(struct hnae3_handle *handle)
6781 {
6782 	struct hclge_vport *vport = hclge_get_vport(handle);
6783 	struct hclge_dev *hdev = vport->back;
6784 
6785 	/* mac enable */
6786 	hclge_cfg_mac_mode(hdev, true);
6787 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6788 	hdev->hw.mac.link = 0;
6789 
6790 	/* reset tqp stats */
6791 	hclge_reset_tqp_stats(handle);
6792 
6793 	hclge_mac_start_phy(hdev);
6794 
6795 	return 0;
6796 }
6797 
6798 static void hclge_ae_stop(struct hnae3_handle *handle)
6799 {
6800 	struct hclge_vport *vport = hclge_get_vport(handle);
6801 	struct hclge_dev *hdev = vport->back;
6802 	int i;
6803 
6804 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6805 
6806 	hclge_clear_arfs_rules(handle);
6807 
6808 	/* If it is not PF reset, the firmware will disable the MAC,
6809 	 * so we only need to stop the phy here.
6810 	 */
6811 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6812 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6813 		hclge_mac_stop_phy(hdev);
6814 		hclge_update_link_status(hdev);
6815 		return;
6816 	}
6817 
6818 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6819 		hclge_reset_tqp(handle, i);
6820 
6821 	hclge_config_mac_tnl_int(hdev, false);
6822 
6823 	/* Mac disable */
6824 	hclge_cfg_mac_mode(hdev, false);
6825 
6826 	hclge_mac_stop_phy(hdev);
6827 
6828 	/* reset tqp stats */
6829 	hclge_reset_tqp_stats(handle);
6830 	hclge_update_link_status(hdev);
6831 }
6832 
6833 int hclge_vport_start(struct hclge_vport *vport)
6834 {
6835 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6836 	vport->last_active_jiffies = jiffies;
6837 	return 0;
6838 }
6839 
6840 void hclge_vport_stop(struct hclge_vport *vport)
6841 {
6842 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6843 }
6844 
6845 static int hclge_client_start(struct hnae3_handle *handle)
6846 {
6847 	struct hclge_vport *vport = hclge_get_vport(handle);
6848 
6849 	return hclge_vport_start(vport);
6850 }
6851 
6852 static void hclge_client_stop(struct hnae3_handle *handle)
6853 {
6854 	struct hclge_vport *vport = hclge_get_vport(handle);
6855 
6856 	hclge_vport_stop(vport);
6857 }
6858 
6859 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6860 					 u16 cmdq_resp, u8  resp_code,
6861 					 enum hclge_mac_vlan_tbl_opcode op)
6862 {
6863 	struct hclge_dev *hdev = vport->back;
6864 
6865 	if (cmdq_resp) {
6866 		dev_err(&hdev->pdev->dev,
6867 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6868 			cmdq_resp);
6869 		return -EIO;
6870 	}
6871 
6872 	if (op == HCLGE_MAC_VLAN_ADD) {
6873 		if (!resp_code || resp_code == 1) {
6874 			return 0;
6875 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6876 			dev_err(&hdev->pdev->dev,
6877 				"add mac addr failed for uc_overflow.\n");
6878 			return -ENOSPC;
6879 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6880 			dev_err(&hdev->pdev->dev,
6881 				"add mac addr failed for mc_overflow.\n");
6882 			return -ENOSPC;
6883 		}
6884 
6885 		dev_err(&hdev->pdev->dev,
6886 			"add mac addr failed for undefined, code=%u.\n",
6887 			resp_code);
6888 		return -EIO;
6889 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6890 		if (!resp_code) {
6891 			return 0;
6892 		} else if (resp_code == 1) {
6893 			dev_dbg(&hdev->pdev->dev,
6894 				"remove mac addr failed for miss.\n");
6895 			return -ENOENT;
6896 		}
6897 
6898 		dev_err(&hdev->pdev->dev,
6899 			"remove mac addr failed for undefined, code=%u.\n",
6900 			resp_code);
6901 		return -EIO;
6902 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6903 		if (!resp_code) {
6904 			return 0;
6905 		} else if (resp_code == 1) {
6906 			dev_dbg(&hdev->pdev->dev,
6907 				"lookup mac addr failed for miss.\n");
6908 			return -ENOENT;
6909 		}
6910 
6911 		dev_err(&hdev->pdev->dev,
6912 			"lookup mac addr failed for undefined, code=%u.\n",
6913 			resp_code);
6914 		return -EIO;
6915 	}
6916 
6917 	dev_err(&hdev->pdev->dev,
6918 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6919 
6920 	return -EINVAL;
6921 }
6922 
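/* Set or clear the bit for the given function id in the VF bitmap that
 * spans descriptor 1 and descriptor 2 of a MAC/VLAN table command.
 */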
6923 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6924 {
6925 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6926 
6927 	unsigned int word_num;
6928 	unsigned int bit_num;
6929 
6930 	if (vfid > 255 || vfid < 0)
6931 		return -EIO;
6932 
6933 	if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6934 		word_num = vfid / 32;
6935 		bit_num  = vfid % 32;
6936 		if (clr)
6937 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6938 		else
6939 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6940 	} else {
6941 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6942 		bit_num  = vfid % 32;
6943 		if (clr)
6944 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6945 		else
6946 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6947 	}
6948 
6949 	return 0;
6950 }
6951 
6952 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6953 {
6954 #define HCLGE_DESC_NUMBER 3
6955 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6956 	int i, j;
6957 
6958 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6959 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6960 			if (desc[i].data[j])
6961 				return false;
6962 
6963 	return true;
6964 }
6965 
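/* Pack a MAC address into the high/low fields of a MAC/VLAN table entry
 * and set the entry type bits for multicast entries.
 */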
6966 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6967 				   const u8 *addr, bool is_mc)
6968 {
6969 	const unsigned char *mac_addr = addr;
6970 	u32 high_val = (mac_addr[3] << 24) | (mac_addr[2] << 16) |
6971 		       (mac_addr[1] << 8) | mac_addr[0];
6972 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6973 
6974 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6975 	if (is_mc) {
6976 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6977 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6978 	}
6979 
6980 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6981 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6982 }
6983 
6984 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6985 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6986 {
6987 	struct hclge_dev *hdev = vport->back;
6988 	struct hclge_desc desc;
6989 	u8 resp_code;
6990 	u16 retval;
6991 	int ret;
6992 
6993 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6994 
6995 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6996 
6997 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6998 	if (ret) {
6999 		dev_err(&hdev->pdev->dev,
7000 			"del mac addr failed for cmd_send, ret =%d.\n",
7001 			ret);
7002 		return ret;
7003 	}
7004 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7005 	retval = le16_to_cpu(desc.retval);
7006 
7007 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7008 					     HCLGE_MAC_VLAN_REMOVE);
7009 }
7010 
7011 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7012 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7013 				     struct hclge_desc *desc,
7014 				     bool is_mc)
7015 {
7016 	struct hclge_dev *hdev = vport->back;
7017 	u8 resp_code;
7018 	u16 retval;
7019 	int ret;
7020 
7021 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7022 	if (is_mc) {
7023 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7024 		memcpy(desc[0].data,
7025 		       req,
7026 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7027 		hclge_cmd_setup_basic_desc(&desc[1],
7028 					   HCLGE_OPC_MAC_VLAN_ADD,
7029 					   true);
7030 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7031 		hclge_cmd_setup_basic_desc(&desc[2],
7032 					   HCLGE_OPC_MAC_VLAN_ADD,
7033 					   true);
7034 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7035 	} else {
7036 		memcpy(desc[0].data,
7037 		       req,
7038 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7039 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7040 	}
7041 	if (ret) {
7042 		dev_err(&hdev->pdev->dev,
7043 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7044 			ret);
7045 		return ret;
7046 	}
7047 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7048 	retval = le16_to_cpu(desc[0].retval);
7049 
7050 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7051 					     HCLGE_MAC_VLAN_LKUP);
7052 }
7053 
7054 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7055 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7056 				  struct hclge_desc *mc_desc)
7057 {
7058 	struct hclge_dev *hdev = vport->back;
7059 	int cfg_status;
7060 	u8 resp_code;
7061 	u16 retval;
7062 	int ret;
7063 
7064 	if (!mc_desc) {
7065 		struct hclge_desc desc;
7066 
7067 		hclge_cmd_setup_basic_desc(&desc,
7068 					   HCLGE_OPC_MAC_VLAN_ADD,
7069 					   false);
7070 		memcpy(desc.data, req,
7071 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7072 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7073 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7074 		retval = le16_to_cpu(desc.retval);
7075 
7076 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7077 							   resp_code,
7078 							   HCLGE_MAC_VLAN_ADD);
7079 	} else {
7080 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7081 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7082 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7083 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7084 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7085 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7086 		memcpy(mc_desc[0].data, req,
7087 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7088 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7089 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7090 		retval = le16_to_cpu(mc_desc[0].retval);
7091 
7092 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7093 							   resp_code,
7094 							   HCLGE_MAC_VLAN_ADD);
7095 	}
7096 
7097 	if (ret) {
7098 		dev_err(&hdev->pdev->dev,
7099 			"add mac addr failed for cmd_send, ret =%d.\n",
7100 			ret);
7101 		return ret;
7102 	}
7103 
7104 	return cfg_status;
7105 }
7106 
7107 static int hclge_init_umv_space(struct hclge_dev *hdev)
7108 {
7109 	u16 allocated_size = 0;
7110 	int ret;
7111 
7112 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7113 				  true);
7114 	if (ret)
7115 		return ret;
7116 
7117 	if (allocated_size < hdev->wanted_umv_size)
7118 		dev_warn(&hdev->pdev->dev,
7119 			 "Alloc umv space failed, want %u, get %u\n",
7120 			 hdev->wanted_umv_size, allocated_size);
7121 
7122 	mutex_init(&hdev->umv_mutex);
7123 	hdev->max_umv_size = allocated_size;
7124 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7125 	 * preserve some unicast mac vlan table entries shared by pf
7126 	 * reserve some unicast mac vlan table entries shared by pf
7127 	 */
7128 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7129 	hdev->share_umv_size = hdev->priv_umv_size +
7130 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7131 
7132 	return 0;
7133 }
7134 
7135 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7136 {
7137 	int ret;
7138 
7139 	if (hdev->max_umv_size > 0) {
7140 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7141 					  false);
7142 		if (ret)
7143 			return ret;
7144 		hdev->max_umv_size = 0;
7145 	}
7146 	mutex_destroy(&hdev->umv_mutex);
7147 
7148 	return 0;
7149 }
7150 
7151 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7152 			       u16 *allocated_size, bool is_alloc)
7153 {
7154 	struct hclge_umv_spc_alc_cmd *req;
7155 	struct hclge_desc desc;
7156 	int ret;
7157 
7158 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7159 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7160 	if (!is_alloc)
7161 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7162 
7163 	req->space_size = cpu_to_le32(space_size);
7164 
7165 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7166 	if (ret) {
7167 		dev_err(&hdev->pdev->dev,
7168 			"%s umv space failed for cmd_send, ret =%d\n",
7169 			is_alloc ? "allocate" : "free", ret);
7170 		return ret;
7171 	}
7172 
7173 	if (is_alloc && allocated_size)
7174 		*allocated_size = le32_to_cpu(desc.data[1]);
7175 
7176 	return 0;
7177 }
7178 
7179 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7180 {
7181 	struct hclge_vport *vport;
7182 	int i;
7183 
7184 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7185 		vport = &hdev->vport[i];
7186 		vport->used_umv_num = 0;
7187 	}
7188 
7189 	mutex_lock(&hdev->umv_mutex);
7190 	hdev->share_umv_size = hdev->priv_umv_size +
7191 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7192 	mutex_unlock(&hdev->umv_mutex);
7193 }
7194 
7195 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7196 {
7197 	struct hclge_dev *hdev = vport->back;
7198 	bool is_full;
7199 
7200 	mutex_lock(&hdev->umv_mutex);
7201 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7202 		   hdev->share_umv_size == 0);
7203 	mutex_unlock(&hdev->umv_mutex);
7204 
7205 	return is_full;
7206 }
7207 
7208 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7209 {
7210 	struct hclge_dev *hdev = vport->back;
7211 
7212 	mutex_lock(&hdev->umv_mutex);
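	/* Entries beyond a vport's private quota are accounted against the
	 * pool shared by all functions, so the shared counter only moves
	 * once the private quota has been used up.
	 */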
7213 	if (is_free) {
7214 		if (vport->used_umv_num > hdev->priv_umv_size)
7215 			hdev->share_umv_size++;
7216 
7217 		if (vport->used_umv_num > 0)
7218 			vport->used_umv_num--;
7219 	} else {
7220 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7221 		    hdev->share_umv_size > 0)
7222 			hdev->share_umv_size--;
7223 		vport->used_umv_num++;
7224 	}
7225 	mutex_unlock(&hdev->umv_mutex);
7226 }
7227 
7228 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7229 			     const unsigned char *addr)
7230 {
7231 	struct hclge_vport *vport = hclge_get_vport(handle);
7232 
7233 	return hclge_add_uc_addr_common(vport, addr);
7234 }
7235 
7236 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7237 			     const unsigned char *addr)
7238 {
7239 	struct hclge_dev *hdev = vport->back;
7240 	struct hclge_mac_vlan_tbl_entry_cmd req;
7241 	struct hclge_desc desc;
7242 	u16 egress_port = 0;
7243 	int ret;
7244 
7245 	/* mac addr check */
7246 	if (is_zero_ether_addr(addr) ||
7247 	    is_broadcast_ether_addr(addr) ||
7248 	    is_multicast_ether_addr(addr)) {
7249 		dev_err(&hdev->pdev->dev,
7250 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7251 			 addr, is_zero_ether_addr(addr),
7252 			 is_broadcast_ether_addr(addr),
7253 			 is_multicast_ether_addr(addr));
7254 		return -EINVAL;
7255 	}
7256 
7257 	memset(&req, 0, sizeof(req));
7258 
7259 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7260 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7261 
7262 	req.egress_port = cpu_to_le16(egress_port);
7263 
7264 	hclge_prepare_mac_addr(&req, addr, false);
7265 
7266 	/* Look up the mac address in the mac_vlan table, and add
7267 	 * it if the entry does not exist. Duplicate unicast entries
7268 	 * are not allowed in the mac vlan table.
7269 	 */
7270 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7271 	if (ret == -ENOENT) {
7272 		if (!hclge_is_umv_space_full(vport)) {
7273 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7274 			if (!ret)
7275 				hclge_update_umv_space(vport, false);
7276 			return ret;
7277 		}
7278 
7279 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7280 			hdev->priv_umv_size);
7281 
7282 		return -ENOSPC;
7283 	}
7284 
7285 	/* check if we just hit a duplicate entry */
7286 	if (!ret) {
7287 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7288 			 vport->vport_id, addr);
7289 		return 0;
7290 	}
7291 
7292 	dev_err(&hdev->pdev->dev,
7293 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7294 		addr);
7295 
7296 	return ret;
7297 }
7298 
7299 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7300 			    const unsigned char *addr)
7301 {
7302 	struct hclge_vport *vport = hclge_get_vport(handle);
7303 
7304 	return hclge_rm_uc_addr_common(vport, addr);
7305 }
7306 
7307 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7308 			    const unsigned char *addr)
7309 {
7310 	struct hclge_dev *hdev = vport->back;
7311 	struct hclge_mac_vlan_tbl_entry_cmd req;
7312 	int ret;
7313 
7314 	/* mac addr check */
7315 	if (is_zero_ether_addr(addr) ||
7316 	    is_broadcast_ether_addr(addr) ||
7317 	    is_multicast_ether_addr(addr)) {
7318 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7319 			addr);
7320 		return -EINVAL;
7321 	}
7322 
7323 	memset(&req, 0, sizeof(req));
7324 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7325 	hclge_prepare_mac_addr(&req, addr, false);
7326 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7327 	if (!ret)
7328 		hclge_update_umv_space(vport, true);
7329 
7330 	return ret;
7331 }
7332 
7333 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7334 			     const unsigned char *addr)
7335 {
7336 	struct hclge_vport *vport = hclge_get_vport(handle);
7337 
7338 	return hclge_add_mc_addr_common(vport, addr);
7339 }
7340 
7341 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7342 			     const unsigned char *addr)
7343 {
7344 	struct hclge_dev *hdev = vport->back;
7345 	struct hclge_mac_vlan_tbl_entry_cmd req;
7346 	struct hclge_desc desc[3];
7347 	int status;
7348 
7349 	/* mac addr check */
7350 	if (!is_multicast_ether_addr(addr)) {
7351 		dev_err(&hdev->pdev->dev,
7352 			"Add mc mac err! invalid mac:%pM.\n",
7353 			 addr);
7354 		return -EINVAL;
7355 	}
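	/* Multicast entries use the three-descriptor format: look the
	 * address up first, then add this vport's function id to the
	 * entry's vfid bitmap before writing the entry back.
	 */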
7356 	memset(&req, 0, sizeof(req));
7357 	hclge_prepare_mac_addr(&req, addr, true);
7358 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7359 	if (status) {
7360 		/* This mac addr does not exist, add a new entry for it */
7361 		memset(desc[0].data, 0, sizeof(desc[0].data));
7362 		memset(desc[1].data, 0, sizeof(desc[0].data));
7363 		memset(desc[2].data, 0, sizeof(desc[0].data));
7364 	}
7365 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7366 	if (status)
7367 		return status;
7368 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7369 
7370 	if (status == -ENOSPC)
7371 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7372 
7373 	return status;
7374 }
7375 
7376 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7377 			    const unsigned char *addr)
7378 {
7379 	struct hclge_vport *vport = hclge_get_vport(handle);
7380 
7381 	return hclge_rm_mc_addr_common(vport, addr);
7382 }
7383 
7384 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7385 			    const unsigned char *addr)
7386 {
7387 	struct hclge_dev *hdev = vport->back;
7388 	struct hclge_mac_vlan_tbl_entry_cmd req;
7389 	enum hclge_cmd_status status;
7390 	struct hclge_desc desc[3];
7391 
7392 	/* mac addr check */
7393 	if (!is_multicast_ether_addr(addr)) {
7394 		dev_dbg(&hdev->pdev->dev,
7395 			"Remove mc mac err! invalid mac:%pM.\n",
7396 			 addr);
7397 		return -EINVAL;
7398 	}
7399 
7400 	memset(&req, 0, sizeof(req));
7401 	hclge_prepare_mac_addr(&req, addr, true);
7402 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7403 	if (!status) {
7404 		/* This mac addr exists, remove this handle's VFID from it */
7405 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7406 		if (status)
7407 			return status;
7408 
7409 		if (hclge_is_all_function_id_zero(desc))
7410 			/* All the vfids are zero, so delete this entry */
7411 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7412 		else
7413 			/* Not all the vfids are zero, just update the vfid */
7414 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7415 
7416 	} else {
7417 		/* Maybe this mac address is in the mta table, but it cannot
7418 		 * be deleted here because an mta entry represents an address
7419 		 * range rather than a specific address. The delete action for
7420 		 * all entries will take effect in update_mta_status, called
7421 		 * by hns3_nic_set_rx_mode.
7422 		 */
7423 		status = 0;
7424 	}
7425 
7426 	return status;
7427 }
7428 
7429 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7430 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7431 {
7432 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7433 	struct list_head *list;
7434 
7435 	if (!vport->vport_id)
7436 		return;
7437 
7438 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7439 	if (!mac_cfg)
7440 		return;
7441 
7442 	mac_cfg->hd_tbl_status = true;
7443 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7444 
7445 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7446 	       &vport->uc_mac_list : &vport->mc_mac_list;
7447 
7448 	list_add_tail(&mac_cfg->node, list);
7449 }
7450 
7451 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7452 			      bool is_write_tbl,
7453 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7454 {
7455 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7456 	struct list_head *list;
7457 	bool uc_flag, mc_flag;
7458 
7459 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7460 	       &vport->uc_mac_list : &vport->mc_mac_list;
7461 
7462 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7463 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7464 
7465 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7466 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7467 			if (uc_flag && mac_cfg->hd_tbl_status)
7468 				hclge_rm_uc_addr_common(vport, mac_addr);
7469 
7470 			if (mc_flag && mac_cfg->hd_tbl_status)
7471 				hclge_rm_mc_addr_common(vport, mac_addr);
7472 
7473 			list_del(&mac_cfg->node);
7474 			kfree(mac_cfg);
7475 			break;
7476 		}
7477 	}
7478 }
7479 
7480 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7481 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7482 {
7483 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7484 	struct list_head *list;
7485 
7486 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7487 	       &vport->uc_mac_list : &vport->mc_mac_list;
7488 
7489 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7490 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7491 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7492 
7493 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7494 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7495 
7496 		mac_cfg->hd_tbl_status = false;
7497 		if (is_del_list) {
7498 			list_del(&mac_cfg->node);
7499 			kfree(mac_cfg);
7500 		}
7501 	}
7502 }
7503 
7504 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7505 {
7506 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7507 	struct hclge_vport *vport;
7508 	int i;
7509 
7510 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7511 		vport = &hdev->vport[i];
7512 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7513 			list_del(&mac->node);
7514 			kfree(mac);
7515 		}
7516 
7517 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7518 			list_del(&mac->node);
7519 			kfree(mac);
7520 		}
7521 	}
7522 }
7523 
7524 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7525 					      u16 cmdq_resp, u8 resp_code)
7526 {
7527 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7528 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7529 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7530 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7531 
7532 	int return_status;
7533 
7534 	if (cmdq_resp) {
7535 		dev_err(&hdev->pdev->dev,
7536 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7537 			cmdq_resp);
7538 		return -EIO;
7539 	}
7540 
7541 	switch (resp_code) {
7542 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7543 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7544 		return_status = 0;
7545 		break;
7546 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7547 		dev_err(&hdev->pdev->dev,
7548 			"add mac ethertype failed for manager table overflow.\n");
7549 		return_status = -EIO;
7550 		break;
7551 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7552 		dev_err(&hdev->pdev->dev,
7553 			"add mac ethertype failed for key conflict.\n");
7554 		return_status = -EIO;
7555 		break;
7556 	default:
7557 		dev_err(&hdev->pdev->dev,
7558 			"add mac ethertype failed for undefined, code=%u.\n",
7559 			resp_code);
7560 		return_status = -EIO;
7561 	}
7562 
7563 	return return_status;
7564 }
7565 
7566 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7567 				     u8 *mac_addr)
7568 {
7569 	struct hclge_mac_vlan_tbl_entry_cmd req;
7570 	struct hclge_dev *hdev = vport->back;
7571 	struct hclge_desc desc;
7572 	u16 egress_port = 0;
7573 	int i;
7574 
7575 	if (is_zero_ether_addr(mac_addr))
7576 		return false;
7577 
7578 	memset(&req, 0, sizeof(req));
7579 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7580 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7581 	req.egress_port = cpu_to_le16(egress_port);
7582 	hclge_prepare_mac_addr(&req, mac_addr, false);
7583 
7584 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7585 		return true;
7586 
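	/* The address is not in the hardware table; also check whether any
	 * other VF has already been assigned the same MAC. vf_idx is first
	 * converted to a vport index, as VF vports start after the PF.
	 */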
7587 	vf_idx += HCLGE_VF_VPORT_START_NUM;
7588 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7589 		if (i != vf_idx &&
7590 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7591 			return true;
7592 
7593 	return false;
7594 }
7595 
7596 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7597 			    u8 *mac_addr)
7598 {
7599 	struct hclge_vport *vport = hclge_get_vport(handle);
7600 	struct hclge_dev *hdev = vport->back;
7601 
7602 	vport = hclge_get_vf_vport(hdev, vf);
7603 	if (!vport)
7604 		return -EINVAL;
7605 
7606 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7607 		dev_info(&hdev->pdev->dev,
7608 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
7609 			 mac_addr);
7610 		return 0;
7611 	}
7612 
7613 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7614 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7615 			mac_addr);
7616 		return -EEXIST;
7617 	}
7618 
7619 	ether_addr_copy(vport->vf_info.mac, mac_addr);
7620 
7621 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7622 		dev_info(&hdev->pdev->dev,
7623 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7624 			 vf, mac_addr);
7625 		return hclge_inform_reset_assert_to_vf(vport);
7626 	}
7627 
7628 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
7629 		 vf, mac_addr);
7630 	return 0;
7631 }
7632 
7633 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7634 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7635 {
7636 	struct hclge_desc desc;
7637 	u8 resp_code;
7638 	u16 retval;
7639 	int ret;
7640 
7641 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7642 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7643 
7644 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7645 	if (ret) {
7646 		dev_err(&hdev->pdev->dev,
7647 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7648 			ret);
7649 		return ret;
7650 	}
7651 
7652 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7653 	retval = le16_to_cpu(desc.retval);
7654 
7655 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7656 }
7657 
7658 static int init_mgr_tbl(struct hclge_dev *hdev)
7659 {
7660 	int ret;
7661 	int i;
7662 
7663 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7664 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7665 		if (ret) {
7666 			dev_err(&hdev->pdev->dev,
7667 				"add mac ethertype failed, ret =%d.\n",
7668 				ret);
7669 			return ret;
7670 		}
7671 	}
7672 
7673 	return 0;
7674 }
7675 
7676 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7677 {
7678 	struct hclge_vport *vport = hclge_get_vport(handle);
7679 	struct hclge_dev *hdev = vport->back;
7680 
7681 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7682 }
7683 
7684 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7685 			      bool is_first)
7686 {
7687 	const unsigned char *new_addr = (const unsigned char *)p;
7688 	struct hclge_vport *vport = hclge_get_vport(handle);
7689 	struct hclge_dev *hdev = vport->back;
7690 	int ret;
7691 
7692 	/* mac addr check */
7693 	if (is_zero_ether_addr(new_addr) ||
7694 	    is_broadcast_ether_addr(new_addr) ||
7695 	    is_multicast_ether_addr(new_addr)) {
7696 		dev_err(&hdev->pdev->dev,
7697 			"Change uc mac err! invalid mac:%pM.\n",
7698 			 new_addr);
7699 		return -EINVAL;
7700 	}
7701 
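	/* Remove the previously programmed address unless this is the very
	 * first configuration. In a kdump kernel the table may still hold
	 * the address left behind by the crashed kernel, so attempt the
	 * removal in that case as well.
	 */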
7702 	if ((!is_first || is_kdump_kernel()) &&
7703 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7704 		dev_warn(&hdev->pdev->dev,
7705 			 "remove old uc mac address fail.\n");
7706 
7707 	ret = hclge_add_uc_addr(handle, new_addr);
7708 	if (ret) {
7709 		dev_err(&hdev->pdev->dev,
7710 			"add uc mac address fail, ret =%d.\n",
7711 			ret);
7712 
7713 		if (!is_first &&
7714 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7715 			dev_err(&hdev->pdev->dev,
7716 				"restore uc mac address fail.\n");
7717 
7718 		return -EIO;
7719 	}
7720 
7721 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7722 	if (ret) {
7723 		dev_err(&hdev->pdev->dev,
7724 			"configure mac pause address fail, ret =%d.\n",
7725 			ret);
7726 		return -EIO;
7727 	}
7728 
7729 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7730 
7731 	return 0;
7732 }
7733 
7734 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7735 			  int cmd)
7736 {
7737 	struct hclge_vport *vport = hclge_get_vport(handle);
7738 	struct hclge_dev *hdev = vport->back;
7739 
7740 	if (!hdev->hw.mac.phydev)
7741 		return -EOPNOTSUPP;
7742 
7743 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7744 }
7745 
7746 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7747 				      u8 fe_type, bool filter_en, u8 vf_id)
7748 {
7749 	struct hclge_vlan_filter_ctrl_cmd *req;
7750 	struct hclge_desc desc;
7751 	int ret;
7752 
7753 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7754 
7755 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7756 	req->vlan_type = vlan_type;
7757 	req->vlan_fe = filter_en ? fe_type : 0;
7758 	req->vf_id = vf_id;
7759 
7760 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7761 	if (ret)
7762 		dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7763 			ret);
7764 
7765 	return ret;
7766 }
7767 
7768 #define HCLGE_FILTER_TYPE_VF		0
7769 #define HCLGE_FILTER_TYPE_PORT		1
7770 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7771 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7772 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7773 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7774 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7775 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7776 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7777 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7778 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7779 
7780 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7781 {
7782 	struct hclge_vport *vport = hclge_get_vport(handle);
7783 	struct hclge_dev *hdev = vport->back;
7784 
7785 	if (hdev->pdev->revision >= 0x21) {
7786 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7787 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7788 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7789 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7790 	} else {
7791 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7792 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7793 					   0);
7794 	}
7795 	if (enable)
7796 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7797 	else
7798 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7799 }
7800 
7801 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7802 				    bool is_kill, u16 vlan,
7803 				    __be16 proto)
7804 {
7805 	struct hclge_vport *vport = &hdev->vport[vfid];
7806 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7807 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7808 	struct hclge_desc desc[2];
7809 	u8 vf_byte_val;
7810 	u8 vf_byte_off;
7811 	int ret;
7812 
7813 	/* If the vf vlan table is full, firmware closes the vf vlan filter,
7814 	 * so it is neither possible nor necessary to add a new vlan id. If
7815 	 * spoof check is enabled and the vf vlan table is full, no new vlan
7816 	 * should be added, since tx packets with these vlan ids are dropped.
7817 	 */
7818 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7819 		if (vport->vf_info.spoofchk && vlan) {
7820 			dev_err(&hdev->pdev->dev,
7821 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
7822 			return -EPERM;
7823 		}
7824 		return 0;
7825 	}
7826 
7827 	hclge_cmd_setup_basic_desc(&desc[0],
7828 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7829 	hclge_cmd_setup_basic_desc(&desc[1],
7830 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7831 
7832 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7833 
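	/* Each VF is represented by a single bit in the vf_bitmap: vfid
	 * selects the byte and the bit within that byte. The bitmap spans
	 * two descriptors, so bytes beyond HCLGE_MAX_VF_BYTES land in the
	 * second descriptor.
	 */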
7834 	vf_byte_off = vfid / 8;
7835 	vf_byte_val = 1 << (vfid % 8);
7836 
7837 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7838 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7839 
7840 	req0->vlan_id  = cpu_to_le16(vlan);
7841 	req0->vlan_cfg = is_kill;
7842 
7843 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7844 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7845 	else
7846 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7847 
7848 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7849 	if (ret) {
7850 		dev_err(&hdev->pdev->dev,
7851 			"Send vf vlan command fail, ret =%d.\n",
7852 			ret);
7853 		return ret;
7854 	}
7855 
7856 	if (!is_kill) {
7857 #define HCLGE_VF_VLAN_NO_ENTRY	2
7858 		if (!req0->resp_code || req0->resp_code == 1)
7859 			return 0;
7860 
7861 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7862 			set_bit(vfid, hdev->vf_vlan_full);
7863 			dev_warn(&hdev->pdev->dev,
7864 				 "vf vlan table is full, vf vlan filter is disabled\n");
7865 			return 0;
7866 		}
7867 
7868 		dev_err(&hdev->pdev->dev,
7869 			"Add vf vlan filter fail, ret =%u.\n",
7870 			req0->resp_code);
7871 	} else {
7872 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7873 		if (!req0->resp_code)
7874 			return 0;
7875 
7876 		/* The vf vlan filter is disabled once the vf vlan table is
7877 		 * full, so new vlan ids are never added to the vf vlan table.
7878 		 * Just return 0 without a warning, to avoid massive verbose
7879 		 * logs on unload.
7880 		 */
7881 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7882 			return 0;
7883 
7884 		dev_err(&hdev->pdev->dev,
7885 			"Kill vf vlan filter fail, ret =%u.\n",
7886 			req0->resp_code);
7887 	}
7888 
7889 	return -EIO;
7890 }
7891 
7892 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7893 				      u16 vlan_id, bool is_kill)
7894 {
7895 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7896 	struct hclge_desc desc;
7897 	u8 vlan_offset_byte_val;
7898 	u8 vlan_offset_byte;
7899 	u8 vlan_offset_160;
7900 	int ret;
7901 
7902 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7903 
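	/* The PF vlan filter is programmed in blocks: vlan_id selects a
	 * block via HCLGE_VLAN_ID_OFFSET_STEP, then a byte offset and a bit
	 * inside that block's bitmap identify the exact vlan id.
	 */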
7904 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7905 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7906 			   HCLGE_VLAN_BYTE_SIZE;
7907 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7908 
7909 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7910 	req->vlan_offset = vlan_offset_160;
7911 	req->vlan_cfg = is_kill;
7912 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7913 
7914 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7915 	if (ret)
7916 		dev_err(&hdev->pdev->dev,
7917 			"port vlan command, send fail, ret =%d.\n", ret);
7918 	return ret;
7919 }
7920 
7921 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7922 				    u16 vport_id, u16 vlan_id,
7923 				    bool is_kill)
7924 {
7925 	u16 vport_idx, vport_num = 0;
7926 	int ret;
7927 
7928 	if (is_kill && !vlan_id)
7929 		return 0;
7930 
7931 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7932 				       proto);
7933 	if (ret) {
7934 		dev_err(&hdev->pdev->dev,
7935 			"Set %u vport vlan filter config fail, ret =%d.\n",
7936 			vport_id, ret);
7937 		return ret;
7938 	}
7939 
7940 	/* vlan 0 may be added twice when 8021q module is enabled */
7941 	if (!is_kill && !vlan_id &&
7942 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7943 		return 0;
7944 
7945 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7946 		dev_err(&hdev->pdev->dev,
7947 			"Add port vlan failed, vport %u is already in vlan %u\n",
7948 			vport_id, vlan_id);
7949 		return -EINVAL;
7950 	}
7951 
7952 	if (is_kill &&
7953 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7954 		dev_err(&hdev->pdev->dev,
7955 			"Delete port vlan failed, vport %u is not in vlan %u\n",
7956 			vport_id, vlan_id);
7957 		return -EINVAL;
7958 	}
7959 
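	/* Only touch the port vlan filter when the membership count crosses
	 * zero: the first vport to join programs the port filter and the
	 * last vport to leave clears it.
	 */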
7960 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7961 		vport_num++;
7962 
7963 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7964 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7965 						 is_kill);
7966 
7967 	return ret;
7968 }
7969 
7970 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7971 {
7972 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7973 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7974 	struct hclge_dev *hdev = vport->back;
7975 	struct hclge_desc desc;
7976 	u16 bmap_index;
7977 	int status;
7978 
7979 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7980 
7981 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7982 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7983 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7984 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7985 		      vcfg->accept_tag1 ? 1 : 0);
7986 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7987 		      vcfg->accept_untag1 ? 1 : 0);
7988 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7989 		      vcfg->accept_tag2 ? 1 : 0);
7990 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7991 		      vcfg->accept_untag2 ? 1 : 0);
7992 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7993 		      vcfg->insert_tag1_en ? 1 : 0);
7994 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7995 		      vcfg->insert_tag2_en ? 1 : 0);
7996 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7997 
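	/* Select which function this tx vlan tag config applies to:
	 * vf_offset picks a group of HCLGE_VF_NUM_PER_CMD vports, and one
	 * bit in vf_bitmap (byte bmap_index) picks the vport within that
	 * group.
	 */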
7998 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7999 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8000 			HCLGE_VF_NUM_PER_BYTE;
8001 	req->vf_bitmap[bmap_index] =
8002 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8003 
8004 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8005 	if (status)
8006 		dev_err(&hdev->pdev->dev,
8007 			"Send port txvlan cfg command fail, ret =%d\n",
8008 			status);
8009 
8010 	return status;
8011 }
8012 
8013 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8014 {
8015 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8016 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8017 	struct hclge_dev *hdev = vport->back;
8018 	struct hclge_desc desc;
8019 	u16 bmap_index;
8020 	int status;
8021 
8022 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8023 
8024 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8025 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8026 		      vcfg->strip_tag1_en ? 1 : 0);
8027 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8028 		      vcfg->strip_tag2_en ? 1 : 0);
8029 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8030 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8031 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8032 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8033 
8034 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8035 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8036 			HCLGE_VF_NUM_PER_BYTE;
8037 	req->vf_bitmap[bmap_index] =
8038 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8039 
8040 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8041 	if (status)
8042 		dev_err(&hdev->pdev->dev,
8043 			"Send port rxvlan cfg command fail, ret =%d\n",
8044 			status);
8045 
8046 	return status;
8047 }
8048 
8049 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8050 				  u16 port_base_vlan_state,
8051 				  u16 vlan_tag)
8052 {
8053 	int ret;
8054 
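	/* With port based vlan disabled, the stack's tag1 is accepted as-is;
	 * with it enabled, the hardware inserts the port based vlan as tag1
	 * and tagged packets from the stack are not accepted.
	 */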
8055 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8056 		vport->txvlan_cfg.accept_tag1 = true;
8057 		vport->txvlan_cfg.insert_tag1_en = false;
8058 		vport->txvlan_cfg.default_tag1 = 0;
8059 	} else {
8060 		vport->txvlan_cfg.accept_tag1 = false;
8061 		vport->txvlan_cfg.insert_tag1_en = true;
8062 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8063 	}
8064 
8065 	vport->txvlan_cfg.accept_untag1 = true;
8066 
8067 	/* accept_tag2 and accept_untag2 are not supported on
8068 	 * pdev revision 0x20; newer revisions support them, but
8069 	 * these two fields cannot be configured by the user.
8070 	 */
8071 	vport->txvlan_cfg.accept_tag2 = true;
8072 	vport->txvlan_cfg.accept_untag2 = true;
8073 	vport->txvlan_cfg.insert_tag2_en = false;
8074 	vport->txvlan_cfg.default_tag2 = 0;
8075 
8076 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8077 		vport->rxvlan_cfg.strip_tag1_en = false;
8078 		vport->rxvlan_cfg.strip_tag2_en =
8079 				vport->rxvlan_cfg.rx_vlan_offload_en;
8080 	} else {
8081 		vport->rxvlan_cfg.strip_tag1_en =
8082 				vport->rxvlan_cfg.rx_vlan_offload_en;
8083 		vport->rxvlan_cfg.strip_tag2_en = true;
8084 	}
8085 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8086 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8087 
8088 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8089 	if (ret)
8090 		return ret;
8091 
8092 	return hclge_set_vlan_rx_offload_cfg(vport);
8093 }
8094 
8095 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8096 {
8097 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8098 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8099 	struct hclge_desc desc;
8100 	int status;
8101 
8102 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8103 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8104 	rx_req->ot_fst_vlan_type =
8105 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8106 	rx_req->ot_sec_vlan_type =
8107 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8108 	rx_req->in_fst_vlan_type =
8109 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8110 	rx_req->in_sec_vlan_type =
8111 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8112 
8113 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8114 	if (status) {
8115 		dev_err(&hdev->pdev->dev,
8116 			"Send rxvlan protocol type command fail, ret =%d\n",
8117 			status);
8118 		return status;
8119 	}
8120 
8121 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8122 
8123 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8124 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8125 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8126 
8127 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8128 	if (status)
8129 		dev_err(&hdev->pdev->dev,
8130 			"Send txvlan protocol type command fail, ret =%d\n",
8131 			status);
8132 
8133 	return status;
8134 }
8135 
8136 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8137 {
8138 #define HCLGE_DEF_VLAN_TYPE		0x8100
8139 
8140 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8141 	struct hclge_vport *vport;
8142 	int ret;
8143 	int i;
8144 
8145 	if (hdev->pdev->revision >= 0x21) {
8146 		/* for revision 0x21, vf vlan filter is per function */
8147 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8148 			vport = &hdev->vport[i];
8149 			ret = hclge_set_vlan_filter_ctrl(hdev,
8150 							 HCLGE_FILTER_TYPE_VF,
8151 							 HCLGE_FILTER_FE_EGRESS,
8152 							 true,
8153 							 vport->vport_id);
8154 			if (ret)
8155 				return ret;
8156 		}
8157 
8158 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8159 						 HCLGE_FILTER_FE_INGRESS, true,
8160 						 0);
8161 		if (ret)
8162 			return ret;
8163 	} else {
8164 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8165 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8166 						 true, 0);
8167 		if (ret)
8168 			return ret;
8169 	}
8170 
8171 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8172 
8173 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8174 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8175 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8176 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8177 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8178 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8179 
8180 	ret = hclge_set_vlan_protocol_type(hdev);
8181 	if (ret)
8182 		return ret;
8183 
8184 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8185 		u16 vlan_tag;
8186 
8187 		vport = &hdev->vport[i];
8188 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8189 
8190 		ret = hclge_vlan_offload_cfg(vport,
8191 					     vport->port_base_vlan_cfg.state,
8192 					     vlan_tag);
8193 		if (ret)
8194 			return ret;
8195 	}
8196 
8197 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8198 }
8199 
8200 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8201 				       bool writen_to_tbl)
8202 {
8203 	struct hclge_vport_vlan_cfg *vlan;
8204 
8205 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8206 	if (!vlan)
8207 		return;
8208 
8209 	vlan->hd_tbl_status = writen_to_tbl;
8210 	vlan->vlan_id = vlan_id;
8211 
8212 	list_add_tail(&vlan->node, &vport->vlan_list);
8213 }
8214 
8215 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8216 {
8217 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8218 	struct hclge_dev *hdev = vport->back;
8219 	int ret;
8220 
8221 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8222 		if (!vlan->hd_tbl_status) {
8223 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8224 						       vport->vport_id,
8225 						       vlan->vlan_id, false);
8226 			if (ret) {
8227 				dev_err(&hdev->pdev->dev,
8228 					"restore vport vlan list failed, ret=%d\n",
8229 					ret);
8230 				return ret;
8231 			}
8232 		}
8233 		vlan->hd_tbl_status = true;
8234 	}
8235 
8236 	return 0;
8237 }
8238 
8239 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8240 				      bool is_write_tbl)
8241 {
8242 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8243 	struct hclge_dev *hdev = vport->back;
8244 
8245 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8246 		if (vlan->vlan_id == vlan_id) {
8247 			if (is_write_tbl && vlan->hd_tbl_status)
8248 				hclge_set_vlan_filter_hw(hdev,
8249 							 htons(ETH_P_8021Q),
8250 							 vport->vport_id,
8251 							 vlan_id,
8252 							 true);
8253 
8254 			list_del(&vlan->node);
8255 			kfree(vlan);
8256 			break;
8257 		}
8258 	}
8259 }
8260 
8261 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8262 {
8263 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8264 	struct hclge_dev *hdev = vport->back;
8265 
8266 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8267 		if (vlan->hd_tbl_status)
8268 			hclge_set_vlan_filter_hw(hdev,
8269 						 htons(ETH_P_8021Q),
8270 						 vport->vport_id,
8271 						 vlan->vlan_id,
8272 						 true);
8273 
8274 		vlan->hd_tbl_status = false;
8275 		if (is_del_list) {
8276 			list_del(&vlan->node);
8277 			kfree(vlan);
8278 		}
8279 	}
8280 }
8281 
8282 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8283 {
8284 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8285 	struct hclge_vport *vport;
8286 	int i;
8287 
8288 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8289 		vport = &hdev->vport[i];
8290 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8291 			list_del(&vlan->node);
8292 			kfree(vlan);
8293 		}
8294 	}
8295 }
8296 
8297 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8298 {
8299 	struct hclge_vport *vport = hclge_get_vport(handle);
8300 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8301 	struct hclge_dev *hdev = vport->back;
8302 	u16 vlan_proto;
8303 	u16 state, vlan_id;
8304 	int i;
8305 
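	/* After a reset, re-program either the port based vlan (when it is
	 * enabled on the vport) or every vlan recorded in the vport's vlan
	 * list.
	 */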
8306 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8307 		vport = &hdev->vport[i];
8308 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8309 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8310 		state = vport->port_base_vlan_cfg.state;
8311 
8312 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8313 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8314 						 vport->vport_id, vlan_id,
8315 						 false);
8316 			continue;
8317 		}
8318 
8319 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8320 			int ret;
8321 
8322 			if (!vlan->hd_tbl_status)
8323 				continue;
8324 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8325 						       vport->vport_id,
8326 						       vlan->vlan_id, false);
8327 			if (ret)
8328 				break;
8329 		}
8330 	}
8331 }
8332 
8333 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8334 {
8335 	struct hclge_vport *vport = hclge_get_vport(handle);
8336 
8337 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8338 		vport->rxvlan_cfg.strip_tag1_en = false;
8339 		vport->rxvlan_cfg.strip_tag2_en = enable;
8340 	} else {
8341 		vport->rxvlan_cfg.strip_tag1_en = enable;
8342 		vport->rxvlan_cfg.strip_tag2_en = true;
8343 	}
8344 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8345 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8346 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8347 
8348 	return hclge_set_vlan_rx_offload_cfg(vport);
8349 }
8350 
8351 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8352 					    u16 port_base_vlan_state,
8353 					    struct hclge_vlan_info *new_info,
8354 					    struct hclge_vlan_info *old_info)
8355 {
8356 	struct hclge_dev *hdev = vport->back;
8357 	int ret;
8358 
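	/* Enabling port based vlan replaces all vlans from the vport list
	 * with the single port based vlan; disabling it removes the port
	 * based vlan and restores the vport vlan list to hardware.
	 */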
8359 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8360 		hclge_rm_vport_all_vlan_table(vport, false);
8361 		return hclge_set_vlan_filter_hw(hdev,
8362 						 htons(new_info->vlan_proto),
8363 						 vport->vport_id,
8364 						 new_info->vlan_tag,
8365 						 false);
8366 	}
8367 
8368 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8369 				       vport->vport_id, old_info->vlan_tag,
8370 				       true);
8371 	if (ret)
8372 		return ret;
8373 
8374 	return hclge_add_vport_all_vlan_table(vport);
8375 }
8376 
8377 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8378 				    struct hclge_vlan_info *vlan_info)
8379 {
8380 	struct hnae3_handle *nic = &vport->nic;
8381 	struct hclge_vlan_info *old_vlan_info;
8382 	struct hclge_dev *hdev = vport->back;
8383 	int ret;
8384 
8385 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8386 
8387 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8388 	if (ret)
8389 		return ret;
8390 
8391 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8392 		/* add new VLAN tag */
8393 		ret = hclge_set_vlan_filter_hw(hdev,
8394 					       htons(vlan_info->vlan_proto),
8395 					       vport->vport_id,
8396 					       vlan_info->vlan_tag,
8397 					       false);
8398 		if (ret)
8399 			return ret;
8400 
8401 		/* remove old VLAN tag */
8402 		ret = hclge_set_vlan_filter_hw(hdev,
8403 					       htons(old_vlan_info->vlan_proto),
8404 					       vport->vport_id,
8405 					       old_vlan_info->vlan_tag,
8406 					       true);
8407 		if (ret)
8408 			return ret;
8409 
8410 		goto update;
8411 	}
8412 
8413 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8414 					       old_vlan_info);
8415 	if (ret)
8416 		return ret;
8417 
8418 	/* update state only when disabling/enabling port based VLAN */
8419 	vport->port_base_vlan_cfg.state = state;
8420 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8421 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8422 	else
8423 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8424 
8425 update:
8426 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8427 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8428 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8429 
8430 	return 0;
8431 }
8432 
8433 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8434 					  enum hnae3_port_base_vlan_state state,
8435 					  u16 vlan)
8436 {
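	/* Map the requested vlan onto a state transition:
	 *   currently disabled + vlan 0     -> no change
	 *   currently disabled + non-zero   -> enable
	 *   currently enabled  + vlan 0     -> disable
	 *   currently enabled  + same vlan  -> no change
	 *   currently enabled  + other vlan -> modify
	 */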
8437 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8438 		if (!vlan)
8439 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8440 		else
8441 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8442 	} else {
8443 		if (!vlan)
8444 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8445 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8446 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8447 		else
8448 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8449 	}
8450 }
8451 
8452 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8453 				    u16 vlan, u8 qos, __be16 proto)
8454 {
8455 	struct hclge_vport *vport = hclge_get_vport(handle);
8456 	struct hclge_dev *hdev = vport->back;
8457 	struct hclge_vlan_info vlan_info;
8458 	u16 state;
8459 	int ret;
8460 
8461 	if (hdev->pdev->revision == 0x20)
8462 		return -EOPNOTSUPP;
8463 
8464 	vport = hclge_get_vf_vport(hdev, vfid);
8465 	if (!vport)
8466 		return -EINVAL;
8467 
8468 	/* qos is a 3-bit value, so it cannot be bigger than 7 */
8469 	if (vlan > VLAN_N_VID - 1 || qos > 7)
8470 		return -EINVAL;
8471 	if (proto != htons(ETH_P_8021Q))
8472 		return -EPROTONOSUPPORT;
8473 
8474 	state = hclge_get_port_base_vlan_state(vport,
8475 					       vport->port_base_vlan_cfg.state,
8476 					       vlan);
8477 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8478 		return 0;
8479 
8480 	vlan_info.vlan_tag = vlan;
8481 	vlan_info.qos = qos;
8482 	vlan_info.vlan_proto = ntohs(proto);
8483 
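	/* If the VF is not alive, update the port based vlan config locally;
	 * otherwise push the new config to the VF through the mailbox so it
	 * can apply the change.
	 */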
8484 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8485 		return hclge_update_port_base_vlan_cfg(vport, state,
8486 						       &vlan_info);
8487 	} else {
8488 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8489 							vport->vport_id, state,
8490 							vlan, qos,
8491 							ntohs(proto));
8492 		return ret;
8493 	}
8494 }
8495 
8496 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8497 			  u16 vlan_id, bool is_kill)
8498 {
8499 	struct hclge_vport *vport = hclge_get_vport(handle);
8500 	struct hclge_dev *hdev = vport->back;
8501 	bool writen_to_tbl = false;
8502 	int ret = 0;
8503 
8504 	/* When the device is resetting, the firmware cannot handle the
8505 	 * mailbox. Just record the vlan id, and remove it after the
8506 	 * reset has finished.
8507 	 */
8508 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8509 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8510 		return -EBUSY;
8511 	}
8512 
8513 	/* When port based vlan is enabled, we use the port based vlan as
8514 	 * the vlan filter entry. In this case, we don't update the vlan
8515 	 * filter table when the user adds or removes a vlan, we only update
8516 	 * the vport vlan list. The vlan ids in the vlan list are not written
8517 	 * to the vlan filter table until port based vlan is disabled.
8518 	 */
8519 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8520 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8521 					       vlan_id, is_kill);
8522 		writen_to_tbl = true;
8523 	}
8524 
8525 	if (!ret) {
8526 		if (is_kill)
8527 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8528 		else
8529 			hclge_add_vport_vlan_table(vport, vlan_id,
8530 						   writen_to_tbl);
8531 	} else if (is_kill) {
8532 		/* When removing the hw vlan filter fails, record the vlan id
8533 		 * and try to remove it from hw later, to stay consistent
8534 		 * with the stack.
8535 		 */
8536 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8537 	}
8538 	return ret;
8539 }
8540 
8541 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8542 {
8543 #define HCLGE_MAX_SYNC_COUNT	60
8544 
8545 	int i, ret, sync_cnt = 0;
8546 	u16 vlan_id;
8547 
8548 	/* start from vport 1 for PF is always alive */
8549 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8550 		struct hclge_vport *vport = &hdev->vport[i];
8551 
8552 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8553 					 VLAN_N_VID);
8554 		while (vlan_id != VLAN_N_VID) {
8555 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8556 						       vport->vport_id, vlan_id,
8557 						       true);
8558 			if (ret && ret != -EINVAL)
8559 				return;
8560 
8561 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8562 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8563 
8564 			sync_cnt++;
8565 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8566 				return;
8567 
8568 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8569 						 VLAN_N_VID);
8570 		}
8571 	}
8572 }
8573 
8574 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8575 {
8576 	struct hclge_config_max_frm_size_cmd *req;
8577 	struct hclge_desc desc;
8578 
8579 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8580 
8581 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8582 	req->max_frm_size = cpu_to_le16(new_mps);
8583 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8584 
8585 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8586 }
8587 
8588 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8589 {
8590 	struct hclge_vport *vport = hclge_get_vport(handle);
8591 
8592 	return hclge_set_vport_mtu(vport, new_mtu);
8593 }
8594 
8595 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8596 {
8597 	struct hclge_dev *hdev = vport->back;
8598 	int i, max_frm_size, ret;
8599 
8600 	/* HW supports 2 layers of vlan tags */
8601 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8602 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8603 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8604 		return -EINVAL;
8605 
8606 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8607 	mutex_lock(&hdev->vport_lock);
8608 	/* VF's mps must fit within hdev->mps */
8609 	if (vport->vport_id && max_frm_size > hdev->mps) {
8610 		mutex_unlock(&hdev->vport_lock);
8611 		return -EINVAL;
8612 	} else if (vport->vport_id) {
8613 		vport->mps = max_frm_size;
8614 		mutex_unlock(&hdev->vport_lock);
8615 		return 0;
8616 	}
8617 
8618 	/* PF's mps must not be smaller than any VF's mps */
8619 	for (i = 1; i < hdev->num_alloc_vport; i++)
8620 		if (max_frm_size < hdev->vport[i].mps) {
8621 			mutex_unlock(&hdev->vport_lock);
8622 			return -EINVAL;
8623 		}
8624 
8625 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8626 
8627 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8628 	if (ret) {
8629 		dev_err(&hdev->pdev->dev,
8630 			"Change mtu fail, ret =%d\n", ret);
8631 		goto out;
8632 	}
8633 
8634 	hdev->mps = max_frm_size;
8635 	vport->mps = max_frm_size;
8636 
8637 	ret = hclge_buffer_alloc(hdev);
8638 	if (ret)
8639 		dev_err(&hdev->pdev->dev,
8640 			"Allocate buffer fail, ret =%d\n", ret);
8641 
8642 out:
8643 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8644 	mutex_unlock(&hdev->vport_lock);
8645 	return ret;
8646 }
8647 
8648 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8649 				    bool enable)
8650 {
8651 	struct hclge_reset_tqp_queue_cmd *req;
8652 	struct hclge_desc desc;
8653 	int ret;
8654 
8655 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8656 
8657 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8658 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8659 	if (enable)
8660 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8661 
8662 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8663 	if (ret) {
8664 		dev_err(&hdev->pdev->dev,
8665 			"Send tqp reset cmd error, status =%d\n", ret);
8666 		return ret;
8667 	}
8668 
8669 	return 0;
8670 }
8671 
8672 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8673 {
8674 	struct hclge_reset_tqp_queue_cmd *req;
8675 	struct hclge_desc desc;
8676 	int ret;
8677 
8678 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8679 
8680 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8681 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8682 
8683 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8684 	if (ret) {
8685 		dev_err(&hdev->pdev->dev,
8686 			"Get reset status error, status =%d\n", ret);
8687 		return ret;
8688 	}
8689 
8690 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8691 }
8692 
8693 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8694 {
8695 	struct hnae3_queue *queue;
8696 	struct hclge_tqp *tqp;
8697 
8698 	queue = handle->kinfo.tqp[queue_id];
8699 	tqp = container_of(queue, struct hclge_tqp, q);
8700 
8701 	return tqp->index;
8702 }
8703 
8704 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8705 {
8706 	struct hclge_vport *vport = hclge_get_vport(handle);
8707 	struct hclge_dev *hdev = vport->back;
8708 	int reset_try_times = 0;
8709 	int reset_status;
8710 	u16 queue_gid;
8711 	int ret;
8712 
8713 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8714 
8715 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8716 	if (ret) {
8717 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8718 		return ret;
8719 	}
8720 
8721 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8722 	if (ret) {
8723 		dev_err(&hdev->pdev->dev,
8724 			"Send reset tqp cmd fail, ret = %d\n", ret);
8725 		return ret;
8726 	}
8727 
8728 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8729 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8730 		if (reset_status)
8731 			break;
8732 
8733 		/* Wait for tqp hw reset */
8734 		usleep_range(1000, 1200);
8735 	}
8736 
8737 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8738 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8739 		return ret;
8740 	}
8741 
8742 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8743 	if (ret)
8744 		dev_err(&hdev->pdev->dev,
8745 			"Deassert the soft reset fail, ret = %d\n", ret);
8746 
8747 	return ret;
8748 }
8749 
8750 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8751 {
8752 	struct hclge_dev *hdev = vport->back;
8753 	int reset_try_times = 0;
8754 	int reset_status;
8755 	u16 queue_gid;
8756 	int ret;
8757 
8758 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8759 
8760 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8761 	if (ret) {
8762 		dev_warn(&hdev->pdev->dev,
8763 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8764 		return;
8765 	}
8766 
8767 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8768 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8769 		if (reset_status)
8770 			break;
8771 
8772 		/* Wait for tqp hw reset */
8773 		usleep_range(1000, 1200);
8774 	}
8775 
8776 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8777 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8778 		return;
8779 	}
8780 
8781 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8782 	if (ret)
8783 		dev_warn(&hdev->pdev->dev,
8784 			 "Deassert the soft reset fail, ret = %d\n", ret);
8785 }
8786 
8787 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8788 {
8789 	struct hclge_vport *vport = hclge_get_vport(handle);
8790 	struct hclge_dev *hdev = vport->back;
8791 
8792 	return hdev->fw_version;
8793 }
8794 
8795 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8796 {
8797 	struct phy_device *phydev = hdev->hw.mac.phydev;
8798 
8799 	if (!phydev)
8800 		return;
8801 
8802 	phy_set_asym_pause(phydev, rx_en, tx_en);
8803 }
8804 
8805 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8806 {
8807 	int ret;
8808 
8809 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8810 		return 0;
8811 
8812 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8813 	if (ret)
8814 		dev_err(&hdev->pdev->dev,
8815 			"configure pauseparam error, ret = %d.\n", ret);
8816 
8817 	return ret;
8818 }
8819 
8820 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8821 {
8822 	struct phy_device *phydev = hdev->hw.mac.phydev;
8823 	u16 remote_advertising = 0;
8824 	u16 local_advertising;
8825 	u32 rx_pause, tx_pause;
8826 	u8 flowctl;
8827 
8828 	if (!phydev->link || !phydev->autoneg)
8829 		return 0;
8830 
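	/* Resolve the pause configuration from the local and link partner
	 * advertisements, using the standard flow control resolution for a
	 * full duplex link.
	 */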
8831 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8832 
8833 	if (phydev->pause)
8834 		remote_advertising = LPA_PAUSE_CAP;
8835 
8836 	if (phydev->asym_pause)
8837 		remote_advertising |= LPA_PAUSE_ASYM;
8838 
8839 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8840 					   remote_advertising);
8841 	tx_pause = flowctl & FLOW_CTRL_TX;
8842 	rx_pause = flowctl & FLOW_CTRL_RX;
8843 
8844 	if (phydev->duplex == HCLGE_MAC_HALF) {
8845 		tx_pause = 0;
8846 		rx_pause = 0;
8847 	}
8848 
8849 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8850 }
8851 
8852 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8853 				 u32 *rx_en, u32 *tx_en)
8854 {
8855 	struct hclge_vport *vport = hclge_get_vport(handle);
8856 	struct hclge_dev *hdev = vport->back;
8857 	struct phy_device *phydev = hdev->hw.mac.phydev;
8858 
8859 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8860 
8861 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8862 		*rx_en = 0;
8863 		*tx_en = 0;
8864 		return;
8865 	}
8866 
8867 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8868 		*rx_en = 1;
8869 		*tx_en = 0;
8870 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8871 		*tx_en = 1;
8872 		*rx_en = 0;
8873 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8874 		*rx_en = 1;
8875 		*tx_en = 1;
8876 	} else {
8877 		*rx_en = 0;
8878 		*tx_en = 0;
8879 	}
8880 }
8881 
8882 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8883 					 u32 rx_en, u32 tx_en)
8884 {
8885 	if (rx_en && tx_en)
8886 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8887 	else if (rx_en && !tx_en)
8888 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8889 	else if (!rx_en && tx_en)
8890 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8891 	else
8892 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8893 
8894 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8895 }
8896 
8897 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8898 				u32 rx_en, u32 tx_en)
8899 {
8900 	struct hclge_vport *vport = hclge_get_vport(handle);
8901 	struct hclge_dev *hdev = vport->back;
8902 	struct phy_device *phydev = hdev->hw.mac.phydev;
8903 	u32 fc_autoneg;
8904 
8905 	if (phydev) {
8906 		fc_autoneg = hclge_get_autoneg(handle);
8907 		if (auto_neg != fc_autoneg) {
8908 			dev_info(&hdev->pdev->dev,
8909 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8910 			return -EOPNOTSUPP;
8911 		}
8912 	}
8913 
8914 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8915 		dev_info(&hdev->pdev->dev,
8916 			 "Priority flow control enabled. Cannot set link flow control.\n");
8917 		return -EOPNOTSUPP;
8918 	}
8919 
8920 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8921 
8922 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8923 
8924 	if (!auto_neg)
8925 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8926 
8927 	if (phydev)
8928 		return phy_start_aneg(phydev);
8929 
8930 	return -EOPNOTSUPP;
8931 }
8932 
8933 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8934 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8935 {
8936 	struct hclge_vport *vport = hclge_get_vport(handle);
8937 	struct hclge_dev *hdev = vport->back;
8938 
8939 	if (speed)
8940 		*speed = hdev->hw.mac.speed;
8941 	if (duplex)
8942 		*duplex = hdev->hw.mac.duplex;
8943 	if (auto_neg)
8944 		*auto_neg = hdev->hw.mac.autoneg;
8945 }
8946 
8947 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8948 				 u8 *module_type)
8949 {
8950 	struct hclge_vport *vport = hclge_get_vport(handle);
8951 	struct hclge_dev *hdev = vport->back;
8952 
8953 	if (media_type)
8954 		*media_type = hdev->hw.mac.media_type;
8955 
8956 	if (module_type)
8957 		*module_type = hdev->hw.mac.module_type;
8958 }
8959 
8960 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8961 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
8962 {
8963 	struct hclge_vport *vport = hclge_get_vport(handle);
8964 	struct hclge_dev *hdev = vport->back;
8965 	struct phy_device *phydev = hdev->hw.mac.phydev;
8966 	int mdix_ctrl, mdix, is_resolved;
8967 	unsigned int retval;
8968 
8969 	if (!phydev) {
8970 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8971 		*tp_mdix = ETH_TP_MDI_INVALID;
8972 		return;
8973 	}
8974 
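	/* Switch the PHY to the MDI/MDI-X page, read the control and status
	 * registers, then restore the copper page.
	 */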
8975 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8976 
8977 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8978 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8979 				    HCLGE_PHY_MDIX_CTRL_S);
8980 
8981 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8982 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8983 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8984 
8985 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8986 
8987 	switch (mdix_ctrl) {
8988 	case 0x0:
8989 		*tp_mdix_ctrl = ETH_TP_MDI;
8990 		break;
8991 	case 0x1:
8992 		*tp_mdix_ctrl = ETH_TP_MDI_X;
8993 		break;
8994 	case 0x3:
8995 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8996 		break;
8997 	default:
8998 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8999 		break;
9000 	}
9001 
9002 	if (!is_resolved)
9003 		*tp_mdix = ETH_TP_MDI_INVALID;
9004 	else if (mdix)
9005 		*tp_mdix = ETH_TP_MDI_X;
9006 	else
9007 		*tp_mdix = ETH_TP_MDI;
9008 }
9009 
9010 static void hclge_info_show(struct hclge_dev *hdev)
9011 {
9012 	struct device *dev = &hdev->pdev->dev;
9013 
9014 	dev_info(dev, "PF info begin:\n");
9015 
	dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
9017 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9018 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Number of VMDq vports: %u\n", hdev->num_vmdq_vport);
	dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9022 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9023 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9024 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9025 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9026 	dev_info(dev, "This is %s PF\n",
9027 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9028 	dev_info(dev, "DCB %s\n",
9029 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9030 	dev_info(dev, "MQPRIO %s\n",
9031 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9032 
9033 	dev_info(dev, "PF info end.\n");
9034 }
9035 
9036 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9037 					  struct hclge_vport *vport)
9038 {
9039 	struct hnae3_client *client = vport->nic.client;
9040 	struct hclge_dev *hdev = ae_dev->priv;
9041 	int rst_cnt = hdev->rst_stats.reset_cnt;
9042 	int ret;
9043 
9044 	ret = client->ops->init_instance(&vport->nic);
9045 	if (ret)
9046 		return ret;
9047 
9048 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
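	/* A reset may have started while the client instance was being
	 * initialized; if so, undo the init and report -EBUSY.
	 */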
9049 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9050 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9051 		ret = -EBUSY;
9052 		goto init_nic_err;
9053 	}
9054 
9055 	/* Enable nic hw error interrupts */
9056 	ret = hclge_config_nic_hw_error(hdev, true);
9057 	if (ret) {
9058 		dev_err(&ae_dev->pdev->dev,
9059 			"fail(%d) to enable hw error interrupts\n", ret);
9060 		goto init_nic_err;
9061 	}
9062 
9063 	hnae3_set_client_init_flag(client, ae_dev, 1);
9064 
9065 	if (netif_msg_drv(&hdev->vport->nic))
9066 		hclge_info_show(hdev);
9067 
9068 	return ret;
9069 
9070 init_nic_err:
9071 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9072 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9073 		msleep(HCLGE_WAIT_RESET_DONE);
9074 
9075 	client->ops->uninit_instance(&vport->nic, 0);
9076 
9077 	return ret;
9078 }
9079 
9080 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9081 					   struct hclge_vport *vport)
9082 {
9083 	struct hclge_dev *hdev = ae_dev->priv;
9084 	struct hnae3_client *client;
9085 	int rst_cnt;
9086 	int ret;
9087 
9088 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9089 	    !hdev->nic_client)
9090 		return 0;
9091 
9092 	client = hdev->roce_client;
9093 	ret = hclge_init_roce_base_info(vport);
9094 	if (ret)
9095 		return ret;
9096 
9097 	rst_cnt = hdev->rst_stats.reset_cnt;
9098 	ret = client->ops->init_instance(&vport->roce);
9099 	if (ret)
9100 		return ret;
9101 
9102 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9103 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9104 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9105 		ret = -EBUSY;
9106 		goto init_roce_err;
9107 	}
9108 
9109 	/* Enable roce ras interrupts */
9110 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9111 	if (ret) {
9112 		dev_err(&ae_dev->pdev->dev,
9113 			"fail(%d) to enable roce ras interrupts\n", ret);
9114 		goto init_roce_err;
9115 	}
9116 
9117 	hnae3_set_client_init_flag(client, ae_dev, 1);
9118 
9119 	return 0;
9120 
9121 init_roce_err:
9122 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9123 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9124 		msleep(HCLGE_WAIT_RESET_DONE);
9125 
9126 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9127 
9128 	return ret;
9129 }
9130 
9131 static int hclge_init_client_instance(struct hnae3_client *client,
9132 				      struct hnae3_ae_dev *ae_dev)
9133 {
9134 	struct hclge_dev *hdev = ae_dev->priv;
9135 	struct hclge_vport *vport;
9136 	int i, ret;
9137 
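	/* register the client instance on the PF vport and any VMDq vports */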
	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9139 		vport = &hdev->vport[i];
9140 
9141 		switch (client->type) {
9142 		case HNAE3_CLIENT_KNIC:
9143 			hdev->nic_client = client;
9144 			vport->nic.client = client;
9145 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9146 			if (ret)
9147 				goto clear_nic;
9148 
9149 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9150 			if (ret)
9151 				goto clear_roce;
9152 
9153 			break;
9154 		case HNAE3_CLIENT_ROCE:
9155 			if (hnae3_dev_roce_supported(hdev)) {
9156 				hdev->roce_client = client;
9157 				vport->roce.client = client;
9158 			}
9159 
9160 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9161 			if (ret)
9162 				goto clear_roce;
9163 
9164 			break;
9165 		default:
9166 			return -EINVAL;
9167 		}
9168 	}
9169 
9170 	return 0;
9171 
9172 clear_nic:
9173 	hdev->nic_client = NULL;
9174 	vport->nic.client = NULL;
9175 	return ret;
9176 clear_roce:
9177 	hdev->roce_client = NULL;
9178 	vport->roce.client = NULL;
9179 	return ret;
9180 }
9181 
9182 static void hclge_uninit_client_instance(struct hnae3_client *client,
9183 					 struct hnae3_ae_dev *ae_dev)
9184 {
9185 	struct hclge_dev *hdev = ae_dev->priv;
9186 	struct hclge_vport *vport;
9187 	int i;
9188 
9189 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9190 		vport = &hdev->vport[i];
9191 		if (hdev->roce_client) {
9192 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9193 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9194 				msleep(HCLGE_WAIT_RESET_DONE);
9195 
9196 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9197 								0);
9198 			hdev->roce_client = NULL;
9199 			vport->roce.client = NULL;
9200 		}
9201 		if (client->type == HNAE3_CLIENT_ROCE)
9202 			return;
9203 		if (hdev->nic_client && client->ops->uninit_instance) {
9204 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9205 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9206 				msleep(HCLGE_WAIT_RESET_DONE);
9207 
9208 			client->ops->uninit_instance(&vport->nic, 0);
9209 			hdev->nic_client = NULL;
9210 			vport->nic.client = NULL;
9211 		}
9212 	}
9213 }
9214 
9215 static int hclge_pci_init(struct hclge_dev *hdev)
9216 {
9217 	struct pci_dev *pdev = hdev->pdev;
9218 	struct hclge_hw *hw;
9219 	int ret;
9220 
9221 	ret = pci_enable_device(pdev);
9222 	if (ret) {
9223 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9224 		return ret;
9225 	}
9226 
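	/* prefer a 64-bit DMA mask and fall back to 32-bit if that fails */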
9227 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9228 	if (ret) {
9229 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9230 		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA\n");
9233 			goto err_disable_device;
9234 		}
9235 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9236 	}
9237 
9238 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9239 	if (ret) {
9240 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9241 		goto err_disable_device;
9242 	}
9243 
9244 	pci_set_master(pdev);
9245 	hw = &hdev->hw;
9246 	hw->io_base = pcim_iomap(pdev, 2, 0);
9247 	if (!hw->io_base) {
9248 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9249 		ret = -ENOMEM;
9250 		goto err_clr_master;
9251 	}
9252 
9253 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9254 
9255 	return 0;
9256 err_clr_master:
9257 	pci_clear_master(pdev);
9258 	pci_release_regions(pdev);
9259 err_disable_device:
9260 	pci_disable_device(pdev);
9261 
9262 	return ret;
9263 }
9264 
9265 static void hclge_pci_uninit(struct hclge_dev *hdev)
9266 {
9267 	struct pci_dev *pdev = hdev->pdev;
9268 
9269 	pcim_iounmap(pdev, hdev->hw.io_base);
9270 	pci_free_irq_vectors(pdev);
9271 	pci_clear_master(pdev);
9272 	pci_release_mem_regions(pdev);
9273 	pci_disable_device(pdev);
9274 }
9275 
9276 static void hclge_state_init(struct hclge_dev *hdev)
9277 {
9278 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9279 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9280 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9281 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9282 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9283 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9284 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9285 }
9286 
9287 static void hclge_state_uninit(struct hclge_dev *hdev)
9288 {
9289 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9290 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9291 
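	/* only tear down the timer and service task if they were set up */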
9292 	if (hdev->reset_timer.function)
9293 		del_timer_sync(&hdev->reset_timer);
9294 	if (hdev->service_task.work.func)
9295 		cancel_delayed_work_sync(&hdev->service_task);
9296 }
9297 
9298 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9299 {
9300 #define HCLGE_FLR_RETRY_WAIT_MS	500
9301 #define HCLGE_FLR_RETRY_CNT	5
9302 
9303 	struct hclge_dev *hdev = ae_dev->priv;
9304 	int retry_cnt = 0;
9305 	int ret;
9306 
9307 retry:
9308 	down(&hdev->reset_sem);
9309 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9310 	hdev->reset_type = HNAE3_FLR_RESET;
9311 	ret = hclge_reset_prepare(hdev);
9312 	if (ret) {
9313 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9314 			ret);
9315 		if (hdev->reset_pending ||
9316 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9317 			dev_err(&hdev->pdev->dev,
9318 				"reset_pending:0x%lx, retry_cnt:%d\n",
9319 				hdev->reset_pending, retry_cnt);
9320 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9321 			up(&hdev->reset_sem);
9322 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
9323 			goto retry;
9324 		}
9325 	}
9326 
9327 	/* disable misc vector before FLR done */
9328 	hclge_enable_vector(&hdev->misc_vector, false);
9329 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9330 	hdev->rst_stats.flr_rst_cnt++;
9331 }
9332 
9333 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9334 {
9335 	struct hclge_dev *hdev = ae_dev->priv;
9336 	int ret;
9337 
9338 	hclge_enable_vector(&hdev->misc_vector, true);
9339 
9340 	ret = hclge_reset_rebuild(hdev);
9341 	if (ret)
9342 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9343 
9344 	hdev->reset_type = HNAE3_NONE_RESET;
9345 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9346 	up(&hdev->reset_sem);
9347 }
9348 
9349 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9350 {
9351 	u16 i;
9352 
9353 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9354 		struct hclge_vport *vport = &hdev->vport[i];
9355 		int ret;
9356 
		/* Send cmd to clear VF's FUNC_RST_ING */
9358 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9359 		if (ret)
9360 			dev_warn(&hdev->pdev->dev,
9361 				 "clear vf(%u) rst failed %d!\n",
9362 				 vport->vport_id, ret);
9363 	}
9364 }
9365 
9366 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9367 {
9368 	struct pci_dev *pdev = ae_dev->pdev;
9369 	struct hclge_dev *hdev;
9370 	int ret;
9371 
9372 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9373 	if (!hdev) {
9374 		ret = -ENOMEM;
9375 		goto out;
9376 	}
9377 
9378 	hdev->pdev = pdev;
9379 	hdev->ae_dev = ae_dev;
9380 	hdev->reset_type = HNAE3_NONE_RESET;
9381 	hdev->reset_level = HNAE3_FUNC_RESET;
9382 	ae_dev->priv = hdev;
9383 
	/* HW supports 2 layers of VLAN tags */
9385 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9386 
9387 	mutex_init(&hdev->vport_lock);
9388 	spin_lock_init(&hdev->fd_rule_lock);
9389 	sema_init(&hdev->reset_sem, 1);
9390 
9391 	ret = hclge_pci_init(hdev);
9392 	if (ret)
9393 		goto out;
9394 
9395 	/* Firmware command queue initialize */
9396 	ret = hclge_cmd_queue_init(hdev);
9397 	if (ret)
9398 		goto err_pci_uninit;
9399 
9400 	/* Firmware command initialize */
9401 	ret = hclge_cmd_init(hdev);
9402 	if (ret)
9403 		goto err_cmd_uninit;
9404 
9405 	ret = hclge_get_cap(hdev);
9406 	if (ret)
9407 		goto err_cmd_uninit;
9408 
9409 	ret = hclge_configure(hdev);
9410 	if (ret) {
9411 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9412 		goto err_cmd_uninit;
9413 	}
9414 
9415 	ret = hclge_init_msi(hdev);
9416 	if (ret) {
9417 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9418 		goto err_cmd_uninit;
9419 	}
9420 
9421 	ret = hclge_misc_irq_init(hdev);
9422 	if (ret)
9423 		goto err_msi_uninit;
9424 
9425 	ret = hclge_alloc_tqps(hdev);
9426 	if (ret) {
9427 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9428 		goto err_msi_irq_uninit;
9429 	}
9430 
9431 	ret = hclge_alloc_vport(hdev);
9432 	if (ret)
9433 		goto err_msi_irq_uninit;
9434 
9435 	ret = hclge_map_tqp(hdev);
9436 	if (ret)
9437 		goto err_msi_irq_uninit;
9438 
9439 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9440 		ret = hclge_mac_mdio_config(hdev);
9441 		if (ret)
9442 			goto err_msi_irq_uninit;
9443 	}
9444 
9445 	ret = hclge_init_umv_space(hdev);
9446 	if (ret)
9447 		goto err_mdiobus_unreg;
9448 
9449 	ret = hclge_mac_init(hdev);
9450 	if (ret) {
9451 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9452 		goto err_mdiobus_unreg;
9453 	}
9454 
9455 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9456 	if (ret) {
9457 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9458 		goto err_mdiobus_unreg;
9459 	}
9460 
9461 	ret = hclge_config_gro(hdev, true);
9462 	if (ret)
9463 		goto err_mdiobus_unreg;
9464 
9465 	ret = hclge_init_vlan_config(hdev);
9466 	if (ret) {
9467 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9468 		goto err_mdiobus_unreg;
9469 	}
9470 
9471 	ret = hclge_tm_schd_init(hdev);
9472 	if (ret) {
9473 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9474 		goto err_mdiobus_unreg;
9475 	}
9476 
9477 	hclge_rss_init_cfg(hdev);
9478 	ret = hclge_rss_init_hw(hdev);
9479 	if (ret) {
9480 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9481 		goto err_mdiobus_unreg;
9482 	}
9483 
9484 	ret = init_mgr_tbl(hdev);
9485 	if (ret) {
9486 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9487 		goto err_mdiobus_unreg;
9488 	}
9489 
9490 	ret = hclge_init_fd_config(hdev);
9491 	if (ret) {
9492 		dev_err(&pdev->dev,
9493 			"fd table init fail, ret=%d\n", ret);
9494 		goto err_mdiobus_unreg;
9495 	}
9496 
9497 	INIT_KFIFO(hdev->mac_tnl_log);
9498 
9499 	hclge_dcb_ops_set(hdev);
9500 
9501 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9502 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9503 
	/* Set up affinity after the service timer setup because
	 * add_timer_on() is called from the affinity notify callback.
	 */
9507 	hclge_misc_affinity_setup(hdev);
9508 
9509 	hclge_clear_all_event_cause(hdev);
9510 	hclge_clear_resetting_state(hdev);
9511 
	/* Log and clear the hw errors that have already occurred */
9513 	hclge_handle_all_hns_hw_errors(ae_dev);
9514 
	/* Request a delayed reset for error recovery because an immediate
	 * global reset on a PF could affect the pending initialization of
	 * other PFs.
	 */
9518 	if (ae_dev->hw_err_reset_req) {
9519 		enum hnae3_reset_type reset_level;
9520 
9521 		reset_level = hclge_get_reset_level(ae_dev,
9522 						    &ae_dev->hw_err_reset_req);
9523 		hclge_set_def_reset_request(ae_dev, reset_level);
9524 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9525 	}
9526 
9527 	/* Enable MISC vector(vector0) */
9528 	hclge_enable_vector(&hdev->misc_vector, true);
9529 
9530 	hclge_state_init(hdev);
9531 	hdev->last_reset_time = jiffies;
9532 
9533 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9534 		 HCLGE_DRIVER_NAME);
9535 
9536 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9537 
9538 	return 0;
9539 
9540 err_mdiobus_unreg:
9541 	if (hdev->hw.mac.phydev)
9542 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9543 err_msi_irq_uninit:
9544 	hclge_misc_irq_uninit(hdev);
9545 err_msi_uninit:
9546 	pci_free_irq_vectors(pdev);
9547 err_cmd_uninit:
9548 	hclge_cmd_uninit(hdev);
9549 err_pci_uninit:
9550 	pcim_iounmap(pdev, hdev->hw.io_base);
9551 	pci_clear_master(pdev);
9552 	pci_release_regions(pdev);
9553 	pci_disable_device(pdev);
9554 out:
9555 	return ret;
9556 }
9557 
9558 static void hclge_stats_clear(struct hclge_dev *hdev)
9559 {
9560 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9561 }
9562 
9563 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9564 {
9565 	return hclge_config_switch_param(hdev, vf, enable,
9566 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9567 }
9568 
9569 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9570 {
9571 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9572 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
9573 					  enable, vf);
9574 }
9575 
9576 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9577 {
9578 	int ret;
9579 
9580 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9581 	if (ret) {
9582 		dev_err(&hdev->pdev->dev,
9583 			"Set vf %d mac spoof check %s failed, ret=%d\n",
9584 			vf, enable ? "on" : "off", ret);
9585 		return ret;
9586 	}
9587 
9588 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9589 	if (ret)
9590 		dev_err(&hdev->pdev->dev,
9591 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
9592 			vf, enable ? "on" : "off", ret);
9593 
9594 	return ret;
9595 }
9596 
9597 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9598 				 bool enable)
9599 {
9600 	struct hclge_vport *vport = hclge_get_vport(handle);
9601 	struct hclge_dev *hdev = vport->back;
9602 	u32 new_spoofchk = enable ? 1 : 0;
9603 	int ret;
9604 
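	/* spoof check is not supported on revision 0x20 hardware */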
9605 	if (hdev->pdev->revision == 0x20)
9606 		return -EOPNOTSUPP;
9607 
9608 	vport = hclge_get_vf_vport(hdev, vf);
9609 	if (!vport)
9610 		return -EINVAL;
9611 
9612 	if (vport->vf_info.spoofchk == new_spoofchk)
9613 		return 0;
9614 
9615 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9616 		dev_warn(&hdev->pdev->dev,
9617 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9618 			 vf);
9619 	else if (enable && hclge_is_umv_space_full(vport))
9620 		dev_warn(&hdev->pdev->dev,
9621 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9622 			 vf);
9623 
9624 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9625 	if (ret)
9626 		return ret;
9627 
9628 	vport->vf_info.spoofchk = new_spoofchk;
9629 	return 0;
9630 }
9631 
9632 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9633 {
9634 	struct hclge_vport *vport = hdev->vport;
9635 	int ret;
9636 	int i;
9637 
9638 	if (hdev->pdev->revision == 0x20)
9639 		return 0;
9640 
9641 	/* resume the vf spoof check state after reset */
9642 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9643 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9644 					       vport->vf_info.spoofchk);
9645 		if (ret)
9646 			return ret;
9647 
9648 		vport++;
9649 	}
9650 
9651 	return 0;
9652 }
9653 
9654 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9655 {
9656 	struct hclge_vport *vport = hclge_get_vport(handle);
9657 	struct hclge_dev *hdev = vport->back;
9658 	u32 new_trusted = enable ? 1 : 0;
9659 	bool en_bc_pmc;
9660 	int ret;
9661 
9662 	vport = hclge_get_vf_vport(hdev, vf);
9663 	if (!vport)
9664 		return -EINVAL;
9665 
9666 	if (vport->vf_info.trusted == new_trusted)
9667 		return 0;
9668 
9669 	/* Disable promisc mode for VF if it is not trusted any more. */
9670 	if (!enable && vport->vf_info.promisc_enable) {
9671 		en_bc_pmc = hdev->pdev->revision != 0x20;
9672 		ret = hclge_set_vport_promisc_mode(vport, false, false,
9673 						   en_bc_pmc);
9674 		if (ret)
9675 			return ret;
9676 		vport->vf_info.promisc_enable = 0;
9677 		hclge_inform_vf_promisc_info(vport);
9678 	}
9679 
9680 	vport->vf_info.trusted = new_trusted;
9681 
9682 	return 0;
9683 }
9684 
9685 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9686 {
9687 	int ret;
9688 	int vf;
9689 
9690 	/* reset vf rate to default value */
9691 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9692 		struct hclge_vport *vport = &hdev->vport[vf];
9693 
9694 		vport->vf_info.max_tx_rate = 0;
9695 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9696 		if (ret)
9697 			dev_err(&hdev->pdev->dev,
9698 				"vf%d failed to reset to default, ret=%d\n",
9699 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9700 	}
9701 }
9702 
9703 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9704 				     int min_tx_rate, int max_tx_rate)
9705 {
9706 	if (min_tx_rate != 0 ||
9707 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9708 		dev_err(&hdev->pdev->dev,
9709 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9710 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9711 		return -EINVAL;
9712 	}
9713 
9714 	return 0;
9715 }
9716 
9717 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9718 			     int min_tx_rate, int max_tx_rate, bool force)
9719 {
9720 	struct hclge_vport *vport = hclge_get_vport(handle);
9721 	struct hclge_dev *hdev = vport->back;
9722 	int ret;
9723 
9724 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9725 	if (ret)
9726 		return ret;
9727 
9728 	vport = hclge_get_vf_vport(hdev, vf);
9729 	if (!vport)
9730 		return -EINVAL;
9731 
9732 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9733 		return 0;
9734 
9735 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9736 	if (ret)
9737 		return ret;
9738 
9739 	vport->vf_info.max_tx_rate = max_tx_rate;
9740 
9741 	return 0;
9742 }
9743 
9744 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9745 {
9746 	struct hnae3_handle *handle = &hdev->vport->nic;
9747 	struct hclge_vport *vport;
9748 	int ret;
9749 	int vf;
9750 
9751 	/* resume the vf max_tx_rate after reset */
9752 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9753 		vport = hclge_get_vf_vport(hdev, vf);
9754 		if (!vport)
9755 			return -EINVAL;
9756 
		/* Zero means max rate; after a reset the firmware has already
		 * restored the max rate, so just continue.
		 */
9760 		if (!vport->vf_info.max_tx_rate)
9761 			continue;
9762 
9763 		ret = hclge_set_vf_rate(handle, vf, 0,
9764 					vport->vf_info.max_tx_rate, true);
9765 		if (ret) {
9766 			dev_err(&hdev->pdev->dev,
9767 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
9768 				vf, vport->vf_info.max_tx_rate, ret);
9769 			return ret;
9770 		}
9771 	}
9772 
9773 	return 0;
9774 }
9775 
9776 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9777 {
9778 	struct hclge_vport *vport = hdev->vport;
9779 	int i;
9780 
9781 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9782 		hclge_vport_stop(vport);
9783 		vport++;
9784 	}
9785 }
9786 
9787 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9788 {
9789 	struct hclge_dev *hdev = ae_dev->priv;
9790 	struct pci_dev *pdev = ae_dev->pdev;
9791 	int ret;
9792 
9793 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9794 
9795 	hclge_stats_clear(hdev);
9796 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9797 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9798 
9799 	ret = hclge_cmd_init(hdev);
9800 	if (ret) {
9801 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9802 		return ret;
9803 	}
9804 
9805 	ret = hclge_map_tqp(hdev);
9806 	if (ret) {
9807 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9808 		return ret;
9809 	}
9810 
9811 	hclge_reset_umv_space(hdev);
9812 
9813 	ret = hclge_mac_init(hdev);
9814 	if (ret) {
9815 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9816 		return ret;
9817 	}
9818 
9819 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9820 	if (ret) {
9821 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9822 		return ret;
9823 	}
9824 
9825 	ret = hclge_config_gro(hdev, true);
9826 	if (ret)
9827 		return ret;
9828 
9829 	ret = hclge_init_vlan_config(hdev);
9830 	if (ret) {
9831 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9832 		return ret;
9833 	}
9834 
9835 	ret = hclge_tm_init_hw(hdev, true);
9836 	if (ret) {
9837 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9838 		return ret;
9839 	}
9840 
9841 	ret = hclge_rss_init_hw(hdev);
9842 	if (ret) {
9843 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9844 		return ret;
9845 	}
9846 
9847 	ret = init_mgr_tbl(hdev);
9848 	if (ret) {
9849 		dev_err(&pdev->dev,
9850 			"failed to reinit manager table, ret = %d\n", ret);
9851 		return ret;
9852 	}
9853 
9854 	ret = hclge_init_fd_config(hdev);
9855 	if (ret) {
9856 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9857 		return ret;
9858 	}
9859 
	/* Log and clear the hw errors that have already occurred */
9861 	hclge_handle_all_hns_hw_errors(ae_dev);
9862 
9863 	/* Re-enable the hw error interrupts because
9864 	 * the interrupts get disabled on global reset.
9865 	 */
9866 	ret = hclge_config_nic_hw_error(hdev, true);
9867 	if (ret) {
9868 		dev_err(&pdev->dev,
9869 			"fail(%d) to re-enable NIC hw error interrupts\n",
9870 			ret);
9871 		return ret;
9872 	}
9873 
9874 	if (hdev->roce_client) {
9875 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9876 		if (ret) {
9877 			dev_err(&pdev->dev,
9878 				"fail(%d) to re-enable roce ras interrupts\n",
9879 				ret);
9880 			return ret;
9881 		}
9882 	}
9883 
9884 	hclge_reset_vport_state(hdev);
9885 	ret = hclge_reset_vport_spoofchk(hdev);
9886 	if (ret)
9887 		return ret;
9888 
9889 	ret = hclge_resume_vf_rate(hdev);
9890 	if (ret)
9891 		return ret;
9892 
9893 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9894 		 HCLGE_DRIVER_NAME);
9895 
9896 	return 0;
9897 }
9898 
9899 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9900 {
9901 	struct hclge_dev *hdev = ae_dev->priv;
9902 	struct hclge_mac *mac = &hdev->hw.mac;
9903 
9904 	hclge_reset_vf_rate(hdev);
9905 	hclge_misc_affinity_teardown(hdev);
9906 	hclge_state_uninit(hdev);
9907 
9908 	if (mac->phydev)
9909 		mdiobus_unregister(mac->mdio_bus);
9910 
9911 	hclge_uninit_umv_space(hdev);
9912 
9913 	/* Disable MISC vector(vector0) */
9914 	hclge_enable_vector(&hdev->misc_vector, false);
9915 	synchronize_irq(hdev->misc_vector.vector_irq);
9916 
9917 	/* Disable all hw interrupts */
9918 	hclge_config_mac_tnl_int(hdev, false);
9919 	hclge_config_nic_hw_error(hdev, false);
9920 	hclge_config_rocee_ras_interrupt(hdev, false);
9921 
9922 	hclge_cmd_uninit(hdev);
9923 	hclge_misc_irq_uninit(hdev);
9924 	hclge_pci_uninit(hdev);
9925 	mutex_destroy(&hdev->vport_lock);
9926 	hclge_uninit_vport_mac_table(hdev);
9927 	hclge_uninit_vport_vlan_table(hdev);
9928 	ae_dev->priv = NULL;
9929 }
9930 
9931 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9932 {
9933 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9934 	struct hclge_vport *vport = hclge_get_vport(handle);
9935 	struct hclge_dev *hdev = vport->back;
9936 
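	/* the usable channels are capped by the RSS size and by the TQPs
	 * available per TC
	 */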
9937 	return min_t(u32, hdev->rss_size_max,
9938 		     vport->alloc_tqps / kinfo->num_tc);
9939 }
9940 
9941 static void hclge_get_channels(struct hnae3_handle *handle,
9942 			       struct ethtool_channels *ch)
9943 {
9944 	ch->max_combined = hclge_get_max_channels(handle);
9945 	ch->other_count = 1;
9946 	ch->max_other = 1;
9947 	ch->combined_count = handle->kinfo.rss_size;
9948 }
9949 
9950 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9951 					u16 *alloc_tqps, u16 *max_rss_size)
9952 {
9953 	struct hclge_vport *vport = hclge_get_vport(handle);
9954 	struct hclge_dev *hdev = vport->back;
9955 
9956 	*alloc_tqps = vport->alloc_tqps;
9957 	*max_rss_size = hdev->rss_size_max;
9958 }
9959 
9960 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9961 			      bool rxfh_configured)
9962 {
9963 	struct hclge_vport *vport = hclge_get_vport(handle);
9964 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9965 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9966 	struct hclge_dev *hdev = vport->back;
9967 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9968 	u16 cur_rss_size = kinfo->rss_size;
9969 	u16 cur_tqps = kinfo->num_tqps;
9970 	u16 tc_valid[HCLGE_MAX_TC_NUM];
9971 	u16 roundup_size;
9972 	u32 *rss_indir;
9973 	unsigned int i;
9974 	int ret;
9975 
9976 	kinfo->req_rss_size = new_tqps_num;
9977 
9978 	ret = hclge_tm_vport_map_update(hdev);
9979 	if (ret) {
9980 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9981 		return ret;
9982 	}
9983 
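	/* round the RSS size up to a power of two and convert it to a log2
	 * value for the per-TC size configuration
	 */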
9984 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
9985 	roundup_size = ilog2(roundup_size);
9986 	/* Set the RSS TC mode according to the new RSS size */
9987 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9988 		tc_valid[i] = 0;
9989 
9990 		if (!(hdev->hw_tc_map & BIT(i)))
9991 			continue;
9992 
9993 		tc_valid[i] = 1;
9994 		tc_size[i] = roundup_size;
9995 		tc_offset[i] = kinfo->rss_size * i;
9996 	}
9997 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9998 	if (ret)
9999 		return ret;
10000 
	/* RSS indirection table has been configured by the user */
10002 	if (rxfh_configured)
10003 		goto out;
10004 
	/* Reinitialize the RSS indirection table for the new RSS size */
10006 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10007 	if (!rss_indir)
10008 		return -ENOMEM;
10009 
10010 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10011 		rss_indir[i] = i % kinfo->rss_size;
10012 
10013 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10014 	if (ret)
10015 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10016 			ret);
10017 
10018 	kfree(rss_indir);
10019 
10020 out:
10021 	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10024 			 cur_rss_size, kinfo->rss_size,
10025 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10026 
10027 	return ret;
10028 }
10029 
10030 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10031 			      u32 *regs_num_64_bit)
10032 {
10033 	struct hclge_desc desc;
10034 	u32 total_num;
10035 	int ret;
10036 
10037 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10038 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10039 	if (ret) {
10040 		dev_err(&hdev->pdev->dev,
10041 			"Query register number cmd failed, ret = %d.\n", ret);
10042 		return ret;
10043 	}
10044 
10045 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10046 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10047 
10048 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10049 	if (!total_num)
10050 		return -EINVAL;
10051 
10052 	return 0;
10053 }
10054 
10055 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10056 				 void *data)
10057 {
10058 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10059 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10060 
10061 	struct hclge_desc *desc;
10062 	u32 *reg_val = data;
10063 	__le32 *desc_data;
10064 	int nodata_num;
10065 	int cmd_num;
10066 	int i, k, n;
10067 	int ret;
10068 
10069 	if (regs_num == 0)
10070 		return 0;
10071 
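	/* the first descriptor carries nodata_num header words, so account
	 * for them when computing how many descriptors are needed
	 */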
10072 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10073 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10074 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10075 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10076 	if (!desc)
10077 		return -ENOMEM;
10078 
10079 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10080 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10081 	if (ret) {
10082 		dev_err(&hdev->pdev->dev,
10083 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10084 		kfree(desc);
10085 		return ret;
10086 	}
10087 
10088 	for (i = 0; i < cmd_num; i++) {
10089 		if (i == 0) {
10090 			desc_data = (__le32 *)(&desc[i].data[0]);
10091 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10092 		} else {
10093 			desc_data = (__le32 *)(&desc[i]);
10094 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10095 		}
10096 		for (k = 0; k < n; k++) {
10097 			*reg_val++ = le32_to_cpu(*desc_data++);
10098 
10099 			regs_num--;
10100 			if (!regs_num)
10101 				break;
10102 		}
10103 	}
10104 
10105 	kfree(desc);
10106 	return 0;
10107 }
10108 
10109 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10110 				 void *data)
10111 {
10112 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10113 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10114 
10115 	struct hclge_desc *desc;
10116 	u64 *reg_val = data;
10117 	__le64 *desc_data;
10118 	int nodata_len;
10119 	int cmd_num;
10120 	int i, k, n;
10121 	int ret;
10122 
10123 	if (regs_num == 0)
10124 		return 0;
10125 
10126 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10127 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10128 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10129 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10130 	if (!desc)
10131 		return -ENOMEM;
10132 
10133 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10134 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10135 	if (ret) {
10136 		dev_err(&hdev->pdev->dev,
10137 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10138 		kfree(desc);
10139 		return ret;
10140 	}
10141 
10142 	for (i = 0; i < cmd_num; i++) {
10143 		if (i == 0) {
10144 			desc_data = (__le64 *)(&desc[i].data[0]);
10145 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10146 		} else {
10147 			desc_data = (__le64 *)(&desc[i]);
10148 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10149 		}
10150 		for (k = 0; k < n; k++) {
10151 			*reg_val++ = le64_to_cpu(*desc_data++);
10152 
10153 			regs_num--;
10154 			if (!regs_num)
10155 				break;
10156 		}
10157 	}
10158 
10159 	kfree(desc);
10160 	return 0;
10161 }
10162 
10163 #define MAX_SEPARATE_NUM	4
10164 #define SEPARATOR_VALUE		0xFDFCFBFA
10165 #define REG_NUM_PER_LINE	4
10166 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10167 #define REG_SEPARATOR_LINE	1
10168 #define REG_NUM_REMAIN_MASK	3
10169 #define BD_LIST_MAX_NUM		30
10170 
10171 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10172 {
	/* prepare 4 commands to query DFX BD number */
10174 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10175 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10176 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10177 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10178 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10179 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10180 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10181 
10182 	return hclge_cmd_send(&hdev->hw, desc, 4);
10183 }
10184 
10185 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10186 				    int *bd_num_list,
10187 				    u32 type_num)
10188 {
10189 	u32 entries_per_desc, desc_index, index, offset, i;
10190 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10191 	int ret;
10192 
10193 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10194 	if (ret) {
10195 		dev_err(&hdev->pdev->dev,
10196 			"Get dfx bd num fail, status is %d.\n", ret);
10197 		return ret;
10198 	}
10199 
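	/* each descriptor holds entries_per_desc BD numbers; map each type's
	 * offset to a descriptor index and an entry index
	 */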
10200 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10201 	for (i = 0; i < type_num; i++) {
10202 		offset = hclge_dfx_bd_offset_list[i];
10203 		index = offset % entries_per_desc;
10204 		desc_index = offset / entries_per_desc;
10205 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10206 	}
10207 
10208 	return ret;
10209 }
10210 
10211 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10212 				  struct hclge_desc *desc_src, int bd_num,
10213 				  enum hclge_opcode_type cmd)
10214 {
10215 	struct hclge_desc *desc = desc_src;
10216 	int i, ret;
10217 
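	/* chain the descriptors: every one except the last one sets the
	 * NEXT flag
	 */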
10218 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10219 	for (i = 0; i < bd_num - 1; i++) {
10220 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10221 		desc++;
10222 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10223 	}
10224 
10225 	desc = desc_src;
10226 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10227 	if (ret)
10228 		dev_err(&hdev->pdev->dev,
10229 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10230 			cmd, ret);
10231 
10232 	return ret;
10233 }
10234 
10235 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10236 				    void *data)
10237 {
10238 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10239 	struct hclge_desc *desc = desc_src;
10240 	u32 *reg = data;
10241 
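	/* copy out the register values and pad the tail with separator
	 * values to keep the dump aligned to full register lines
	 */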
10242 	entries_per_desc = ARRAY_SIZE(desc->data);
10243 	reg_num = entries_per_desc * bd_num;
10244 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10245 	for (i = 0; i < reg_num; i++) {
10246 		index = i % entries_per_desc;
10247 		desc_index = i / entries_per_desc;
10248 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10249 	}
10250 	for (i = 0; i < separator_num; i++)
10251 		*reg++ = SEPARATOR_VALUE;
10252 
10253 	return reg_num + separator_num;
10254 }
10255 
10256 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10257 {
10258 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10259 	int data_len_per_desc, bd_num, i;
10260 	int bd_num_list[BD_LIST_MAX_NUM];
10261 	u32 data_len;
10262 	int ret;
10263 
10264 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10265 	if (ret) {
10266 		dev_err(&hdev->pdev->dev,
10267 			"Get dfx reg bd num fail, status is %d.\n", ret);
10268 		return ret;
10269 	}
10270 
10271 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
10272 	*len = 0;
10273 	for (i = 0; i < dfx_reg_type_num; i++) {
10274 		bd_num = bd_num_list[i];
10275 		data_len = data_len_per_desc * bd_num;
10276 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10277 	}
10278 
10279 	return ret;
10280 }
10281 
10282 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10283 {
10284 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10285 	int bd_num, bd_num_max, buf_len, i;
10286 	int bd_num_list[BD_LIST_MAX_NUM];
10287 	struct hclge_desc *desc_src;
10288 	u32 *reg = data;
10289 	int ret;
10290 
10291 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10292 	if (ret) {
10293 		dev_err(&hdev->pdev->dev,
10294 			"Get dfx reg bd num fail, status is %d.\n", ret);
10295 		return ret;
10296 	}
10297 
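	/* allocate one descriptor buffer sized for the largest BD count */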
10298 	bd_num_max = bd_num_list[0];
10299 	for (i = 1; i < dfx_reg_type_num; i++)
10300 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10301 
10302 	buf_len = sizeof(*desc_src) * bd_num_max;
10303 	desc_src = kzalloc(buf_len, GFP_KERNEL);
10304 	if (!desc_src)
10305 		return -ENOMEM;
10306 
10307 	for (i = 0; i < dfx_reg_type_num; i++) {
10308 		bd_num = bd_num_list[i];
10309 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10310 					     hclge_dfx_reg_opcode_list[i]);
10311 		if (ret) {
10312 			dev_err(&hdev->pdev->dev,
10313 				"Get dfx reg fail, status is %d.\n", ret);
10314 			break;
10315 		}
10316 
10317 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10318 	}
10319 
10320 	kfree(desc_src);
10321 	return ret;
10322 }
10323 
10324 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10325 			      struct hnae3_knic_private_info *kinfo)
10326 {
10327 #define HCLGE_RING_REG_OFFSET		0x200
10328 #define HCLGE_RING_INT_REG_OFFSET	0x4
10329 
10330 	int i, j, reg_num, separator_num;
10331 	int data_num_sum;
10332 	u32 *reg = data;
10333 
	/* fetch per-PF register values from the PF PCIe register space */
10335 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10336 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10337 	for (i = 0; i < reg_num; i++)
10338 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10339 	for (i = 0; i < separator_num; i++)
10340 		*reg++ = SEPARATOR_VALUE;
10341 	data_num_sum = reg_num + separator_num;
10342 
10343 	reg_num = ARRAY_SIZE(common_reg_addr_list);
10344 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10345 	for (i = 0; i < reg_num; i++)
10346 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10347 	for (i = 0; i < separator_num; i++)
10348 		*reg++ = SEPARATOR_VALUE;
10349 	data_num_sum += reg_num + separator_num;
10350 
10351 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
10352 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10353 	for (j = 0; j < kinfo->num_tqps; j++) {
10354 		for (i = 0; i < reg_num; i++)
10355 			*reg++ = hclge_read_dev(&hdev->hw,
10356 						ring_reg_addr_list[i] +
10357 						HCLGE_RING_REG_OFFSET * j);
10358 		for (i = 0; i < separator_num; i++)
10359 			*reg++ = SEPARATOR_VALUE;
10360 	}
10361 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10362 
10363 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10364 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10365 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
10366 		for (i = 0; i < reg_num; i++)
10367 			*reg++ = hclge_read_dev(&hdev->hw,
10368 						tqp_intr_reg_addr_list[i] +
10369 						HCLGE_RING_INT_REG_OFFSET * j);
10370 		for (i = 0; i < separator_num; i++)
10371 			*reg++ = SEPARATOR_VALUE;
10372 	}
10373 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10374 
10375 	return data_num_sum;
10376 }
10377 
10378 static int hclge_get_regs_len(struct hnae3_handle *handle)
10379 {
10380 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10381 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10382 	struct hclge_vport *vport = hclge_get_vport(handle);
10383 	struct hclge_dev *hdev = vport->back;
10384 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10385 	int regs_lines_32_bit, regs_lines_64_bit;
10386 	int ret;
10387 
10388 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10389 	if (ret) {
10390 		dev_err(&hdev->pdev->dev,
10391 			"Get register number failed, ret = %d.\n", ret);
10392 		return ret;
10393 	}
10394 
10395 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10396 	if (ret) {
10397 		dev_err(&hdev->pdev->dev,
10398 			"Get dfx reg len failed, ret = %d.\n", ret);
10399 		return ret;
10400 	}
10401 
10402 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10403 		REG_SEPARATOR_LINE;
10404 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10405 		REG_SEPARATOR_LINE;
10406 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10407 		REG_SEPARATOR_LINE;
10408 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10409 		REG_SEPARATOR_LINE;
10410 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10411 		REG_SEPARATOR_LINE;
10412 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10413 		REG_SEPARATOR_LINE;
10414 
10415 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10416 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10417 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10418 }
10419 
10420 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10421 			   void *data)
10422 {
10423 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10424 	struct hclge_vport *vport = hclge_get_vport(handle);
10425 	struct hclge_dev *hdev = vport->back;
10426 	u32 regs_num_32_bit, regs_num_64_bit;
10427 	int i, reg_num, separator_num, ret;
10428 	u32 *reg = data;
10429 
10430 	*version = hdev->fw_version;
10431 
10432 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10433 	if (ret) {
10434 		dev_err(&hdev->pdev->dev,
10435 			"Get register number failed, ret = %d.\n", ret);
10436 		return;
10437 	}
10438 
10439 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10440 
10441 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10442 	if (ret) {
10443 		dev_err(&hdev->pdev->dev,
10444 			"Get 32 bit register failed, ret = %d.\n", ret);
10445 		return;
10446 	}
10447 	reg_num = regs_num_32_bit;
10448 	reg += reg_num;
10449 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10450 	for (i = 0; i < separator_num; i++)
10451 		*reg++ = SEPARATOR_VALUE;
10452 
10453 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10454 	if (ret) {
10455 		dev_err(&hdev->pdev->dev,
10456 			"Get 64 bit register failed, ret = %d.\n", ret);
10457 		return;
10458 	}
10459 	reg_num = regs_num_64_bit * 2;
10460 	reg += reg_num;
10461 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10462 	for (i = 0; i < separator_num; i++)
10463 		*reg++ = SEPARATOR_VALUE;
10464 
10465 	ret = hclge_get_dfx_reg(hdev, reg);
10466 	if (ret)
10467 		dev_err(&hdev->pdev->dev,
10468 			"Get dfx register failed, ret = %d.\n", ret);
10469 }
10470 
10471 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10472 {
10473 	struct hclge_set_led_state_cmd *req;
10474 	struct hclge_desc desc;
10475 	int ret;
10476 
10477 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10478 
10479 	req = (struct hclge_set_led_state_cmd *)desc.data;
10480 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10481 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10482 
10483 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10484 	if (ret)
10485 		dev_err(&hdev->pdev->dev,
10486 			"Send set led state cmd error, ret =%d\n", ret);
10487 
10488 	return ret;
10489 }
10490 
10491 enum hclge_led_status {
10492 	HCLGE_LED_OFF,
10493 	HCLGE_LED_ON,
10494 	HCLGE_LED_NO_CHANGE = 0xFF,
10495 };
10496 
10497 static int hclge_set_led_id(struct hnae3_handle *handle,
10498 			    enum ethtool_phys_id_state status)
10499 {
10500 	struct hclge_vport *vport = hclge_get_vport(handle);
10501 	struct hclge_dev *hdev = vport->back;
10502 
10503 	switch (status) {
10504 	case ETHTOOL_ID_ACTIVE:
10505 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10506 	case ETHTOOL_ID_INACTIVE:
10507 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10508 	default:
10509 		return -EINVAL;
10510 	}
10511 }
10512 
10513 static void hclge_get_link_mode(struct hnae3_handle *handle,
10514 				unsigned long *supported,
10515 				unsigned long *advertising)
10516 {
10517 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10518 	struct hclge_vport *vport = hclge_get_vport(handle);
10519 	struct hclge_dev *hdev = vport->back;
10520 	unsigned int idx = 0;
10521 
10522 	for (; idx < size; idx++) {
10523 		supported[idx] = hdev->hw.mac.supported[idx];
10524 		advertising[idx] = hdev->hw.mac.advertising[idx];
10525 	}
10526 }
10527 
10528 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10529 {
10530 	struct hclge_vport *vport = hclge_get_vport(handle);
10531 	struct hclge_dev *hdev = vport->back;
10532 
10533 	return hclge_config_gro(hdev, enable);
10534 }
10535 
10536 static const struct hnae3_ae_ops hclge_ops = {
10537 	.init_ae_dev = hclge_init_ae_dev,
10538 	.uninit_ae_dev = hclge_uninit_ae_dev,
10539 	.flr_prepare = hclge_flr_prepare,
10540 	.flr_done = hclge_flr_done,
10541 	.init_client_instance = hclge_init_client_instance,
10542 	.uninit_client_instance = hclge_uninit_client_instance,
10543 	.map_ring_to_vector = hclge_map_ring_to_vector,
10544 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10545 	.get_vector = hclge_get_vector,
10546 	.put_vector = hclge_put_vector,
10547 	.set_promisc_mode = hclge_set_promisc_mode,
10548 	.set_loopback = hclge_set_loopback,
10549 	.start = hclge_ae_start,
10550 	.stop = hclge_ae_stop,
10551 	.client_start = hclge_client_start,
10552 	.client_stop = hclge_client_stop,
10553 	.get_status = hclge_get_status,
10554 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10555 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10556 	.get_media_type = hclge_get_media_type,
10557 	.check_port_speed = hclge_check_port_speed,
10558 	.get_fec = hclge_get_fec,
10559 	.set_fec = hclge_set_fec,
10560 	.get_rss_key_size = hclge_get_rss_key_size,
10561 	.get_rss_indir_size = hclge_get_rss_indir_size,
10562 	.get_rss = hclge_get_rss,
10563 	.set_rss = hclge_set_rss,
10564 	.set_rss_tuple = hclge_set_rss_tuple,
10565 	.get_rss_tuple = hclge_get_rss_tuple,
10566 	.get_tc_size = hclge_get_tc_size,
10567 	.get_mac_addr = hclge_get_mac_addr,
10568 	.set_mac_addr = hclge_set_mac_addr,
10569 	.do_ioctl = hclge_do_ioctl,
10570 	.add_uc_addr = hclge_add_uc_addr,
10571 	.rm_uc_addr = hclge_rm_uc_addr,
10572 	.add_mc_addr = hclge_add_mc_addr,
10573 	.rm_mc_addr = hclge_rm_mc_addr,
10574 	.set_autoneg = hclge_set_autoneg,
10575 	.get_autoneg = hclge_get_autoneg,
10576 	.restart_autoneg = hclge_restart_autoneg,
10577 	.halt_autoneg = hclge_halt_autoneg,
10578 	.get_pauseparam = hclge_get_pauseparam,
10579 	.set_pauseparam = hclge_set_pauseparam,
10580 	.set_mtu = hclge_set_mtu,
10581 	.reset_queue = hclge_reset_tqp,
10582 	.get_stats = hclge_get_stats,
10583 	.get_mac_stats = hclge_get_mac_stat,
10584 	.update_stats = hclge_update_stats,
10585 	.get_strings = hclge_get_strings,
10586 	.get_sset_count = hclge_get_sset_count,
10587 	.get_fw_version = hclge_get_fw_version,
10588 	.get_mdix_mode = hclge_get_mdix_mode,
10589 	.enable_vlan_filter = hclge_enable_vlan_filter,
10590 	.set_vlan_filter = hclge_set_vlan_filter,
10591 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10592 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10593 	.reset_event = hclge_reset_event,
10594 	.get_reset_level = hclge_get_reset_level,
10595 	.set_default_reset_request = hclge_set_def_reset_request,
10596 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10597 	.set_channels = hclge_set_channels,
10598 	.get_channels = hclge_get_channels,
10599 	.get_regs_len = hclge_get_regs_len,
10600 	.get_regs = hclge_get_regs,
10601 	.set_led_id = hclge_set_led_id,
10602 	.get_link_mode = hclge_get_link_mode,
10603 	.add_fd_entry = hclge_add_fd_entry,
10604 	.del_fd_entry = hclge_del_fd_entry,
10605 	.del_all_fd_entries = hclge_del_all_fd_entries,
10606 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10607 	.get_fd_rule_info = hclge_get_fd_rule_info,
10608 	.get_fd_all_rules = hclge_get_all_rules,
10609 	.restore_fd_rules = hclge_restore_fd_entries,
10610 	.enable_fd = hclge_enable_fd,
10611 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10612 	.dbg_run_cmd = hclge_dbg_run_cmd,
10613 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10614 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10615 	.ae_dev_resetting = hclge_ae_dev_resetting,
10616 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10617 	.set_gro_en = hclge_gro_en,
10618 	.get_global_queue_id = hclge_covert_handle_qid_global,
10619 	.set_timer_task = hclge_set_timer_task,
10620 	.mac_connect_phy = hclge_mac_connect_phy,
10621 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10622 	.restore_vlan_table = hclge_restore_vlan_table,
10623 	.get_vf_config = hclge_get_vf_config,
10624 	.set_vf_link_state = hclge_set_vf_link_state,
10625 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
10626 	.set_vf_trust = hclge_set_vf_trust,
10627 	.set_vf_rate = hclge_set_vf_rate,
10628 	.set_vf_mac = hclge_set_vf_mac,
10629 };
10630 
10631 static struct hnae3_ae_algo ae_algo = {
10632 	.ops = &hclge_ops,
10633 	.pdev_id_table = ae_algo_pci_tbl,
10634 };
10635 
10636 static int hclge_init(void)
10637 {
10638 	pr_info("%s is initializing\n", HCLGE_NAME);
10639 
10640 	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10641 	if (!hclge_wq) {
10642 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10643 		return -ENOMEM;
10644 	}
10645 
10646 	hnae3_register_ae_algo(&ae_algo);
10647 
10648 	return 0;
10649 }
10650 
10651 static void hclge_exit(void)
10652 {
10653 	hnae3_unregister_ae_algo(&ae_algo);
10654 	destroy_workqueue(hclge_wq);
10655 }
10656 module_init(hclge_init);
10657 module_exit(hclge_exit);
10658 
10659 MODULE_LICENSE("GPL");
10660 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10661 MODULE_DESCRIPTION("HCLGE Driver");
10662 MODULE_VERSION(HCLGE_MOD_VERSION);
10663