xref: /openbmc/linux/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (revision a89aa749ece9c6fee7932163472d2ee0efd6ddd3)
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66 			       u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70 						   unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72 
73 static struct hnae3_ae_algo ae_algo;
74 
75 static struct workqueue_struct *hclge_wq;
76 
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85 	/* required last entry */
86 	{0, }
87 };
88 
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90 
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92 					 HCLGE_CMDQ_TX_ADDR_H_REG,
93 					 HCLGE_CMDQ_TX_DEPTH_REG,
94 					 HCLGE_CMDQ_TX_TAIL_REG,
95 					 HCLGE_CMDQ_TX_HEAD_REG,
96 					 HCLGE_CMDQ_RX_ADDR_L_REG,
97 					 HCLGE_CMDQ_RX_ADDR_H_REG,
98 					 HCLGE_CMDQ_RX_DEPTH_REG,
99 					 HCLGE_CMDQ_RX_TAIL_REG,
100 					 HCLGE_CMDQ_RX_HEAD_REG,
101 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
102 					 HCLGE_CMDQ_INTR_STS_REG,
103 					 HCLGE_CMDQ_INTR_EN_REG,
104 					 HCLGE_CMDQ_INTR_GEN_REG};
105 
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107 					   HCLGE_VECTOR0_OTER_EN_REG,
108 					   HCLGE_MISC_RESET_STS_REG,
109 					   HCLGE_MISC_VECTOR_INT_STS,
110 					   HCLGE_GLOBAL_RESET_REG,
111 					   HCLGE_FUN_RST_ING,
112 					   HCLGE_GRO_EN_REG};
113 
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115 					 HCLGE_RING_RX_ADDR_H_REG,
116 					 HCLGE_RING_RX_BD_NUM_REG,
117 					 HCLGE_RING_RX_BD_LENGTH_REG,
118 					 HCLGE_RING_RX_MERGE_EN_REG,
119 					 HCLGE_RING_RX_TAIL_REG,
120 					 HCLGE_RING_RX_HEAD_REG,
121 					 HCLGE_RING_RX_FBD_NUM_REG,
122 					 HCLGE_RING_RX_OFFSET_REG,
123 					 HCLGE_RING_RX_FBD_OFFSET_REG,
124 					 HCLGE_RING_RX_STASH_REG,
125 					 HCLGE_RING_RX_BD_ERR_REG,
126 					 HCLGE_RING_TX_ADDR_L_REG,
127 					 HCLGE_RING_TX_ADDR_H_REG,
128 					 HCLGE_RING_TX_BD_NUM_REG,
129 					 HCLGE_RING_TX_PRIORITY_REG,
130 					 HCLGE_RING_TX_TC_REG,
131 					 HCLGE_RING_TX_MERGE_EN_REG,
132 					 HCLGE_RING_TX_TAIL_REG,
133 					 HCLGE_RING_TX_HEAD_REG,
134 					 HCLGE_RING_TX_FBD_NUM_REG,
135 					 HCLGE_RING_TX_OFFSET_REG,
136 					 HCLGE_RING_TX_EBD_NUM_REG,
137 					 HCLGE_RING_TX_EBD_OFFSET_REG,
138 					 HCLGE_RING_TX_BD_ERR_REG,
139 					 HCLGE_RING_EN_REG};
140 
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142 					     HCLGE_TQP_INTR_GL0_REG,
143 					     HCLGE_TQP_INTR_GL1_REG,
144 					     HCLGE_TQP_INTR_GL2_REG,
145 					     HCLGE_TQP_INTR_RL_REG};
146 
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148 	"App    Loopback test",
149 	"Serdes serial Loopback test",
150 	"Serdes parallel Loopback test",
151 	"Phy    Loopback test"
152 };
153 
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155 	{"mac_tx_mac_pause_num",
156 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157 	{"mac_rx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159 	{"mac_tx_control_pkt_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161 	{"mac_rx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163 	{"mac_tx_pfc_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165 	{"mac_tx_pfc_pri0_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167 	{"mac_tx_pfc_pri1_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169 	{"mac_tx_pfc_pri2_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171 	{"mac_tx_pfc_pri3_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173 	{"mac_tx_pfc_pri4_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175 	{"mac_tx_pfc_pri5_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177 	{"mac_tx_pfc_pri6_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179 	{"mac_tx_pfc_pri7_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181 	{"mac_rx_pfc_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183 	{"mac_rx_pfc_pri0_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185 	{"mac_rx_pfc_pri1_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187 	{"mac_rx_pfc_pri2_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189 	{"mac_rx_pfc_pri3_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191 	{"mac_rx_pfc_pri4_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193 	{"mac_rx_pfc_pri5_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195 	{"mac_rx_pfc_pri6_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197 	{"mac_rx_pfc_pri7_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199 	{"mac_tx_total_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201 	{"mac_tx_total_oct_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203 	{"mac_tx_good_pkt_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205 	{"mac_tx_bad_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207 	{"mac_tx_good_oct_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209 	{"mac_tx_bad_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211 	{"mac_tx_uni_pkt_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213 	{"mac_tx_multi_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215 	{"mac_tx_broad_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217 	{"mac_tx_undersize_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219 	{"mac_tx_oversize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221 	{"mac_tx_64_oct_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223 	{"mac_tx_65_127_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225 	{"mac_tx_128_255_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227 	{"mac_tx_256_511_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229 	{"mac_tx_512_1023_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231 	{"mac_tx_1024_1518_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233 	{"mac_tx_1519_2047_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235 	{"mac_tx_2048_4095_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237 	{"mac_tx_4096_8191_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239 	{"mac_tx_8192_9216_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241 	{"mac_tx_9217_12287_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243 	{"mac_tx_12288_16383_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245 	{"mac_tx_1519_max_good_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247 	{"mac_tx_1519_max_bad_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249 	{"mac_rx_total_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251 	{"mac_rx_total_oct_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253 	{"mac_rx_good_pkt_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255 	{"mac_rx_bad_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257 	{"mac_rx_good_oct_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259 	{"mac_rx_bad_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261 	{"mac_rx_uni_pkt_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263 	{"mac_rx_multi_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265 	{"mac_rx_broad_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267 	{"mac_rx_undersize_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269 	{"mac_rx_oversize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271 	{"mac_rx_64_oct_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273 	{"mac_rx_65_127_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275 	{"mac_rx_128_255_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277 	{"mac_rx_256_511_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279 	{"mac_rx_512_1023_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281 	{"mac_rx_1024_1518_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283 	{"mac_rx_1519_2047_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285 	{"mac_rx_2048_4095_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287 	{"mac_rx_4096_8191_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289 	{"mac_rx_8192_9216_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291 	{"mac_rx_9217_12287_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293 	{"mac_rx_12288_16383_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295 	{"mac_rx_1519_max_good_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297 	{"mac_rx_1519_max_bad_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299 
300 	{"mac_tx_fragment_pkt_num",
301 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302 	{"mac_tx_undermin_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304 	{"mac_tx_jabber_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306 	{"mac_tx_err_all_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308 	{"mac_tx_from_app_good_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310 	{"mac_tx_from_app_bad_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312 	{"mac_rx_fragment_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314 	{"mac_rx_undermin_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316 	{"mac_rx_jabber_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318 	{"mac_rx_fcs_err_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320 	{"mac_rx_send_app_good_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322 	{"mac_rx_send_app_bad_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325 
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327 	{
328 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
330 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331 		.i_port_bitmap = 0x1,
332 	},
333 };
334 
335 static const u8 hclge_hash_key[] = {
336 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342 
343 static const u32 hclge_dfx_bd_offset_list[] = {
344 	HCLGE_DFX_BIOS_BD_OFFSET,
345 	HCLGE_DFX_SSU_0_BD_OFFSET,
346 	HCLGE_DFX_SSU_1_BD_OFFSET,
347 	HCLGE_DFX_IGU_BD_OFFSET,
348 	HCLGE_DFX_RPU_0_BD_OFFSET,
349 	HCLGE_DFX_RPU_1_BD_OFFSET,
350 	HCLGE_DFX_NCSI_BD_OFFSET,
351 	HCLGE_DFX_RTC_BD_OFFSET,
352 	HCLGE_DFX_PPP_BD_OFFSET,
353 	HCLGE_DFX_RCB_BD_OFFSET,
354 	HCLGE_DFX_TQP_BD_OFFSET,
355 	HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357 
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
360 	HCLGE_OPC_DFX_SSU_REG_0,
361 	HCLGE_OPC_DFX_SSU_REG_1,
362 	HCLGE_OPC_DFX_IGU_EGU_REG,
363 	HCLGE_OPC_DFX_RPU_REG_0,
364 	HCLGE_OPC_DFX_RPU_REG_1,
365 	HCLGE_OPC_DFX_NCSI_REG,
366 	HCLGE_OPC_DFX_RTC_REG,
367 	HCLGE_OPC_DFX_PPP_REG,
368 	HCLGE_OPC_DFX_RCB_REG,
369 	HCLGE_OPC_DFX_TQP_REG,
370 	HCLGE_OPC_DFX_SSU_REG_2
371 };
372 
373 static const struct key_info meta_data_key_info[] = {
374 	{ PACKET_TYPE_ID, 6},
375 	{ IP_FRAGEMENT, 1},
376 	{ ROCE_TYPE, 1},
377 	{ NEXT_KEY, 5},
378 	{ VLAN_NUMBER, 2},
379 	{ SRC_VPORT, 12},
380 	{ DST_VPORT, 12},
381 	{ TUNNEL_PACKET, 1},
382 };
383 
384 static const struct key_info tuple_key_info[] = {
385 	{ OUTER_DST_MAC, 48},
386 	{ OUTER_SRC_MAC, 48},
387 	{ OUTER_VLAN_TAG_FST, 16},
388 	{ OUTER_VLAN_TAG_SEC, 16},
389 	{ OUTER_ETH_TYPE, 16},
390 	{ OUTER_L2_RSV, 16},
391 	{ OUTER_IP_TOS, 8},
392 	{ OUTER_IP_PROTO, 8},
393 	{ OUTER_SRC_IP, 32},
394 	{ OUTER_DST_IP, 32},
395 	{ OUTER_L3_RSV, 16},
396 	{ OUTER_SRC_PORT, 16},
397 	{ OUTER_DST_PORT, 16},
398 	{ OUTER_L4_RSV, 32},
399 	{ OUTER_TUN_VNI, 24},
400 	{ OUTER_TUN_FLOW_ID, 8},
401 	{ INNER_DST_MAC, 48},
402 	{ INNER_SRC_MAC, 48},
403 	{ INNER_VLAN_TAG_FST, 16},
404 	{ INNER_VLAN_TAG_SEC, 16},
405 	{ INNER_ETH_TYPE, 16},
406 	{ INNER_L2_RSV, 16},
407 	{ INNER_IP_TOS, 8},
408 	{ INNER_IP_PROTO, 8},
409 	{ INNER_SRC_IP, 32},
410 	{ INNER_DST_IP, 32},
411 	{ INNER_L3_RSV, 16},
412 	{ INNER_SRC_PORT, 16},
413 	{ INNER_DST_PORT, 16},
414 	{ INNER_L4_RSV, 32},
415 };
416 
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420 
421 	u64 *data = (u64 *)(&hdev->mac_stats);
422 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423 	__le64 *desc_data;
424 	int i, k, n;
425 	int ret;
426 
427 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429 	if (ret) {
430 		dev_err(&hdev->pdev->dev,
431 			"Get MAC pkt stats fail, status = %d.\n", ret);
432 
433 		return ret;
434 	}
435 
436 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437 		/* for special opcode 0032, only the first desc has the head */
438 		if (unlikely(i == 0)) {
439 			desc_data = (__le64 *)(&desc[i].data[0]);
440 			n = HCLGE_RD_FIRST_STATS_NUM;
441 		} else {
442 			desc_data = (__le64 *)(&desc[i]);
443 			n = HCLGE_RD_OTHER_STATS_NUM;
444 		}
445 
446 		for (k = 0; k < n; k++) {
447 			*data += le64_to_cpu(*desc_data);
448 			data++;
449 			desc_data++;
450 		}
451 	}
452 
453 	return 0;
454 }
455 
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458 	u64 *data = (u64 *)(&hdev->mac_stats);
459 	struct hclge_desc *desc;
460 	__le64 *desc_data;
461 	u16 i, k, n;
462 	int ret;
463 
464 	/* This may be called inside atomic sections,
465 	 * so GFP_ATOMIC is more suitalbe here
466 	 */
467 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468 	if (!desc)
469 		return -ENOMEM;
470 
471 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473 	if (ret) {
474 		kfree(desc);
475 		return ret;
476 	}
477 
478 	for (i = 0; i < desc_num; i++) {
479 		/* for special opcode 0034, only the first desc has the head */
480 		if (i == 0) {
481 			desc_data = (__le64 *)(&desc[i].data[0]);
482 			n = HCLGE_RD_FIRST_STATS_NUM;
483 		} else {
484 			desc_data = (__le64 *)(&desc[i]);
485 			n = HCLGE_RD_OTHER_STATS_NUM;
486 		}
487 
488 		for (k = 0; k < n; k++) {
489 			*data += le64_to_cpu(*desc_data);
490 			data++;
491 			desc_data++;
492 		}
493 	}
494 
495 	kfree(desc);
496 
497 	return 0;
498 }
499 
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502 	struct hclge_desc desc;
503 	__le32 *desc_data;
504 	u32 reg_num;
505 	int ret;
506 
507 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509 	if (ret)
510 		return ret;
511 
512 	desc_data = (__le32 *)(&desc.data[0]);
513 	reg_num = le32_to_cpu(*desc_data);
514 
515 	*desc_num = 1 + ((reg_num - 3) >> 2) +
516 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517 
518 	return 0;
519 }
520 
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523 	u32 desc_num;
524 	int ret;
525 
526 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
527 
528 	/* The firmware supports the new statistics acquisition method */
529 	if (!ret)
530 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
531 	else if (ret == -EOPNOTSUPP)
532 		ret = hclge_mac_update_stats_defective(hdev);
533 	else
534 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535 
536 	return ret;
537 }
538 
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542 	struct hclge_vport *vport = hclge_get_vport(handle);
543 	struct hclge_dev *hdev = vport->back;
544 	struct hnae3_queue *queue;
545 	struct hclge_desc desc[1];
546 	struct hclge_tqp *tqp;
547 	int ret, i;
548 
549 	for (i = 0; i < kinfo->num_tqps; i++) {
550 		queue = handle->kinfo.tqp[i];
551 		tqp = container_of(queue, struct hclge_tqp, q);
552 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
553 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554 					   true);
555 
556 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
558 		if (ret) {
559 			dev_err(&hdev->pdev->dev,
560 				"Query tqp stat fail, status = %d,queue = %d\n",
561 				ret, i);
562 			return ret;
563 		}
564 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565 			le32_to_cpu(desc[0].data[1]);
566 	}
567 
568 	for (i = 0; i < kinfo->num_tqps; i++) {
569 		queue = handle->kinfo.tqp[i];
570 		tqp = container_of(queue, struct hclge_tqp, q);
571 		/* command : HCLGE_OPC_QUERY_IGU_STAT */
572 		hclge_cmd_setup_basic_desc(&desc[0],
573 					   HCLGE_OPC_QUERY_TX_STATUS,
574 					   true);
575 
576 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
578 		if (ret) {
579 			dev_err(&hdev->pdev->dev,
580 				"Query tqp stat fail, status = %d,queue = %d\n",
581 				ret, i);
582 			return ret;
583 		}
584 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585 			le32_to_cpu(desc[0].data[1]);
586 	}
587 
588 	return 0;
589 }
590 
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594 	struct hclge_tqp *tqp;
595 	u64 *buff = data;
596 	int i;
597 
598 	for (i = 0; i < kinfo->num_tqps; i++) {
599 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601 	}
602 
603 	for (i = 0; i < kinfo->num_tqps; i++) {
604 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606 	}
607 
608 	return buff;
609 }
610 
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614 
615 	/* each tqp has TX & RX two queues */
616 	return kinfo->num_tqps * (2);
617 }
618 
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622 	u8 *buff = data;
623 	int i = 0;
624 
625 	for (i = 0; i < kinfo->num_tqps; i++) {
626 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627 			struct hclge_tqp, q);
628 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629 			 tqp->index);
630 		buff = buff + ETH_GSTRING_LEN;
631 	}
632 
633 	for (i = 0; i < kinfo->num_tqps; i++) {
634 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635 			struct hclge_tqp, q);
636 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637 			 tqp->index);
638 		buff = buff + ETH_GSTRING_LEN;
639 	}
640 
641 	return buff;
642 }
643 
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645 				 const struct hclge_comm_stats_str strs[],
646 				 int size, u64 *data)
647 {
648 	u64 *buf = data;
649 	u32 i;
650 
651 	for (i = 0; i < size; i++)
652 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653 
654 	return buf + size;
655 }
656 
657 static u8 *hclge_comm_get_strings(u32 stringset,
658 				  const struct hclge_comm_stats_str strs[],
659 				  int size, u8 *data)
660 {
661 	char *buff = (char *)data;
662 	u32 i;
663 
664 	if (stringset != ETH_SS_STATS)
665 		return buff;
666 
667 	for (i = 0; i < size; i++) {
668 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669 		buff = buff + ETH_GSTRING_LEN;
670 	}
671 
672 	return (u8 *)buff;
673 }
674 
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677 	struct hnae3_handle *handle;
678 	int status;
679 
680 	handle = &hdev->vport[0].nic;
681 	if (handle->client) {
682 		status = hclge_tqps_update_stats(handle);
683 		if (status) {
684 			dev_err(&hdev->pdev->dev,
685 				"Update TQPS stats fail, status = %d.\n",
686 				status);
687 		}
688 	}
689 
690 	status = hclge_mac_update_stats(hdev);
691 	if (status)
692 		dev_err(&hdev->pdev->dev,
693 			"Update MAC stats fail, status = %d.\n", status);
694 }
695 
696 static void hclge_update_stats(struct hnae3_handle *handle,
697 			       struct net_device_stats *net_stats)
698 {
699 	struct hclge_vport *vport = hclge_get_vport(handle);
700 	struct hclge_dev *hdev = vport->back;
701 	int status;
702 
703 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704 		return;
705 
706 	status = hclge_mac_update_stats(hdev);
707 	if (status)
708 		dev_err(&hdev->pdev->dev,
709 			"Update MAC stats fail, status = %d.\n",
710 			status);
711 
712 	status = hclge_tqps_update_stats(handle);
713 	if (status)
714 		dev_err(&hdev->pdev->dev,
715 			"Update TQPS stats fail, status = %d.\n",
716 			status);
717 
718 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720 
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724 		HNAE3_SUPPORT_PHY_LOOPBACK |\
725 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727 
728 	struct hclge_vport *vport = hclge_get_vport(handle);
729 	struct hclge_dev *hdev = vport->back;
730 	int count = 0;
731 
732 	/* Loopback test support rules:
733 	 * mac: only GE mode support
734 	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
735 	 * phy: only support when phy device exist on board
736 	 */
737 	if (stringset == ETH_SS_TEST) {
738 		/* clear loopback bit flags at first */
739 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740 		if (hdev->pdev->revision >= 0x21 ||
741 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744 			count += 1;
745 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746 		}
747 
748 		count += 2;
749 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751 
752 		if (hdev->hw.mac.phydev) {
753 			count += 1;
754 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755 		}
756 
757 	} else if (stringset == ETH_SS_STATS) {
758 		count = ARRAY_SIZE(g_mac_stats_string) +
759 			hclge_tqps_get_sset_count(handle, stringset);
760 	}
761 
762 	return count;
763 }
764 
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766 			      u8 *data)
767 {
768 	u8 *p = (char *)data;
769 	int size;
770 
771 	if (stringset == ETH_SS_STATS) {
772 		size = ARRAY_SIZE(g_mac_stats_string);
773 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774 					   size, p);
775 		p = hclge_tqps_get_strings(handle, p);
776 	} else if (stringset == ETH_SS_TEST) {
777 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779 			       ETH_GSTRING_LEN);
780 			p += ETH_GSTRING_LEN;
781 		}
782 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784 			       ETH_GSTRING_LEN);
785 			p += ETH_GSTRING_LEN;
786 		}
787 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788 			memcpy(p,
789 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790 			       ETH_GSTRING_LEN);
791 			p += ETH_GSTRING_LEN;
792 		}
793 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795 			       ETH_GSTRING_LEN);
796 			p += ETH_GSTRING_LEN;
797 		}
798 	}
799 }
800 
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803 	struct hclge_vport *vport = hclge_get_vport(handle);
804 	struct hclge_dev *hdev = vport->back;
805 	u64 *p;
806 
807 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808 				 ARRAY_SIZE(g_mac_stats_string), data);
809 	p = hclge_tqps_get_stats(handle, p);
810 }
811 
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813 			       struct hns3_mac_stats *mac_stats)
814 {
815 	struct hclge_vport *vport = hclge_get_vport(handle);
816 	struct hclge_dev *hdev = vport->back;
817 
818 	hclge_update_stats(handle, NULL);
819 
820 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823 
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825 				   struct hclge_func_status_cmd *status)
826 {
827 #define HCLGE_MAC_ID_MASK	0xF
828 
829 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
830 		return -EINVAL;
831 
832 	/* Set the pf to main pf */
833 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
834 		hdev->flag |= HCLGE_FLAG_MAIN;
835 	else
836 		hdev->flag &= ~HCLGE_FLAG_MAIN;
837 
838 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
839 	return 0;
840 }
841 
842 static int hclge_query_function_status(struct hclge_dev *hdev)
843 {
844 #define HCLGE_QUERY_MAX_CNT	5
845 
846 	struct hclge_func_status_cmd *req;
847 	struct hclge_desc desc;
848 	int timeout = 0;
849 	int ret;
850 
851 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
852 	req = (struct hclge_func_status_cmd *)desc.data;
853 
854 	do {
855 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
856 		if (ret) {
857 			dev_err(&hdev->pdev->dev,
858 				"query function status failed %d.\n", ret);
859 			return ret;
860 		}
861 
862 		/* Check pf reset is done */
863 		if (req->pf_state)
864 			break;
865 		usleep_range(1000, 2000);
866 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
867 
868 	return hclge_parse_func_status(hdev, req);
869 }
870 
871 static int hclge_query_pf_resource(struct hclge_dev *hdev)
872 {
873 	struct hclge_pf_res_cmd *req;
874 	struct hclge_desc desc;
875 	int ret;
876 
877 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
878 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
879 	if (ret) {
880 		dev_err(&hdev->pdev->dev,
881 			"query pf resource failed %d.\n", ret);
882 		return ret;
883 	}
884 
885 	req = (struct hclge_pf_res_cmd *)desc.data;
886 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
887 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
888 
889 	if (req->tx_buf_size)
890 		hdev->tx_buf_size =
891 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
892 	else
893 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
894 
895 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
896 
897 	if (req->dv_buf_size)
898 		hdev->dv_buf_size =
899 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
900 	else
901 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
902 
903 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
904 
905 	if (hnae3_dev_roce_supported(hdev)) {
906 		hdev->roce_base_msix_offset =
907 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
908 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
909 		hdev->num_roce_msi =
910 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
911 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
912 
913 		/* nic's msix numbers is always equals to the roce's. */
914 		hdev->num_nic_msi = hdev->num_roce_msi;
915 
916 		/* PF should have NIC vectors and Roce vectors,
917 		 * NIC vectors are queued before Roce vectors.
918 		 */
919 		hdev->num_msi = hdev->num_roce_msi +
920 				hdev->roce_base_msix_offset;
921 	} else {
922 		hdev->num_msi =
923 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
924 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
925 
926 		hdev->num_nic_msi = hdev->num_msi;
927 	}
928 
929 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
930 		dev_err(&hdev->pdev->dev,
931 			"Just %u msi resources, not enough for pf(min:2).\n",
932 			hdev->num_nic_msi);
933 		return -EINVAL;
934 	}
935 
936 	return 0;
937 }
938 
939 static int hclge_parse_speed(int speed_cmd, int *speed)
940 {
941 	switch (speed_cmd) {
942 	case 6:
943 		*speed = HCLGE_MAC_SPEED_10M;
944 		break;
945 	case 7:
946 		*speed = HCLGE_MAC_SPEED_100M;
947 		break;
948 	case 0:
949 		*speed = HCLGE_MAC_SPEED_1G;
950 		break;
951 	case 1:
952 		*speed = HCLGE_MAC_SPEED_10G;
953 		break;
954 	case 2:
955 		*speed = HCLGE_MAC_SPEED_25G;
956 		break;
957 	case 3:
958 		*speed = HCLGE_MAC_SPEED_40G;
959 		break;
960 	case 4:
961 		*speed = HCLGE_MAC_SPEED_50G;
962 		break;
963 	case 5:
964 		*speed = HCLGE_MAC_SPEED_100G;
965 		break;
966 	default:
967 		return -EINVAL;
968 	}
969 
970 	return 0;
971 }
972 
973 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
974 {
975 	struct hclge_vport *vport = hclge_get_vport(handle);
976 	struct hclge_dev *hdev = vport->back;
977 	u32 speed_ability = hdev->hw.mac.speed_ability;
978 	u32 speed_bit = 0;
979 
980 	switch (speed) {
981 	case HCLGE_MAC_SPEED_10M:
982 		speed_bit = HCLGE_SUPPORT_10M_BIT;
983 		break;
984 	case HCLGE_MAC_SPEED_100M:
985 		speed_bit = HCLGE_SUPPORT_100M_BIT;
986 		break;
987 	case HCLGE_MAC_SPEED_1G:
988 		speed_bit = HCLGE_SUPPORT_1G_BIT;
989 		break;
990 	case HCLGE_MAC_SPEED_10G:
991 		speed_bit = HCLGE_SUPPORT_10G_BIT;
992 		break;
993 	case HCLGE_MAC_SPEED_25G:
994 		speed_bit = HCLGE_SUPPORT_25G_BIT;
995 		break;
996 	case HCLGE_MAC_SPEED_40G:
997 		speed_bit = HCLGE_SUPPORT_40G_BIT;
998 		break;
999 	case HCLGE_MAC_SPEED_50G:
1000 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1001 		break;
1002 	case HCLGE_MAC_SPEED_100G:
1003 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1004 		break;
1005 	default:
1006 		return -EINVAL;
1007 	}
1008 
1009 	if (speed_bit & speed_ability)
1010 		return 0;
1011 
1012 	return -EINVAL;
1013 }
1014 
1015 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1016 {
1017 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1018 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1019 				 mac->supported);
1020 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1021 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1022 				 mac->supported);
1023 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1024 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1025 				 mac->supported);
1026 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1027 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1028 				 mac->supported);
1029 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1030 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1031 				 mac->supported);
1032 }
1033 
1034 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1035 {
1036 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1037 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1038 				 mac->supported);
1039 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1040 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1041 				 mac->supported);
1042 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1043 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1044 				 mac->supported);
1045 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1046 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1047 				 mac->supported);
1048 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1049 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1050 				 mac->supported);
1051 }
1052 
1053 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1054 {
1055 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1056 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1057 				 mac->supported);
1058 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1059 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1060 				 mac->supported);
1061 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1062 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1063 				 mac->supported);
1064 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1065 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1066 				 mac->supported);
1067 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1068 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1069 				 mac->supported);
1070 }
1071 
1072 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1073 {
1074 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1075 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1076 				 mac->supported);
1077 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1078 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1079 				 mac->supported);
1080 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1081 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1082 				 mac->supported);
1083 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1084 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1085 				 mac->supported);
1086 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1087 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1088 				 mac->supported);
1089 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1090 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1091 				 mac->supported);
1092 }
1093 
1094 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1095 {
1096 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1097 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1098 
1099 	switch (mac->speed) {
1100 	case HCLGE_MAC_SPEED_10G:
1101 	case HCLGE_MAC_SPEED_40G:
1102 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1103 				 mac->supported);
1104 		mac->fec_ability =
1105 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1106 		break;
1107 	case HCLGE_MAC_SPEED_25G:
1108 	case HCLGE_MAC_SPEED_50G:
1109 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1110 				 mac->supported);
1111 		mac->fec_ability =
1112 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1113 			BIT(HNAE3_FEC_AUTO);
1114 		break;
1115 	case HCLGE_MAC_SPEED_100G:
1116 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1117 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1118 		break;
1119 	default:
1120 		mac->fec_ability = 0;
1121 		break;
1122 	}
1123 }
1124 
1125 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1126 					u8 speed_ability)
1127 {
1128 	struct hclge_mac *mac = &hdev->hw.mac;
1129 
1130 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1131 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1132 				 mac->supported);
1133 
1134 	hclge_convert_setting_sr(mac, speed_ability);
1135 	hclge_convert_setting_lr(mac, speed_ability);
1136 	hclge_convert_setting_cr(mac, speed_ability);
1137 	if (hdev->pdev->revision >= 0x21)
1138 		hclge_convert_setting_fec(mac);
1139 
1140 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1141 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1142 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1143 }
1144 
1145 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1146 					    u8 speed_ability)
1147 {
1148 	struct hclge_mac *mac = &hdev->hw.mac;
1149 
1150 	hclge_convert_setting_kr(mac, speed_ability);
1151 	if (hdev->pdev->revision >= 0x21)
1152 		hclge_convert_setting_fec(mac);
1153 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1154 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1156 }
1157 
1158 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1159 					 u8 speed_ability)
1160 {
1161 	unsigned long *supported = hdev->hw.mac.supported;
1162 
1163 	/* default to support all speed for GE port */
1164 	if (!speed_ability)
1165 		speed_ability = HCLGE_SUPPORT_GE;
1166 
1167 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1168 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1169 				 supported);
1170 
1171 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1172 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1173 				 supported);
1174 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1175 				 supported);
1176 	}
1177 
1178 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1179 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1180 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1181 	}
1182 
1183 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1184 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1185 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1186 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1187 }
1188 
1189 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1190 {
1191 	u8 media_type = hdev->hw.mac.media_type;
1192 
1193 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1194 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1195 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1196 		hclge_parse_copper_link_mode(hdev, speed_ability);
1197 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1198 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1199 }
1200 
1201 static u32 hclge_get_max_speed(u8 speed_ability)
1202 {
1203 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1204 		return HCLGE_MAC_SPEED_100G;
1205 
1206 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1207 		return HCLGE_MAC_SPEED_50G;
1208 
1209 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1210 		return HCLGE_MAC_SPEED_40G;
1211 
1212 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1213 		return HCLGE_MAC_SPEED_25G;
1214 
1215 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1216 		return HCLGE_MAC_SPEED_10G;
1217 
1218 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1219 		return HCLGE_MAC_SPEED_1G;
1220 
1221 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1222 		return HCLGE_MAC_SPEED_100M;
1223 
1224 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1225 		return HCLGE_MAC_SPEED_10M;
1226 
1227 	return HCLGE_MAC_SPEED_1G;
1228 }
1229 
1230 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1231 {
1232 	struct hclge_cfg_param_cmd *req;
1233 	u64 mac_addr_tmp_high;
1234 	u64 mac_addr_tmp;
1235 	unsigned int i;
1236 
1237 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1238 
1239 	/* get the configuration */
1240 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1241 					      HCLGE_CFG_VMDQ_M,
1242 					      HCLGE_CFG_VMDQ_S);
1243 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1244 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1245 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 					    HCLGE_CFG_TQP_DESC_N_M,
1247 					    HCLGE_CFG_TQP_DESC_N_S);
1248 
1249 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1250 					HCLGE_CFG_PHY_ADDR_M,
1251 					HCLGE_CFG_PHY_ADDR_S);
1252 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1253 					  HCLGE_CFG_MEDIA_TP_M,
1254 					  HCLGE_CFG_MEDIA_TP_S);
1255 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1256 					  HCLGE_CFG_RX_BUF_LEN_M,
1257 					  HCLGE_CFG_RX_BUF_LEN_S);
1258 	/* get mac_address */
1259 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1260 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1261 					    HCLGE_CFG_MAC_ADDR_H_M,
1262 					    HCLGE_CFG_MAC_ADDR_H_S);
1263 
1264 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1265 
1266 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1267 					     HCLGE_CFG_DEFAULT_SPEED_M,
1268 					     HCLGE_CFG_DEFAULT_SPEED_S);
1269 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1270 					    HCLGE_CFG_RSS_SIZE_M,
1271 					    HCLGE_CFG_RSS_SIZE_S);
1272 
1273 	for (i = 0; i < ETH_ALEN; i++)
1274 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1275 
1276 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1277 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1278 
1279 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1280 					     HCLGE_CFG_SPEED_ABILITY_M,
1281 					     HCLGE_CFG_SPEED_ABILITY_S);
1282 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1283 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1284 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1285 	if (!cfg->umv_space)
1286 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1287 }
1288 
1289 /* hclge_get_cfg: query the static parameter from flash
1290  * @hdev: pointer to struct hclge_dev
1291  * @hcfg: the config structure to be getted
1292  */
1293 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1294 {
1295 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1296 	struct hclge_cfg_param_cmd *req;
1297 	unsigned int i;
1298 	int ret;
1299 
1300 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1301 		u32 offset = 0;
1302 
1303 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1304 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1305 					   true);
1306 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1307 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1308 		/* Len should be united by 4 bytes when send to hardware */
1309 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1310 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1311 		req->offset = cpu_to_le32(offset);
1312 	}
1313 
1314 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1315 	if (ret) {
1316 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1317 		return ret;
1318 	}
1319 
1320 	hclge_parse_cfg(hcfg, desc);
1321 
1322 	return 0;
1323 }
1324 
1325 static int hclge_get_cap(struct hclge_dev *hdev)
1326 {
1327 	int ret;
1328 
1329 	ret = hclge_query_function_status(hdev);
1330 	if (ret) {
1331 		dev_err(&hdev->pdev->dev,
1332 			"query function status error %d.\n", ret);
1333 		return ret;
1334 	}
1335 
1336 	/* get pf resource */
1337 	return hclge_query_pf_resource(hdev);
1338 }
1339 
1340 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1341 {
1342 #define HCLGE_MIN_TX_DESC	64
1343 #define HCLGE_MIN_RX_DESC	64
1344 
1345 	if (!is_kdump_kernel())
1346 		return;
1347 
1348 	dev_info(&hdev->pdev->dev,
1349 		 "Running kdump kernel. Using minimal resources\n");
1350 
1351 	/* minimal queue pairs equals to the number of vports */
1352 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1353 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1354 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1355 }
1356 
1357 static int hclge_configure(struct hclge_dev *hdev)
1358 {
1359 	struct hclge_cfg cfg;
1360 	unsigned int i;
1361 	int ret;
1362 
1363 	ret = hclge_get_cfg(hdev, &cfg);
1364 	if (ret) {
1365 		dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1366 		return ret;
1367 	}
1368 
1369 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 	hdev->base_tqp_pid = 0;
1371 	hdev->rss_size_max = cfg.rss_size_max;
1372 	hdev->rx_buf_len = cfg.rx_buf_len;
1373 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1374 	hdev->hw.mac.media_type = cfg.media_type;
1375 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 	hdev->num_tx_desc = cfg.tqp_desc_num;
1377 	hdev->num_rx_desc = cfg.tqp_desc_num;
1378 	hdev->tm_info.num_pg = 1;
1379 	hdev->tc_max = cfg.tc_num;
1380 	hdev->tm_info.hw_pfc_map = 0;
1381 	hdev->wanted_umv_size = cfg.umv_space;
1382 
1383 	if (hnae3_dev_fd_supported(hdev)) {
1384 		hdev->fd_en = true;
1385 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 	}
1387 
1388 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1391 		return ret;
1392 	}
1393 
1394 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1395 
1396 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1397 
1398 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1399 	    (hdev->tc_max < 1)) {
1400 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1401 			 hdev->tc_max);
1402 		hdev->tc_max = 1;
1403 	}
1404 
1405 	/* Dev does not support DCB */
1406 	if (!hnae3_dev_dcb_supported(hdev)) {
1407 		hdev->tc_max = 1;
1408 		hdev->pfc_max = 0;
1409 	} else {
1410 		hdev->pfc_max = hdev->tc_max;
1411 	}
1412 
1413 	hdev->tm_info.num_tc = 1;
1414 
1415 	/* Currently not support uncontiuous tc */
1416 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1417 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1418 
1419 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1420 
1421 	hclge_init_kdump_kernel_config(hdev);
1422 
1423 	/* Set the init affinity based on pci func number */
1424 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1425 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1426 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1427 			&hdev->affinity_mask);
1428 
1429 	return ret;
1430 }
1431 
1432 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1433 			    unsigned int tso_mss_max)
1434 {
1435 	struct hclge_cfg_tso_status_cmd *req;
1436 	struct hclge_desc desc;
1437 	u16 tso_mss;
1438 
1439 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440 
1441 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442 
1443 	tso_mss = 0;
1444 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1445 			HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1446 	req->tso_mss_min = cpu_to_le16(tso_mss);
1447 
1448 	tso_mss = 0;
1449 	hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1450 			HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1451 	req->tso_mss_max = cpu_to_le16(tso_mss);
1452 
1453 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1454 }
1455 
1456 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1457 {
1458 	struct hclge_cfg_gro_status_cmd *req;
1459 	struct hclge_desc desc;
1460 	int ret;
1461 
1462 	if (!hnae3_dev_gro_supported(hdev))
1463 		return 0;
1464 
1465 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1466 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1467 
1468 	req->gro_en = cpu_to_le16(en ? 1 : 0);
1469 
1470 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1471 	if (ret)
1472 		dev_err(&hdev->pdev->dev,
1473 			"GRO hardware config cmd failed, ret = %d\n", ret);
1474 
1475 	return ret;
1476 }
1477 
1478 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1479 {
1480 	struct hclge_tqp *tqp;
1481 	int i;
1482 
1483 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1484 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1485 	if (!hdev->htqp)
1486 		return -ENOMEM;
1487 
1488 	tqp = hdev->htqp;
1489 
1490 	for (i = 0; i < hdev->num_tqps; i++) {
1491 		tqp->dev = &hdev->pdev->dev;
1492 		tqp->index = i;
1493 
1494 		tqp->q.ae_algo = &ae_algo;
1495 		tqp->q.buf_size = hdev->rx_buf_len;
1496 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1497 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1498 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1499 			i * HCLGE_TQP_REG_SIZE;
1500 
1501 		tqp++;
1502 	}
1503 
1504 	return 0;
1505 }
1506 
1507 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1508 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1509 {
1510 	struct hclge_tqp_map_cmd *req;
1511 	struct hclge_desc desc;
1512 	int ret;
1513 
1514 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1515 
1516 	req = (struct hclge_tqp_map_cmd *)desc.data;
1517 	req->tqp_id = cpu_to_le16(tqp_pid);
1518 	req->tqp_vf = func_id;
1519 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1520 	if (!is_pf)
1521 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1522 	req->tqp_vid = cpu_to_le16(tqp_vid);
1523 
1524 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1525 	if (ret)
1526 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1527 
1528 	return ret;
1529 }
1530 
1531 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1532 {
1533 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1534 	struct hclge_dev *hdev = vport->back;
1535 	int i, alloced;
1536 
1537 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1538 	     alloced < num_tqps; i++) {
1539 		if (!hdev->htqp[i].alloced) {
1540 			hdev->htqp[i].q.handle = &vport->nic;
1541 			hdev->htqp[i].q.tqp_index = alloced;
1542 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1543 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1544 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1545 			hdev->htqp[i].alloced = true;
1546 			alloced++;
1547 		}
1548 	}
1549 	vport->alloc_tqps = alloced;
1550 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1551 				vport->alloc_tqps / hdev->tm_info.num_tc);
1552 
1553 	/* ensure one to one mapping between irq and queue at default */
1554 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1555 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1556 
1557 	return 0;
1558 }
1559 
1560 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1561 			    u16 num_tx_desc, u16 num_rx_desc)
1562 
1563 {
1564 	struct hnae3_handle *nic = &vport->nic;
1565 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1566 	struct hclge_dev *hdev = vport->back;
1567 	int ret;
1568 
1569 	kinfo->num_tx_desc = num_tx_desc;
1570 	kinfo->num_rx_desc = num_rx_desc;
1571 
1572 	kinfo->rx_buf_len = hdev->rx_buf_len;
1573 
1574 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1575 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1576 	if (!kinfo->tqp)
1577 		return -ENOMEM;
1578 
1579 	ret = hclge_assign_tqp(vport, num_tqps);
1580 	if (ret)
1581 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1582 
1583 	return ret;
1584 }
1585 
1586 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1587 				  struct hclge_vport *vport)
1588 {
1589 	struct hnae3_handle *nic = &vport->nic;
1590 	struct hnae3_knic_private_info *kinfo;
1591 	u16 i;
1592 
1593 	kinfo = &nic->kinfo;
1594 	for (i = 0; i < vport->alloc_tqps; i++) {
1595 		struct hclge_tqp *q =
1596 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1597 		bool is_pf;
1598 		int ret;
1599 
1600 		is_pf = !(vport->vport_id);
1601 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1602 					     i, is_pf);
1603 		if (ret)
1604 			return ret;
1605 	}
1606 
1607 	return 0;
1608 }
1609 
1610 static int hclge_map_tqp(struct hclge_dev *hdev)
1611 {
1612 	struct hclge_vport *vport = hdev->vport;
1613 	u16 i, num_vport;
1614 
1615 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1616 	for (i = 0; i < num_vport; i++)	{
1617 		int ret;
1618 
1619 		ret = hclge_map_tqp_to_vport(hdev, vport);
1620 		if (ret)
1621 			return ret;
1622 
1623 		vport++;
1624 	}
1625 
1626 	return 0;
1627 }
1628 
1629 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1630 {
1631 	struct hnae3_handle *nic = &vport->nic;
1632 	struct hclge_dev *hdev = vport->back;
1633 	int ret;
1634 
1635 	nic->pdev = hdev->pdev;
1636 	nic->ae_algo = &ae_algo;
1637 	nic->numa_node_mask = hdev->numa_node_mask;
1638 
1639 	ret = hclge_knic_setup(vport, num_tqps,
1640 			       hdev->num_tx_desc, hdev->num_rx_desc);
1641 	if (ret)
1642 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1643 
1644 	return ret;
1645 }
1646 
1647 static int hclge_alloc_vport(struct hclge_dev *hdev)
1648 {
1649 	struct pci_dev *pdev = hdev->pdev;
1650 	struct hclge_vport *vport;
1651 	u32 tqp_main_vport;
1652 	u32 tqp_per_vport;
1653 	int num_vport, i;
1654 	int ret;
1655 
1656 	/* We need to alloc a vport for main NIC of PF */
1657 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1658 
1659 	if (hdev->num_tqps < num_vport) {
1660 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1661 			hdev->num_tqps, num_vport);
1662 		return -EINVAL;
1663 	}
1664 
1665 	/* Alloc the same number of TQPs for every vport */
1666 	tqp_per_vport = hdev->num_tqps / num_vport;
1667 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1668 
1669 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1670 			     GFP_KERNEL);
1671 	if (!vport)
1672 		return -ENOMEM;
1673 
1674 	hdev->vport = vport;
1675 	hdev->num_alloc_vport = num_vport;
1676 
1677 	if (IS_ENABLED(CONFIG_PCI_IOV))
1678 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1679 
1680 	for (i = 0; i < num_vport; i++) {
1681 		vport->back = hdev;
1682 		vport->vport_id = i;
1683 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1684 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1685 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1686 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1687 		INIT_LIST_HEAD(&vport->vlan_list);
1688 		INIT_LIST_HEAD(&vport->uc_mac_list);
1689 		INIT_LIST_HEAD(&vport->mc_mac_list);
1690 
1691 		if (i == 0)
1692 			ret = hclge_vport_setup(vport, tqp_main_vport);
1693 		else
1694 			ret = hclge_vport_setup(vport, tqp_per_vport);
1695 		if (ret) {
1696 			dev_err(&pdev->dev,
1697 				"vport setup failed for vport %d, %d\n",
1698 				i, ret);
1699 			return ret;
1700 		}
1701 
1702 		vport++;
1703 	}
1704 
1705 	return 0;
1706 }
1707 
1708 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1709 				    struct hclge_pkt_buf_alloc *buf_alloc)
1710 {
1711 /* TX buffer size is unit by 128 byte */
1712 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1713 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1714 	struct hclge_tx_buff_alloc_cmd *req;
1715 	struct hclge_desc desc;
1716 	int ret;
1717 	u8 i;
1718 
1719 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1720 
1721 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1722 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1723 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1724 
1725 		req->tx_pkt_buff[i] =
1726 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1727 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1728 	}
1729 
1730 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1731 	if (ret)
1732 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1733 			ret);
1734 
1735 	return ret;
1736 }
1737 
1738 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1739 				 struct hclge_pkt_buf_alloc *buf_alloc)
1740 {
1741 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1742 
1743 	if (ret)
1744 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1745 
1746 	return ret;
1747 }
1748 
1749 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1750 {
1751 	unsigned int i;
1752 	u32 cnt = 0;
1753 
1754 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1755 		if (hdev->hw_tc_map & BIT(i))
1756 			cnt++;
1757 	return cnt;
1758 }
1759 
1760 /* Get the number of pfc enabled TCs, which have private buffer */
1761 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1762 				  struct hclge_pkt_buf_alloc *buf_alloc)
1763 {
1764 	struct hclge_priv_buf *priv;
1765 	unsigned int i;
1766 	int cnt = 0;
1767 
1768 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1769 		priv = &buf_alloc->priv_buf[i];
1770 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1771 		    priv->enable)
1772 			cnt++;
1773 	}
1774 
1775 	return cnt;
1776 }
1777 
1778 /* Get the number of pfc disabled TCs, which have private buffer */
1779 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1780 				     struct hclge_pkt_buf_alloc *buf_alloc)
1781 {
1782 	struct hclge_priv_buf *priv;
1783 	unsigned int i;
1784 	int cnt = 0;
1785 
1786 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1787 		priv = &buf_alloc->priv_buf[i];
1788 		if (hdev->hw_tc_map & BIT(i) &&
1789 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1790 		    priv->enable)
1791 			cnt++;
1792 	}
1793 
1794 	return cnt;
1795 }
1796 
1797 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1798 {
1799 	struct hclge_priv_buf *priv;
1800 	u32 rx_priv = 0;
1801 	int i;
1802 
1803 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1804 		priv = &buf_alloc->priv_buf[i];
1805 		if (priv->enable)
1806 			rx_priv += priv->buf_size;
1807 	}
1808 	return rx_priv;
1809 }
1810 
1811 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1812 {
1813 	u32 i, total_tx_size = 0;
1814 
1815 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1816 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1817 
1818 	return total_tx_size;
1819 }
1820 
1821 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1822 				struct hclge_pkt_buf_alloc *buf_alloc,
1823 				u32 rx_all)
1824 {
1825 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1826 	u32 tc_num = hclge_get_tc_num(hdev);
1827 	u32 shared_buf, aligned_mps;
1828 	u32 rx_priv;
1829 	int i;
1830 
1831 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1832 
1833 	if (hnae3_dev_dcb_supported(hdev))
1834 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1835 					hdev->dv_buf_size;
1836 	else
1837 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1838 					+ hdev->dv_buf_size;
1839 
1840 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1841 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1842 			     HCLGE_BUF_SIZE_UNIT);
1843 
1844 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1845 	if (rx_all < rx_priv + shared_std)
1846 		return false;
1847 
1848 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1849 	buf_alloc->s_buf.buf_size = shared_buf;
1850 	if (hnae3_dev_dcb_supported(hdev)) {
1851 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1852 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1853 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1854 				  HCLGE_BUF_SIZE_UNIT);
1855 	} else {
1856 		buf_alloc->s_buf.self.high = aligned_mps +
1857 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1858 		buf_alloc->s_buf.self.low = aligned_mps;
1859 	}
1860 
1861 	if (hnae3_dev_dcb_supported(hdev)) {
1862 		hi_thrd = shared_buf - hdev->dv_buf_size;
1863 
1864 		if (tc_num <= NEED_RESERVE_TC_NUM)
1865 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1866 					/ BUF_MAX_PERCENT;
1867 
1868 		if (tc_num)
1869 			hi_thrd = hi_thrd / tc_num;
1870 
1871 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1872 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1873 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1874 	} else {
1875 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1876 		lo_thrd = aligned_mps;
1877 	}
1878 
1879 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1880 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1881 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1882 	}
1883 
1884 	return true;
1885 }
1886 
1887 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1888 				struct hclge_pkt_buf_alloc *buf_alloc)
1889 {
1890 	u32 i, total_size;
1891 
1892 	total_size = hdev->pkt_buf_size;
1893 
1894 	/* alloc tx buffer for all enabled tc */
1895 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1896 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1897 
1898 		if (hdev->hw_tc_map & BIT(i)) {
1899 			if (total_size < hdev->tx_buf_size)
1900 				return -ENOMEM;
1901 
1902 			priv->tx_buf_size = hdev->tx_buf_size;
1903 		} else {
1904 			priv->tx_buf_size = 0;
1905 		}
1906 
1907 		total_size -= priv->tx_buf_size;
1908 	}
1909 
1910 	return 0;
1911 }
1912 
1913 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1914 				  struct hclge_pkt_buf_alloc *buf_alloc)
1915 {
1916 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1917 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1918 	unsigned int i;
1919 
1920 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1921 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1922 
1923 		priv->enable = 0;
1924 		priv->wl.low = 0;
1925 		priv->wl.high = 0;
1926 		priv->buf_size = 0;
1927 
1928 		if (!(hdev->hw_tc_map & BIT(i)))
1929 			continue;
1930 
1931 		priv->enable = 1;
1932 
1933 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1934 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1935 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1936 						HCLGE_BUF_SIZE_UNIT);
1937 		} else {
1938 			priv->wl.low = 0;
1939 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1940 					aligned_mps;
1941 		}
1942 
1943 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1944 	}
1945 
1946 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1947 }
1948 
1949 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1950 					  struct hclge_pkt_buf_alloc *buf_alloc)
1951 {
1952 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1953 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1954 	int i;
1955 
1956 	/* let the last to be cleared first */
1957 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1958 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1959 		unsigned int mask = BIT((unsigned int)i);
1960 
1961 		if (hdev->hw_tc_map & mask &&
1962 		    !(hdev->tm_info.hw_pfc_map & mask)) {
1963 			/* Clear the no pfc TC private buffer */
1964 			priv->wl.low = 0;
1965 			priv->wl.high = 0;
1966 			priv->buf_size = 0;
1967 			priv->enable = 0;
1968 			no_pfc_priv_num--;
1969 		}
1970 
1971 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1972 		    no_pfc_priv_num == 0)
1973 			break;
1974 	}
1975 
1976 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1977 }
1978 
1979 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1980 					struct hclge_pkt_buf_alloc *buf_alloc)
1981 {
1982 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1983 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1984 	int i;
1985 
1986 	/* let the last to be cleared first */
1987 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1988 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1989 		unsigned int mask = BIT((unsigned int)i);
1990 
1991 		if (hdev->hw_tc_map & mask &&
1992 		    hdev->tm_info.hw_pfc_map & mask) {
1993 			/* Reduce the number of pfc TC with private buffer */
1994 			priv->wl.low = 0;
1995 			priv->enable = 0;
1996 			priv->wl.high = 0;
1997 			priv->buf_size = 0;
1998 			pfc_priv_num--;
1999 		}
2000 
2001 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2002 		    pfc_priv_num == 0)
2003 			break;
2004 	}
2005 
2006 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2007 }
2008 
2009 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2010 				      struct hclge_pkt_buf_alloc *buf_alloc)
2011 {
2012 #define COMPENSATE_BUFFER	0x3C00
2013 #define COMPENSATE_HALF_MPS_NUM	5
2014 #define PRIV_WL_GAP		0x1800
2015 
2016 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2017 	u32 tc_num = hclge_get_tc_num(hdev);
2018 	u32 half_mps = hdev->mps >> 1;
2019 	u32 min_rx_priv;
2020 	unsigned int i;
2021 
2022 	if (tc_num)
2023 		rx_priv = rx_priv / tc_num;
2024 
2025 	if (tc_num <= NEED_RESERVE_TC_NUM)
2026 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2027 
2028 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2029 			COMPENSATE_HALF_MPS_NUM * half_mps;
2030 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2031 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2032 
2033 	if (rx_priv < min_rx_priv)
2034 		return false;
2035 
2036 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2037 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2038 
2039 		priv->enable = 0;
2040 		priv->wl.low = 0;
2041 		priv->wl.high = 0;
2042 		priv->buf_size = 0;
2043 
2044 		if (!(hdev->hw_tc_map & BIT(i)))
2045 			continue;
2046 
2047 		priv->enable = 1;
2048 		priv->buf_size = rx_priv;
2049 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2050 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2051 	}
2052 
2053 	buf_alloc->s_buf.buf_size = 0;
2054 
2055 	return true;
2056 }
2057 
2058 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2059  * @hdev: pointer to struct hclge_dev
2060  * @buf_alloc: pointer to buffer calculation data
2061  * @return: 0: calculate sucessful, negative: fail
2062  */
2063 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2064 				struct hclge_pkt_buf_alloc *buf_alloc)
2065 {
2066 	/* When DCB is not supported, rx private buffer is not allocated. */
2067 	if (!hnae3_dev_dcb_supported(hdev)) {
2068 		u32 rx_all = hdev->pkt_buf_size;
2069 
2070 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2071 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2072 			return -ENOMEM;
2073 
2074 		return 0;
2075 	}
2076 
2077 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2078 		return 0;
2079 
2080 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2081 		return 0;
2082 
2083 	/* try to decrease the buffer size */
2084 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2085 		return 0;
2086 
2087 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2088 		return 0;
2089 
2090 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2091 		return 0;
2092 
2093 	return -ENOMEM;
2094 }
2095 
2096 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2097 				   struct hclge_pkt_buf_alloc *buf_alloc)
2098 {
2099 	struct hclge_rx_priv_buff_cmd *req;
2100 	struct hclge_desc desc;
2101 	int ret;
2102 	int i;
2103 
2104 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2105 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2106 
2107 	/* Alloc private buffer TCs */
2108 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2109 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2110 
2111 		req->buf_num[i] =
2112 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2113 		req->buf_num[i] |=
2114 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2115 	}
2116 
2117 	req->shared_buf =
2118 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2119 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2120 
2121 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2122 	if (ret)
2123 		dev_err(&hdev->pdev->dev,
2124 			"rx private buffer alloc cmd failed %d\n", ret);
2125 
2126 	return ret;
2127 }
2128 
2129 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2130 				   struct hclge_pkt_buf_alloc *buf_alloc)
2131 {
2132 	struct hclge_rx_priv_wl_buf *req;
2133 	struct hclge_priv_buf *priv;
2134 	struct hclge_desc desc[2];
2135 	int i, j;
2136 	int ret;
2137 
2138 	for (i = 0; i < 2; i++) {
2139 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2140 					   false);
2141 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2142 
2143 		/* The first descriptor set the NEXT bit to 1 */
2144 		if (i == 0)
2145 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2146 		else
2147 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2148 
2149 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2150 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2151 
2152 			priv = &buf_alloc->priv_buf[idx];
2153 			req->tc_wl[j].high =
2154 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2155 			req->tc_wl[j].high |=
2156 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2157 			req->tc_wl[j].low =
2158 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2159 			req->tc_wl[j].low |=
2160 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2161 		}
2162 	}
2163 
2164 	/* Send 2 descriptor at one time */
2165 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2166 	if (ret)
2167 		dev_err(&hdev->pdev->dev,
2168 			"rx private waterline config cmd failed %d\n",
2169 			ret);
2170 	return ret;
2171 }
2172 
2173 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2174 				    struct hclge_pkt_buf_alloc *buf_alloc)
2175 {
2176 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2177 	struct hclge_rx_com_thrd *req;
2178 	struct hclge_desc desc[2];
2179 	struct hclge_tc_thrd *tc;
2180 	int i, j;
2181 	int ret;
2182 
2183 	for (i = 0; i < 2; i++) {
2184 		hclge_cmd_setup_basic_desc(&desc[i],
2185 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2186 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2187 
2188 		/* The first descriptor set the NEXT bit to 1 */
2189 		if (i == 0)
2190 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2191 		else
2192 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2193 
2194 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2195 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2196 
2197 			req->com_thrd[j].high =
2198 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2199 			req->com_thrd[j].high |=
2200 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2201 			req->com_thrd[j].low =
2202 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2203 			req->com_thrd[j].low |=
2204 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2205 		}
2206 	}
2207 
2208 	/* Send 2 descriptors at one time */
2209 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2210 	if (ret)
2211 		dev_err(&hdev->pdev->dev,
2212 			"common threshold config cmd failed %d\n", ret);
2213 	return ret;
2214 }
2215 
2216 static int hclge_common_wl_config(struct hclge_dev *hdev,
2217 				  struct hclge_pkt_buf_alloc *buf_alloc)
2218 {
2219 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2220 	struct hclge_rx_com_wl *req;
2221 	struct hclge_desc desc;
2222 	int ret;
2223 
2224 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2225 
2226 	req = (struct hclge_rx_com_wl *)desc.data;
2227 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2228 	req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2229 
2230 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2231 	req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232 
2233 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2234 	if (ret)
2235 		dev_err(&hdev->pdev->dev,
2236 			"common waterline config cmd failed %d\n", ret);
2237 
2238 	return ret;
2239 }
2240 
2241 int hclge_buffer_alloc(struct hclge_dev *hdev)
2242 {
2243 	struct hclge_pkt_buf_alloc *pkt_buf;
2244 	int ret;
2245 
2246 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2247 	if (!pkt_buf)
2248 		return -ENOMEM;
2249 
2250 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2251 	if (ret) {
2252 		dev_err(&hdev->pdev->dev,
2253 			"could not calc tx buffer size for all TCs %d\n", ret);
2254 		goto out;
2255 	}
2256 
2257 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2258 	if (ret) {
2259 		dev_err(&hdev->pdev->dev,
2260 			"could not alloc tx buffers %d\n", ret);
2261 		goto out;
2262 	}
2263 
2264 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2265 	if (ret) {
2266 		dev_err(&hdev->pdev->dev,
2267 			"could not calc rx priv buffer size for all TCs %d\n",
2268 			ret);
2269 		goto out;
2270 	}
2271 
2272 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2273 	if (ret) {
2274 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2275 			ret);
2276 		goto out;
2277 	}
2278 
2279 	if (hnae3_dev_dcb_supported(hdev)) {
2280 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2281 		if (ret) {
2282 			dev_err(&hdev->pdev->dev,
2283 				"could not configure rx private waterline %d\n",
2284 				ret);
2285 			goto out;
2286 		}
2287 
2288 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2289 		if (ret) {
2290 			dev_err(&hdev->pdev->dev,
2291 				"could not configure common threshold %d\n",
2292 				ret);
2293 			goto out;
2294 		}
2295 	}
2296 
2297 	ret = hclge_common_wl_config(hdev, pkt_buf);
2298 	if (ret)
2299 		dev_err(&hdev->pdev->dev,
2300 			"could not configure common waterline %d\n", ret);
2301 
2302 out:
2303 	kfree(pkt_buf);
2304 	return ret;
2305 }
2306 
2307 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2308 {
2309 	struct hnae3_handle *roce = &vport->roce;
2310 	struct hnae3_handle *nic = &vport->nic;
2311 
2312 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2313 
2314 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2315 	    vport->back->num_msi_left == 0)
2316 		return -EINVAL;
2317 
2318 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2319 
2320 	roce->rinfo.netdev = nic->kinfo.netdev;
2321 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2322 
2323 	roce->pdev = nic->pdev;
2324 	roce->ae_algo = nic->ae_algo;
2325 	roce->numa_node_mask = nic->numa_node_mask;
2326 
2327 	return 0;
2328 }
2329 
2330 static int hclge_init_msi(struct hclge_dev *hdev)
2331 {
2332 	struct pci_dev *pdev = hdev->pdev;
2333 	int vectors;
2334 	int i;
2335 
2336 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2337 					hdev->num_msi,
2338 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2339 	if (vectors < 0) {
2340 		dev_err(&pdev->dev,
2341 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2342 			vectors);
2343 		return vectors;
2344 	}
2345 	if (vectors < hdev->num_msi)
2346 		dev_warn(&hdev->pdev->dev,
2347 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2348 			 hdev->num_msi, vectors);
2349 
2350 	hdev->num_msi = vectors;
2351 	hdev->num_msi_left = vectors;
2352 
2353 	hdev->base_msi_vector = pdev->irq;
2354 	hdev->roce_base_vector = hdev->base_msi_vector +
2355 				hdev->roce_base_msix_offset;
2356 
2357 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2358 					   sizeof(u16), GFP_KERNEL);
2359 	if (!hdev->vector_status) {
2360 		pci_free_irq_vectors(pdev);
2361 		return -ENOMEM;
2362 	}
2363 
2364 	for (i = 0; i < hdev->num_msi; i++)
2365 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2366 
2367 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2368 					sizeof(int), GFP_KERNEL);
2369 	if (!hdev->vector_irq) {
2370 		pci_free_irq_vectors(pdev);
2371 		return -ENOMEM;
2372 	}
2373 
2374 	return 0;
2375 }
2376 
2377 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2378 {
2379 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2380 		duplex = HCLGE_MAC_FULL;
2381 
2382 	return duplex;
2383 }
2384 
2385 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2386 				      u8 duplex)
2387 {
2388 	struct hclge_config_mac_speed_dup_cmd *req;
2389 	struct hclge_desc desc;
2390 	int ret;
2391 
2392 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2393 
2394 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2395 
2396 	if (duplex)
2397 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2398 
2399 	switch (speed) {
2400 	case HCLGE_MAC_SPEED_10M:
2401 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2402 				HCLGE_CFG_SPEED_S, 6);
2403 		break;
2404 	case HCLGE_MAC_SPEED_100M:
2405 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2406 				HCLGE_CFG_SPEED_S, 7);
2407 		break;
2408 	case HCLGE_MAC_SPEED_1G:
2409 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2410 				HCLGE_CFG_SPEED_S, 0);
2411 		break;
2412 	case HCLGE_MAC_SPEED_10G:
2413 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2414 				HCLGE_CFG_SPEED_S, 1);
2415 		break;
2416 	case HCLGE_MAC_SPEED_25G:
2417 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2418 				HCLGE_CFG_SPEED_S, 2);
2419 		break;
2420 	case HCLGE_MAC_SPEED_40G:
2421 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2422 				HCLGE_CFG_SPEED_S, 3);
2423 		break;
2424 	case HCLGE_MAC_SPEED_50G:
2425 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2426 				HCLGE_CFG_SPEED_S, 4);
2427 		break;
2428 	case HCLGE_MAC_SPEED_100G:
2429 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2430 				HCLGE_CFG_SPEED_S, 5);
2431 		break;
2432 	default:
2433 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2434 		return -EINVAL;
2435 	}
2436 
2437 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2438 		      1);
2439 
2440 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2441 	if (ret) {
2442 		dev_err(&hdev->pdev->dev,
2443 			"mac speed/duplex config cmd failed %d.\n", ret);
2444 		return ret;
2445 	}
2446 
2447 	return 0;
2448 }
2449 
2450 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2451 {
2452 	struct hclge_mac *mac = &hdev->hw.mac;
2453 	int ret;
2454 
2455 	duplex = hclge_check_speed_dup(duplex, speed);
2456 	if (!mac->support_autoneg && mac->speed == speed &&
2457 	    mac->duplex == duplex)
2458 		return 0;
2459 
2460 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2461 	if (ret)
2462 		return ret;
2463 
2464 	hdev->hw.mac.speed = speed;
2465 	hdev->hw.mac.duplex = duplex;
2466 
2467 	return 0;
2468 }
2469 
2470 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2471 				     u8 duplex)
2472 {
2473 	struct hclge_vport *vport = hclge_get_vport(handle);
2474 	struct hclge_dev *hdev = vport->back;
2475 
2476 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2477 }
2478 
2479 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2480 {
2481 	struct hclge_config_auto_neg_cmd *req;
2482 	struct hclge_desc desc;
2483 	u32 flag = 0;
2484 	int ret;
2485 
2486 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2487 
2488 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2489 	if (enable)
2490 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2491 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2492 
2493 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2494 	if (ret)
2495 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2496 			ret);
2497 
2498 	return ret;
2499 }
2500 
2501 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2502 {
2503 	struct hclge_vport *vport = hclge_get_vport(handle);
2504 	struct hclge_dev *hdev = vport->back;
2505 
2506 	if (!hdev->hw.mac.support_autoneg) {
2507 		if (enable) {
2508 			dev_err(&hdev->pdev->dev,
2509 				"autoneg is not supported by current port\n");
2510 			return -EOPNOTSUPP;
2511 		} else {
2512 			return 0;
2513 		}
2514 	}
2515 
2516 	return hclge_set_autoneg_en(hdev, enable);
2517 }
2518 
2519 static int hclge_get_autoneg(struct hnae3_handle *handle)
2520 {
2521 	struct hclge_vport *vport = hclge_get_vport(handle);
2522 	struct hclge_dev *hdev = vport->back;
2523 	struct phy_device *phydev = hdev->hw.mac.phydev;
2524 
2525 	if (phydev)
2526 		return phydev->autoneg;
2527 
2528 	return hdev->hw.mac.autoneg;
2529 }
2530 
2531 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2532 {
2533 	struct hclge_vport *vport = hclge_get_vport(handle);
2534 	struct hclge_dev *hdev = vport->back;
2535 	int ret;
2536 
2537 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2538 
2539 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2540 	if (ret)
2541 		return ret;
2542 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2543 }
2544 
2545 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2546 {
2547 	struct hclge_vport *vport = hclge_get_vport(handle);
2548 	struct hclge_dev *hdev = vport->back;
2549 
2550 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2551 		return hclge_set_autoneg_en(hdev, !halt);
2552 
2553 	return 0;
2554 }
2555 
2556 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2557 {
2558 	struct hclge_config_fec_cmd *req;
2559 	struct hclge_desc desc;
2560 	int ret;
2561 
2562 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2563 
2564 	req = (struct hclge_config_fec_cmd *)desc.data;
2565 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2566 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2567 	if (fec_mode & BIT(HNAE3_FEC_RS))
2568 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2569 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2570 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2571 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2572 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2573 
2574 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2575 	if (ret)
2576 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2577 
2578 	return ret;
2579 }
2580 
2581 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2582 {
2583 	struct hclge_vport *vport = hclge_get_vport(handle);
2584 	struct hclge_dev *hdev = vport->back;
2585 	struct hclge_mac *mac = &hdev->hw.mac;
2586 	int ret;
2587 
2588 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2589 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2590 		return -EINVAL;
2591 	}
2592 
2593 	ret = hclge_set_fec_hw(hdev, fec_mode);
2594 	if (ret)
2595 		return ret;
2596 
2597 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2598 	return 0;
2599 }
2600 
2601 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2602 			  u8 *fec_mode)
2603 {
2604 	struct hclge_vport *vport = hclge_get_vport(handle);
2605 	struct hclge_dev *hdev = vport->back;
2606 	struct hclge_mac *mac = &hdev->hw.mac;
2607 
2608 	if (fec_ability)
2609 		*fec_ability = mac->fec_ability;
2610 	if (fec_mode)
2611 		*fec_mode = mac->fec_mode;
2612 }
2613 
2614 static int hclge_mac_init(struct hclge_dev *hdev)
2615 {
2616 	struct hclge_mac *mac = &hdev->hw.mac;
2617 	int ret;
2618 
2619 	hdev->support_sfp_query = true;
2620 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2621 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2622 					 hdev->hw.mac.duplex);
2623 	if (ret)
2624 		return ret;
2625 
2626 	if (hdev->hw.mac.support_autoneg) {
2627 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2628 		if (ret)
2629 			return ret;
2630 	}
2631 
2632 	mac->link = 0;
2633 
2634 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2635 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2636 		if (ret)
2637 			return ret;
2638 	}
2639 
2640 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2641 	if (ret) {
2642 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2643 		return ret;
2644 	}
2645 
2646 	ret = hclge_set_default_loopback(hdev);
2647 	if (ret)
2648 		return ret;
2649 
2650 	ret = hclge_buffer_alloc(hdev);
2651 	if (ret)
2652 		dev_err(&hdev->pdev->dev,
2653 			"allocate buffer fail, ret=%d\n", ret);
2654 
2655 	return ret;
2656 }
2657 
2658 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2659 {
2660 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2661 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2662 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2663 				    hclge_wq, &hdev->service_task, 0);
2664 }
2665 
2666 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2667 {
2668 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2669 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2670 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2671 				    hclge_wq, &hdev->service_task, 0);
2672 }
2673 
2674 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2675 {
2676 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2677 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2678 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2679 				    hclge_wq, &hdev->service_task,
2680 				    delay_time);
2681 }
2682 
2683 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2684 {
2685 	struct hclge_link_status_cmd *req;
2686 	struct hclge_desc desc;
2687 	int link_status;
2688 	int ret;
2689 
2690 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2691 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2692 	if (ret) {
2693 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2694 			ret);
2695 		return ret;
2696 	}
2697 
2698 	req = (struct hclge_link_status_cmd *)desc.data;
2699 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2700 
2701 	return !!link_status;
2702 }
2703 
2704 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2705 {
2706 	unsigned int mac_state;
2707 	int link_stat;
2708 
2709 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2710 		return 0;
2711 
2712 	mac_state = hclge_get_mac_link_status(hdev);
2713 
2714 	if (hdev->hw.mac.phydev) {
2715 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2716 			link_stat = mac_state &
2717 				hdev->hw.mac.phydev->link;
2718 		else
2719 			link_stat = 0;
2720 
2721 	} else {
2722 		link_stat = mac_state;
2723 	}
2724 
2725 	return !!link_stat;
2726 }
2727 
2728 static void hclge_update_link_status(struct hclge_dev *hdev)
2729 {
2730 	struct hnae3_client *rclient = hdev->roce_client;
2731 	struct hnae3_client *client = hdev->nic_client;
2732 	struct hnae3_handle *rhandle;
2733 	struct hnae3_handle *handle;
2734 	int state;
2735 	int i;
2736 
2737 	if (!client)
2738 		return;
2739 
2740 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2741 		return;
2742 
2743 	state = hclge_get_mac_phy_link(hdev);
2744 	if (state != hdev->hw.mac.link) {
2745 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2746 			handle = &hdev->vport[i].nic;
2747 			client->ops->link_status_change(handle, state);
2748 			hclge_config_mac_tnl_int(hdev, state);
2749 			rhandle = &hdev->vport[i].roce;
2750 			if (rclient && rclient->ops->link_status_change)
2751 				rclient->ops->link_status_change(rhandle,
2752 								 state);
2753 		}
2754 		hdev->hw.mac.link = state;
2755 	}
2756 
2757 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2758 }
2759 
2760 static void hclge_update_port_capability(struct hclge_mac *mac)
2761 {
2762 	/* update fec ability by speed */
2763 	hclge_convert_setting_fec(mac);
2764 
2765 	/* firmware can not identify back plane type, the media type
2766 	 * read from configuration can help deal it
2767 	 */
2768 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2769 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2770 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2771 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2772 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2773 
2774 	if (mac->support_autoneg) {
2775 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2776 		linkmode_copy(mac->advertising, mac->supported);
2777 	} else {
2778 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2779 				   mac->supported);
2780 		linkmode_zero(mac->advertising);
2781 	}
2782 }
2783 
2784 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2785 {
2786 	struct hclge_sfp_info_cmd *resp;
2787 	struct hclge_desc desc;
2788 	int ret;
2789 
2790 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2791 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2792 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2793 	if (ret == -EOPNOTSUPP) {
2794 		dev_warn(&hdev->pdev->dev,
2795 			 "IMP do not support get SFP speed %d\n", ret);
2796 		return ret;
2797 	} else if (ret) {
2798 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2799 		return ret;
2800 	}
2801 
2802 	*speed = le32_to_cpu(resp->speed);
2803 
2804 	return 0;
2805 }
2806 
2807 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2808 {
2809 	struct hclge_sfp_info_cmd *resp;
2810 	struct hclge_desc desc;
2811 	int ret;
2812 
2813 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2814 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2815 
2816 	resp->query_type = QUERY_ACTIVE_SPEED;
2817 
2818 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2819 	if (ret == -EOPNOTSUPP) {
2820 		dev_warn(&hdev->pdev->dev,
2821 			 "IMP does not support get SFP info %d\n", ret);
2822 		return ret;
2823 	} else if (ret) {
2824 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2825 		return ret;
2826 	}
2827 
2828 	/* In some case, mac speed get from IMP may be 0, it shouldn't be
2829 	 * set to mac->speed.
2830 	 */
2831 	if (!le32_to_cpu(resp->speed))
2832 		return 0;
2833 
2834 	mac->speed = le32_to_cpu(resp->speed);
2835 	/* if resp->speed_ability is 0, it means it's an old version
2836 	 * firmware, do not update these params
2837 	 */
2838 	if (resp->speed_ability) {
2839 		mac->module_type = le32_to_cpu(resp->module_type);
2840 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2841 		mac->autoneg = resp->autoneg;
2842 		mac->support_autoneg = resp->autoneg_ability;
2843 		mac->speed_type = QUERY_ACTIVE_SPEED;
2844 		if (!resp->active_fec)
2845 			mac->fec_mode = 0;
2846 		else
2847 			mac->fec_mode = BIT(resp->active_fec);
2848 	} else {
2849 		mac->speed_type = QUERY_SFP_SPEED;
2850 	}
2851 
2852 	return 0;
2853 }
2854 
2855 static int hclge_update_port_info(struct hclge_dev *hdev)
2856 {
2857 	struct hclge_mac *mac = &hdev->hw.mac;
2858 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2859 	int ret;
2860 
2861 	/* get the port info from SFP cmd if not copper port */
2862 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2863 		return 0;
2864 
2865 	/* if IMP does not support get SFP/qSFP info, return directly */
2866 	if (!hdev->support_sfp_query)
2867 		return 0;
2868 
2869 	if (hdev->pdev->revision >= 0x21)
2870 		ret = hclge_get_sfp_info(hdev, mac);
2871 	else
2872 		ret = hclge_get_sfp_speed(hdev, &speed);
2873 
2874 	if (ret == -EOPNOTSUPP) {
2875 		hdev->support_sfp_query = false;
2876 		return ret;
2877 	} else if (ret) {
2878 		return ret;
2879 	}
2880 
2881 	if (hdev->pdev->revision >= 0x21) {
2882 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2883 			hclge_update_port_capability(mac);
2884 			return 0;
2885 		}
2886 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2887 					       HCLGE_MAC_FULL);
2888 	} else {
2889 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2890 			return 0; /* do nothing if no SFP */
2891 
2892 		/* must config full duplex for SFP */
2893 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2894 	}
2895 }
2896 
2897 static int hclge_get_status(struct hnae3_handle *handle)
2898 {
2899 	struct hclge_vport *vport = hclge_get_vport(handle);
2900 	struct hclge_dev *hdev = vport->back;
2901 
2902 	hclge_update_link_status(hdev);
2903 
2904 	return hdev->hw.mac.link;
2905 }
2906 
2907 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2908 {
2909 	if (!pci_num_vf(hdev->pdev)) {
2910 		dev_err(&hdev->pdev->dev,
2911 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
2912 		return NULL;
2913 	}
2914 
2915 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2916 		dev_err(&hdev->pdev->dev,
2917 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
2918 			vf, pci_num_vf(hdev->pdev));
2919 		return NULL;
2920 	}
2921 
2922 	/* VF start from 1 in vport */
2923 	vf += HCLGE_VF_VPORT_START_NUM;
2924 	return &hdev->vport[vf];
2925 }
2926 
2927 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2928 			       struct ifla_vf_info *ivf)
2929 {
2930 	struct hclge_vport *vport = hclge_get_vport(handle);
2931 	struct hclge_dev *hdev = vport->back;
2932 
2933 	vport = hclge_get_vf_vport(hdev, vf);
2934 	if (!vport)
2935 		return -EINVAL;
2936 
2937 	ivf->vf = vf;
2938 	ivf->linkstate = vport->vf_info.link_state;
2939 	ivf->spoofchk = vport->vf_info.spoofchk;
2940 	ivf->trusted = vport->vf_info.trusted;
2941 	ivf->min_tx_rate = 0;
2942 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2943 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2944 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2945 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2946 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
2947 
2948 	return 0;
2949 }
2950 
2951 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2952 				   int link_state)
2953 {
2954 	struct hclge_vport *vport = hclge_get_vport(handle);
2955 	struct hclge_dev *hdev = vport->back;
2956 
2957 	vport = hclge_get_vf_vport(hdev, vf);
2958 	if (!vport)
2959 		return -EINVAL;
2960 
2961 	vport->vf_info.link_state = link_state;
2962 
2963 	return 0;
2964 }
2965 
2966 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2967 {
2968 	u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2969 
2970 	/* fetch the events from their corresponding regs */
2971 	rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2972 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2973 	msix_src_reg = hclge_read_dev(&hdev->hw,
2974 				      HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2975 
2976 	/* Assumption: If by any chance reset and mailbox events are reported
2977 	 * together then we will only process reset event in this go and will
2978 	 * defer the processing of the mailbox events. Since, we would have not
2979 	 * cleared RX CMDQ event this time we would receive again another
2980 	 * interrupt from H/W just for the mailbox.
2981 	 *
2982 	 * check for vector0 reset event sources
2983 	 */
2984 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2985 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2986 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2987 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2988 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2989 		hdev->rst_stats.imp_rst_cnt++;
2990 		return HCLGE_VECTOR0_EVENT_RST;
2991 	}
2992 
2993 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2994 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2995 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2996 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2997 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2998 		hdev->rst_stats.global_rst_cnt++;
2999 		return HCLGE_VECTOR0_EVENT_RST;
3000 	}
3001 
3002 	/* check for vector0 msix event source */
3003 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3004 		*clearval = msix_src_reg;
3005 		return HCLGE_VECTOR0_EVENT_ERR;
3006 	}
3007 
3008 	/* check for vector0 mailbox(=CMDQ RX) event source */
3009 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3010 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3011 		*clearval = cmdq_src_reg;
3012 		return HCLGE_VECTOR0_EVENT_MBX;
3013 	}
3014 
3015 	/* print other vector0 event source */
3016 	dev_info(&hdev->pdev->dev,
3017 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3018 		 cmdq_src_reg, msix_src_reg);
3019 	*clearval = msix_src_reg;
3020 
3021 	return HCLGE_VECTOR0_EVENT_OTHER;
3022 }
3023 
3024 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3025 				    u32 regclr)
3026 {
3027 	switch (event_type) {
3028 	case HCLGE_VECTOR0_EVENT_RST:
3029 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3030 		break;
3031 	case HCLGE_VECTOR0_EVENT_MBX:
3032 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3033 		break;
3034 	default:
3035 		break;
3036 	}
3037 }
3038 
3039 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3040 {
3041 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3042 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3043 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3044 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3045 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3046 }
3047 
3048 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3049 {
3050 	writel(enable ? 1 : 0, vector->addr);
3051 }
3052 
3053 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3054 {
3055 	struct hclge_dev *hdev = data;
3056 	u32 clearval = 0;
3057 	u32 event_cause;
3058 
3059 	hclge_enable_vector(&hdev->misc_vector, false);
3060 	event_cause = hclge_check_event_cause(hdev, &clearval);
3061 
3062 	/* vector 0 interrupt is shared with reset and mailbox source events.*/
3063 	switch (event_cause) {
3064 	case HCLGE_VECTOR0_EVENT_ERR:
3065 		/* we do not know what type of reset is required now. This could
3066 		 * only be decided after we fetch the type of errors which
3067 		 * caused this event. Therefore, we will do below for now:
3068 		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3069 		 *    have defered type of reset to be used.
3070 		 * 2. Schedule the reset serivce task.
3071 		 * 3. When service task receives  HNAE3_UNKNOWN_RESET type it
3072 		 *    will fetch the correct type of reset.  This would be done
3073 		 *    by first decoding the types of errors.
3074 		 */
3075 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3076 		/* fall through */
3077 	case HCLGE_VECTOR0_EVENT_RST:
3078 		hclge_reset_task_schedule(hdev);
3079 		break;
3080 	case HCLGE_VECTOR0_EVENT_MBX:
3081 		/* If we are here then,
3082 		 * 1. Either we are not handling any mbx task and we are not
3083 		 *    scheduled as well
3084 		 *                        OR
3085 		 * 2. We could be handling a mbx task but nothing more is
3086 		 *    scheduled.
3087 		 * In both cases, we should schedule mbx task as there are more
3088 		 * mbx messages reported by this interrupt.
3089 		 */
3090 		hclge_mbx_task_schedule(hdev);
3091 		break;
3092 	default:
3093 		dev_warn(&hdev->pdev->dev,
3094 			 "received unknown or unhandled event of vector0\n");
3095 		break;
3096 	}
3097 
3098 	hclge_clear_event_cause(hdev, event_cause, clearval);
3099 
3100 	/* Enable interrupt if it is not cause by reset. And when
3101 	 * clearval equal to 0, it means interrupt status may be
3102 	 * cleared by hardware before driver reads status register.
3103 	 * For this case, vector0 interrupt also should be enabled.
3104 	 */
3105 	if (!clearval ||
3106 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3107 		hclge_enable_vector(&hdev->misc_vector, true);
3108 	}
3109 
3110 	return IRQ_HANDLED;
3111 }
3112 
3113 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3114 {
3115 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3116 		dev_warn(&hdev->pdev->dev,
3117 			 "vector(vector_id %d) has been freed.\n", vector_id);
3118 		return;
3119 	}
3120 
3121 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3122 	hdev->num_msi_left += 1;
3123 	hdev->num_msi_used -= 1;
3124 }
3125 
3126 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3127 {
3128 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3129 
3130 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3131 
3132 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3133 	hdev->vector_status[0] = 0;
3134 
3135 	hdev->num_msi_left -= 1;
3136 	hdev->num_msi_used += 1;
3137 }
3138 
3139 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3140 				      const cpumask_t *mask)
3141 {
3142 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3143 					      affinity_notify);
3144 
3145 	cpumask_copy(&hdev->affinity_mask, mask);
3146 }
3147 
3148 static void hclge_irq_affinity_release(struct kref *ref)
3149 {
3150 }
3151 
3152 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3153 {
3154 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3155 			      &hdev->affinity_mask);
3156 
3157 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3158 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3159 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3160 				  &hdev->affinity_notify);
3161 }
3162 
3163 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3164 {
3165 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3166 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3167 }
3168 
3169 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3170 {
3171 	int ret;
3172 
3173 	hclge_get_misc_vector(hdev);
3174 
3175 	/* this would be explicitly freed in the end */
3176 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3177 		 HCLGE_NAME, pci_name(hdev->pdev));
3178 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3179 			  0, hdev->misc_vector.name, hdev);
3180 	if (ret) {
3181 		hclge_free_vector(hdev, 0);
3182 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3183 			hdev->misc_vector.vector_irq);
3184 	}
3185 
3186 	return ret;
3187 }
3188 
3189 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3190 {
3191 	free_irq(hdev->misc_vector.vector_irq, hdev);
3192 	hclge_free_vector(hdev, 0);
3193 }
3194 
3195 int hclge_notify_client(struct hclge_dev *hdev,
3196 			enum hnae3_reset_notify_type type)
3197 {
3198 	struct hnae3_client *client = hdev->nic_client;
3199 	u16 i;
3200 
3201 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3202 		return 0;
3203 
3204 	if (!client->ops->reset_notify)
3205 		return -EOPNOTSUPP;
3206 
3207 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3208 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3209 		int ret;
3210 
3211 		ret = client->ops->reset_notify(handle, type);
3212 		if (ret) {
3213 			dev_err(&hdev->pdev->dev,
3214 				"notify nic client failed %d(%d)\n", type, ret);
3215 			return ret;
3216 		}
3217 	}
3218 
3219 	return 0;
3220 }
3221 
3222 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3223 				    enum hnae3_reset_notify_type type)
3224 {
3225 	struct hnae3_client *client = hdev->roce_client;
3226 	int ret = 0;
3227 	u16 i;
3228 
3229 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3230 		return 0;
3231 
3232 	if (!client->ops->reset_notify)
3233 		return -EOPNOTSUPP;
3234 
3235 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3236 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3237 
3238 		ret = client->ops->reset_notify(handle, type);
3239 		if (ret) {
3240 			dev_err(&hdev->pdev->dev,
3241 				"notify roce client failed %d(%d)",
3242 				type, ret);
3243 			return ret;
3244 		}
3245 	}
3246 
3247 	return ret;
3248 }
3249 
3250 static int hclge_reset_wait(struct hclge_dev *hdev)
3251 {
3252 #define HCLGE_RESET_WATI_MS	100
3253 #define HCLGE_RESET_WAIT_CNT	350
3254 
3255 	u32 val, reg, reg_bit;
3256 	u32 cnt = 0;
3257 
3258 	switch (hdev->reset_type) {
3259 	case HNAE3_IMP_RESET:
3260 		reg = HCLGE_GLOBAL_RESET_REG;
3261 		reg_bit = HCLGE_IMP_RESET_BIT;
3262 		break;
3263 	case HNAE3_GLOBAL_RESET:
3264 		reg = HCLGE_GLOBAL_RESET_REG;
3265 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3266 		break;
3267 	case HNAE3_FUNC_RESET:
3268 		reg = HCLGE_FUN_RST_ING;
3269 		reg_bit = HCLGE_FUN_RST_ING_B;
3270 		break;
3271 	default:
3272 		dev_err(&hdev->pdev->dev,
3273 			"Wait for unsupported reset type: %d\n",
3274 			hdev->reset_type);
3275 		return -EINVAL;
3276 	}
3277 
3278 	val = hclge_read_dev(&hdev->hw, reg);
3279 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3280 		msleep(HCLGE_RESET_WATI_MS);
3281 		val = hclge_read_dev(&hdev->hw, reg);
3282 		cnt++;
3283 	}
3284 
3285 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3286 		dev_warn(&hdev->pdev->dev,
3287 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3288 		return -EBUSY;
3289 	}
3290 
3291 	return 0;
3292 }
3293 
3294 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3295 {
3296 	struct hclge_vf_rst_cmd *req;
3297 	struct hclge_desc desc;
3298 
3299 	req = (struct hclge_vf_rst_cmd *)desc.data;
3300 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3301 	req->dest_vfid = func_id;
3302 
3303 	if (reset)
3304 		req->vf_rst = 0x1;
3305 
3306 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3307 }
3308 
3309 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3310 {
3311 	int i;
3312 
3313 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3314 		struct hclge_vport *vport = &hdev->vport[i];
3315 		int ret;
3316 
3317 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3318 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3319 		if (ret) {
3320 			dev_err(&hdev->pdev->dev,
3321 				"set vf(%u) rst failed %d!\n",
3322 				vport->vport_id, ret);
3323 			return ret;
3324 		}
3325 
3326 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3327 			continue;
3328 
3329 		/* Inform VF to process the reset.
3330 		 * hclge_inform_reset_assert_to_vf may fail if VF
3331 		 * driver is not loaded.
3332 		 */
3333 		ret = hclge_inform_reset_assert_to_vf(vport);
3334 		if (ret)
3335 			dev_warn(&hdev->pdev->dev,
3336 				 "inform reset to vf(%u) failed %d!\n",
3337 				 vport->vport_id, ret);
3338 	}
3339 
3340 	return 0;
3341 }
3342 
3343 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3344 {
3345 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3346 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3347 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3348 		return;
3349 
3350 	hclge_mbx_handler(hdev);
3351 
3352 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3353 }
3354 
3355 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3356 {
3357 	struct hclge_pf_rst_sync_cmd *req;
3358 	struct hclge_desc desc;
3359 	int cnt = 0;
3360 	int ret;
3361 
3362 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3363 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3364 
3365 	do {
3366 		/* vf need to down netdev by mbx during PF or FLR reset */
3367 		hclge_mailbox_service_task(hdev);
3368 
3369 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3370 		/* for compatible with old firmware, wait
3371 		 * 100 ms for VF to stop IO
3372 		 */
3373 		if (ret == -EOPNOTSUPP) {
3374 			msleep(HCLGE_RESET_SYNC_TIME);
3375 			return;
3376 		} else if (ret) {
3377 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3378 				 ret);
3379 			return;
3380 		} else if (req->all_vf_ready) {
3381 			return;
3382 		}
3383 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3384 		hclge_cmd_reuse_desc(&desc, true);
3385 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3386 
3387 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3388 }
3389 
3390 void hclge_report_hw_error(struct hclge_dev *hdev,
3391 			   enum hnae3_hw_error_type type)
3392 {
3393 	struct hnae3_client *client = hdev->nic_client;
3394 	u16 i;
3395 
3396 	if (!client || !client->ops->process_hw_error ||
3397 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3398 		return;
3399 
3400 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3401 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3402 }
3403 
3404 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3405 {
3406 	u32 reg_val;
3407 
3408 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3409 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3410 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3411 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3412 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3413 	}
3414 
3415 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3416 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3417 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3418 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3419 	}
3420 }
3421 
3422 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3423 {
3424 	struct hclge_desc desc;
3425 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3426 	int ret;
3427 
3428 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3429 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3430 	req->fun_reset_vfid = func_id;
3431 
3432 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3433 	if (ret)
3434 		dev_err(&hdev->pdev->dev,
3435 			"send function reset cmd fail, status =%d\n", ret);
3436 
3437 	return ret;
3438 }
3439 
3440 static void hclge_do_reset(struct hclge_dev *hdev)
3441 {
3442 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3443 	struct pci_dev *pdev = hdev->pdev;
3444 	u32 val;
3445 
3446 	if (hclge_get_hw_reset_stat(handle)) {
3447 		dev_info(&pdev->dev, "hardware reset not finish\n");
3448 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3449 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3450 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3451 		return;
3452 	}
3453 
3454 	switch (hdev->reset_type) {
3455 	case HNAE3_GLOBAL_RESET:
3456 		dev_info(&pdev->dev, "global reset requested\n");
3457 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3458 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3459 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3460 		break;
3461 	case HNAE3_FUNC_RESET:
3462 		dev_info(&pdev->dev, "PF reset requested\n");
3463 		/* schedule again to check later */
3464 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3465 		hclge_reset_task_schedule(hdev);
3466 		break;
3467 	default:
3468 		dev_warn(&pdev->dev,
3469 			 "unsupported reset type: %d\n", hdev->reset_type);
3470 		break;
3471 	}
3472 }
3473 
3474 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3475 						   unsigned long *addr)
3476 {
3477 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3478 	struct hclge_dev *hdev = ae_dev->priv;
3479 
3480 	/* first, resolve any unknown reset type to the known type(s) */
3481 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3482 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3483 					HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
3484 		/* we will intentionally ignore any errors from this function
3485 		 *  as we will end up in *some* reset request in any case
3486 		 */
3487 		if (hclge_handle_hw_msix_error(hdev, addr))
3488 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3489 				 msix_sts_reg);
3490 
3491 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
3492 		/* We defered the clearing of the error event which caused
3493 		 * interrupt since it was not posssible to do that in
3494 		 * interrupt context (and this is the reason we introduced
3495 		 * new UNKNOWN reset type). Now, the errors have been
3496 		 * handled and cleared in hardware we can safely enable
3497 		 * interrupts. This is an exception to the norm.
3498 		 */
3499 		hclge_enable_vector(&hdev->misc_vector, true);
3500 	}
3501 
3502 	/* return the highest priority reset level amongst all */
3503 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3504 		rst_level = HNAE3_IMP_RESET;
3505 		clear_bit(HNAE3_IMP_RESET, addr);
3506 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3507 		clear_bit(HNAE3_FUNC_RESET, addr);
3508 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3509 		rst_level = HNAE3_GLOBAL_RESET;
3510 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3511 		clear_bit(HNAE3_FUNC_RESET, addr);
3512 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3513 		rst_level = HNAE3_FUNC_RESET;
3514 		clear_bit(HNAE3_FUNC_RESET, addr);
3515 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3516 		rst_level = HNAE3_FLR_RESET;
3517 		clear_bit(HNAE3_FLR_RESET, addr);
3518 	}
3519 
3520 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3521 	    rst_level < hdev->reset_type)
3522 		return HNAE3_NONE_RESET;
3523 
3524 	return rst_level;
3525 }
3526 
3527 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3528 {
3529 	u32 clearval = 0;
3530 
3531 	switch (hdev->reset_type) {
3532 	case HNAE3_IMP_RESET:
3533 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3534 		break;
3535 	case HNAE3_GLOBAL_RESET:
3536 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3537 		break;
3538 	default:
3539 		break;
3540 	}
3541 
3542 	if (!clearval)
3543 		return;
3544 
3545 	/* For revision 0x20, the reset interrupt source
3546 	 * can only be cleared after hardware reset done
3547 	 */
3548 	if (hdev->pdev->revision == 0x20)
3549 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3550 				clearval);
3551 
3552 	hclge_enable_vector(&hdev->misc_vector, true);
3553 }
3554 
3555 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3556 {
3557 	u32 reg_val;
3558 
3559 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3560 	if (enable)
3561 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3562 	else
3563 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3564 
3565 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3566 }
3567 
3568 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3569 {
3570 	int ret;
3571 
3572 	ret = hclge_set_all_vf_rst(hdev, true);
3573 	if (ret)
3574 		return ret;
3575 
3576 	hclge_func_reset_sync_vf(hdev);
3577 
3578 	return 0;
3579 }
3580 
3581 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3582 {
3583 	u32 reg_val;
3584 	int ret = 0;
3585 
3586 	switch (hdev->reset_type) {
3587 	case HNAE3_FUNC_RESET:
3588 		ret = hclge_func_reset_notify_vf(hdev);
3589 		if (ret)
3590 			return ret;
3591 
3592 		ret = hclge_func_reset_cmd(hdev, 0);
3593 		if (ret) {
3594 			dev_err(&hdev->pdev->dev,
3595 				"asserting function reset fail %d!\n", ret);
3596 			return ret;
3597 		}
3598 
3599 		/* After performaning pf reset, it is not necessary to do the
3600 		 * mailbox handling or send any command to firmware, because
3601 		 * any mailbox handling or command to firmware is only valid
3602 		 * after hclge_cmd_init is called.
3603 		 */
3604 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3605 		hdev->rst_stats.pf_rst_cnt++;
3606 		break;
3607 	case HNAE3_FLR_RESET:
3608 		ret = hclge_func_reset_notify_vf(hdev);
3609 		if (ret)
3610 			return ret;
3611 		break;
3612 	case HNAE3_IMP_RESET:
3613 		hclge_handle_imp_error(hdev);
3614 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3615 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3616 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3617 		break;
3618 	default:
3619 		break;
3620 	}
3621 
3622 	/* inform hardware that preparatory work is done */
3623 	msleep(HCLGE_RESET_SYNC_TIME);
3624 	hclge_reset_handshake(hdev, true);
3625 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3626 
3627 	return ret;
3628 }
3629 
3630 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3631 {
3632 #define MAX_RESET_FAIL_CNT 5
3633 
3634 	if (hdev->reset_pending) {
3635 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3636 			 hdev->reset_pending);
3637 		return true;
3638 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3639 		   HCLGE_RESET_INT_M) {
3640 		dev_info(&hdev->pdev->dev,
3641 			 "reset failed because new reset interrupt\n");
3642 		hclge_clear_reset_cause(hdev);
3643 		return false;
3644 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3645 		hdev->rst_stats.reset_fail_cnt++;
3646 		set_bit(hdev->reset_type, &hdev->reset_pending);
3647 		dev_info(&hdev->pdev->dev,
3648 			 "re-schedule reset task(%u)\n",
3649 			 hdev->rst_stats.reset_fail_cnt);
3650 		return true;
3651 	}
3652 
3653 	hclge_clear_reset_cause(hdev);
3654 
3655 	/* recover the handshake status when reset fail */
3656 	hclge_reset_handshake(hdev, true);
3657 
3658 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3659 
3660 	hclge_dbg_dump_rst_info(hdev);
3661 
3662 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3663 
3664 	return false;
3665 }
3666 
3667 static int hclge_set_rst_done(struct hclge_dev *hdev)
3668 {
3669 	struct hclge_pf_rst_done_cmd *req;
3670 	struct hclge_desc desc;
3671 	int ret;
3672 
3673 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3674 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3675 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3676 
3677 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3678 	/* To be compatible with the old firmware, which does not support
3679 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3680 	 * return success
3681 	 */
3682 	if (ret == -EOPNOTSUPP) {
3683 		dev_warn(&hdev->pdev->dev,
3684 			 "current firmware does not support command(0x%x)!\n",
3685 			 HCLGE_OPC_PF_RST_DONE);
3686 		return 0;
3687 	} else if (ret) {
3688 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3689 			ret);
3690 	}
3691 
3692 	return ret;
3693 }
3694 
3695 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3696 {
3697 	int ret = 0;
3698 
3699 	switch (hdev->reset_type) {
3700 	case HNAE3_FUNC_RESET:
3701 		/* fall through */
3702 	case HNAE3_FLR_RESET:
3703 		ret = hclge_set_all_vf_rst(hdev, false);
3704 		break;
3705 	case HNAE3_GLOBAL_RESET:
3706 		/* fall through */
3707 	case HNAE3_IMP_RESET:
3708 		ret = hclge_set_rst_done(hdev);
3709 		break;
3710 	default:
3711 		break;
3712 	}
3713 
3714 	/* clear up the handshake status after re-initialize done */
3715 	hclge_reset_handshake(hdev, false);
3716 
3717 	return ret;
3718 }
3719 
3720 static int hclge_reset_stack(struct hclge_dev *hdev)
3721 {
3722 	int ret;
3723 
3724 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3725 	if (ret)
3726 		return ret;
3727 
3728 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3729 	if (ret)
3730 		return ret;
3731 
3732 	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3733 	if (ret)
3734 		return ret;
3735 
3736 	return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3737 }
3738 
3739 static int hclge_reset_prepare(struct hclge_dev *hdev)
3740 {
3741 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3742 	int ret;
3743 
3744 	/* Initialize ae_dev reset status as well, in case enet layer wants to
3745 	 * know if device is undergoing reset
3746 	 */
3747 	ae_dev->reset_type = hdev->reset_type;
3748 	hdev->rst_stats.reset_cnt++;
3749 	/* perform reset of the stack & ae device for a client */
3750 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3751 	if (ret)
3752 		return ret;
3753 
3754 	rtnl_lock();
3755 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3756 	rtnl_unlock();
3757 	if (ret)
3758 		return ret;
3759 
3760 	return hclge_reset_prepare_wait(hdev);
3761 }
3762 
3763 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3764 {
3765 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3766 	enum hnae3_reset_type reset_level;
3767 	int ret;
3768 
3769 	hdev->rst_stats.hw_reset_done_cnt++;
3770 
3771 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3772 	if (ret)
3773 		return ret;
3774 
3775 	rtnl_lock();
3776 	ret = hclge_reset_stack(hdev);
3777 	rtnl_unlock();
3778 	if (ret)
3779 		return ret;
3780 
3781 	hclge_clear_reset_cause(hdev);
3782 
3783 	ret = hclge_reset_prepare_up(hdev);
3784 	if (ret)
3785 		return ret;
3786 
3787 
3788 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3789 	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3790 	 * times
3791 	 */
3792 	if (ret &&
3793 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3794 		return ret;
3795 
3796 	rtnl_lock();
3797 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3798 	rtnl_unlock();
3799 	if (ret)
3800 		return ret;
3801 
3802 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3803 	if (ret)
3804 		return ret;
3805 
3806 	hdev->last_reset_time = jiffies;
3807 	hdev->rst_stats.reset_fail_cnt = 0;
3808 	hdev->rst_stats.reset_done_cnt++;
3809 	ae_dev->reset_type = HNAE3_NONE_RESET;
3810 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3811 
3812 	/* if default_reset_request has a higher level reset request,
3813 	 * it should be handled as soon as possible. since some errors
3814 	 * need this kind of reset to fix.
3815 	 */
3816 	reset_level = hclge_get_reset_level(ae_dev,
3817 					    &hdev->default_reset_request);
3818 	if (reset_level != HNAE3_NONE_RESET)
3819 		set_bit(reset_level, &hdev->reset_request);
3820 
3821 	return 0;
3822 }
3823 
3824 static void hclge_reset(struct hclge_dev *hdev)
3825 {
3826 	if (hclge_reset_prepare(hdev))
3827 		goto err_reset;
3828 
3829 	if (hclge_reset_wait(hdev))
3830 		goto err_reset;
3831 
3832 	if (hclge_reset_rebuild(hdev))
3833 		goto err_reset;
3834 
3835 	return;
3836 
3837 err_reset:
3838 	if (hclge_reset_err_handle(hdev))
3839 		hclge_reset_task_schedule(hdev);
3840 }
3841 
3842 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3843 {
3844 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3845 	struct hclge_dev *hdev = ae_dev->priv;
3846 
3847 	/* We might end up getting called broadly because of 2 below cases:
3848 	 * 1. Recoverable error was conveyed through APEI and only way to bring
3849 	 *    normalcy is to reset.
3850 	 * 2. A new reset request from the stack due to timeout
3851 	 *
3852 	 * For the first case,error event might not have ae handle available.
3853 	 * check if this is a new reset request and we are not here just because
3854 	 * last reset attempt did not succeed and watchdog hit us again. We will
3855 	 * know this if last reset request did not occur very recently (watchdog
3856 	 * timer = 5*HZ, let us check after sufficiently large time, say 4*5*Hz)
3857 	 * In case of new request we reset the "reset level" to PF reset.
3858 	 * And if it is a repeat reset request of the most recent one then we
3859 	 * want to make sure we throttle the reset request. Therefore, we will
3860 	 * not allow it again before 3*HZ times.
3861 	 */
3862 	if (!handle)
3863 		handle = &hdev->vport[0].nic;
3864 
3865 	if (time_before(jiffies, (hdev->last_reset_time +
3866 				  HCLGE_RESET_INTERVAL))) {
3867 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3868 		return;
3869 	} else if (hdev->default_reset_request) {
3870 		hdev->reset_level =
3871 			hclge_get_reset_level(ae_dev,
3872 					      &hdev->default_reset_request);
3873 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3874 		hdev->reset_level = HNAE3_FUNC_RESET;
3875 	}
3876 
3877 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3878 		 hdev->reset_level);
3879 
3880 	/* request reset & schedule reset task */
3881 	set_bit(hdev->reset_level, &hdev->reset_request);
3882 	hclge_reset_task_schedule(hdev);
3883 
3884 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3885 		hdev->reset_level++;
3886 }
3887 
3888 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3889 					enum hnae3_reset_type rst_type)
3890 {
3891 	struct hclge_dev *hdev = ae_dev->priv;
3892 
3893 	set_bit(rst_type, &hdev->default_reset_request);
3894 }
3895 
3896 static void hclge_reset_timer(struct timer_list *t)
3897 {
3898 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3899 
3900 	/* if default_reset_request has no value, it means that this reset
3901 	 * request has already be handled, so just return here
3902 	 */
3903 	if (!hdev->default_reset_request)
3904 		return;
3905 
3906 	dev_info(&hdev->pdev->dev,
3907 		 "triggering reset in reset timer\n");
3908 	hclge_reset_event(hdev->pdev, NULL);
3909 }
3910 
3911 static void hclge_reset_subtask(struct hclge_dev *hdev)
3912 {
3913 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3914 
3915 	/* check if there is any ongoing reset in the hardware. This status can
3916 	 * be checked from reset_pending. If there is then, we need to wait for
3917 	 * hardware to complete reset.
3918 	 *    a. If we are able to figure out in reasonable time that hardware
3919 	 *       has fully resetted then, we can proceed with driver, client
3920 	 *       reset.
3921 	 *    b. else, we can come back later to check this status so re-sched
3922 	 *       now.
3923 	 */
3924 	hdev->last_reset_time = jiffies;
3925 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3926 	if (hdev->reset_type != HNAE3_NONE_RESET)
3927 		hclge_reset(hdev);
3928 
3929 	/* check if we got any *new* reset requests to be honored */
3930 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3931 	if (hdev->reset_type != HNAE3_NONE_RESET)
3932 		hclge_do_reset(hdev);
3933 
3934 	hdev->reset_type = HNAE3_NONE_RESET;
3935 }
3936 
3937 static void hclge_reset_service_task(struct hclge_dev *hdev)
3938 {
3939 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3940 		return;
3941 
3942 	down(&hdev->reset_sem);
3943 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3944 
3945 	hclge_reset_subtask(hdev);
3946 
3947 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3948 	up(&hdev->reset_sem);
3949 }
3950 
3951 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3952 {
3953 	int i;
3954 
3955 	/* start from vport 1 for PF is always alive */
3956 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3957 		struct hclge_vport *vport = &hdev->vport[i];
3958 
3959 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3960 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3961 
3962 		/* If vf is not alive, set to default value */
3963 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3964 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3965 	}
3966 }
3967 
3968 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3969 {
3970 	unsigned long delta = round_jiffies_relative(HZ);
3971 
3972 	/* Always handle the link updating to make sure link state is
3973 	 * updated when it is triggered by mbx.
3974 	 */
3975 	hclge_update_link_status(hdev);
3976 
3977 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3978 		delta = jiffies - hdev->last_serv_processed;
3979 
3980 		if (delta < round_jiffies_relative(HZ)) {
3981 			delta = round_jiffies_relative(HZ) - delta;
3982 			goto out;
3983 		}
3984 	}
3985 
3986 	hdev->serv_processed_cnt++;
3987 	hclge_update_vport_alive(hdev);
3988 
3989 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3990 		hdev->last_serv_processed = jiffies;
3991 		goto out;
3992 	}
3993 
3994 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3995 		hclge_update_stats_for_all(hdev);
3996 
3997 	hclge_update_port_info(hdev);
3998 	hclge_sync_vlan_filter(hdev);
3999 
4000 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4001 		hclge_rfs_filter_expire(hdev);
4002 
4003 	hdev->last_serv_processed = jiffies;
4004 
4005 out:
4006 	hclge_task_schedule(hdev, delta);
4007 }
4008 
4009 static void hclge_service_task(struct work_struct *work)
4010 {
4011 	struct hclge_dev *hdev =
4012 		container_of(work, struct hclge_dev, service_task.work);
4013 
4014 	hclge_reset_service_task(hdev);
4015 	hclge_mailbox_service_task(hdev);
4016 	hclge_periodic_service_task(hdev);
4017 
4018 	/* Handle reset and mbx again in case periodical task delays the
4019 	 * handling by calling hclge_task_schedule() in
4020 	 * hclge_periodic_service_task().
4021 	 */
4022 	hclge_reset_service_task(hdev);
4023 	hclge_mailbox_service_task(hdev);
4024 }
4025 
4026 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4027 {
4028 	/* VF handle has no client */
4029 	if (!handle->client)
4030 		return container_of(handle, struct hclge_vport, nic);
4031 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4032 		return container_of(handle, struct hclge_vport, roce);
4033 	else
4034 		return container_of(handle, struct hclge_vport, nic);
4035 }
4036 
4037 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4038 			    struct hnae3_vector_info *vector_info)
4039 {
4040 	struct hclge_vport *vport = hclge_get_vport(handle);
4041 	struct hnae3_vector_info *vector = vector_info;
4042 	struct hclge_dev *hdev = vport->back;
4043 	int alloc = 0;
4044 	int i, j;
4045 
4046 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4047 	vector_num = min(hdev->num_msi_left, vector_num);
4048 
4049 	for (j = 0; j < vector_num; j++) {
4050 		for (i = 1; i < hdev->num_msi; i++) {
4051 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4052 				vector->vector = pci_irq_vector(hdev->pdev, i);
4053 				vector->io_addr = hdev->hw.io_base +
4054 					HCLGE_VECTOR_REG_BASE +
4055 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4056 					vport->vport_id *
4057 					HCLGE_VECTOR_VF_OFFSET;
4058 				hdev->vector_status[i] = vport->vport_id;
4059 				hdev->vector_irq[i] = vector->vector;
4060 
4061 				vector++;
4062 				alloc++;
4063 
4064 				break;
4065 			}
4066 		}
4067 	}
4068 	hdev->num_msi_left -= alloc;
4069 	hdev->num_msi_used += alloc;
4070 
4071 	return alloc;
4072 }
4073 
4074 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4075 {
4076 	int i;
4077 
4078 	for (i = 0; i < hdev->num_msi; i++)
4079 		if (vector == hdev->vector_irq[i])
4080 			return i;
4081 
4082 	return -EINVAL;
4083 }
4084 
4085 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4086 {
4087 	struct hclge_vport *vport = hclge_get_vport(handle);
4088 	struct hclge_dev *hdev = vport->back;
4089 	int vector_id;
4090 
4091 	vector_id = hclge_get_vector_index(hdev, vector);
4092 	if (vector_id < 0) {
4093 		dev_err(&hdev->pdev->dev,
4094 			"Get vector index fail. vector = %d\n", vector);
4095 		return vector_id;
4096 	}
4097 
4098 	hclge_free_vector(hdev, vector_id);
4099 
4100 	return 0;
4101 }
4102 
4103 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4104 {
4105 	return HCLGE_RSS_KEY_SIZE;
4106 }
4107 
4108 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4109 {
4110 	return HCLGE_RSS_IND_TBL_SIZE;
4111 }
4112 
4113 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4114 				  const u8 hfunc, const u8 *key)
4115 {
4116 	struct hclge_rss_config_cmd *req;
4117 	unsigned int key_offset = 0;
4118 	struct hclge_desc desc;
4119 	int key_counts;
4120 	int key_size;
4121 	int ret;
4122 
4123 	key_counts = HCLGE_RSS_KEY_SIZE;
4124 	req = (struct hclge_rss_config_cmd *)desc.data;
4125 
4126 	while (key_counts) {
4127 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4128 					   false);
4129 
4130 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4131 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4132 
4133 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4134 		memcpy(req->hash_key,
4135 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4136 
4137 		key_counts -= key_size;
4138 		key_offset++;
4139 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4140 		if (ret) {
4141 			dev_err(&hdev->pdev->dev,
4142 				"Configure RSS config fail, status = %d\n",
4143 				ret);
4144 			return ret;
4145 		}
4146 	}
4147 	return 0;
4148 }
4149 
4150 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4151 {
4152 	struct hclge_rss_indirection_table_cmd *req;
4153 	struct hclge_desc desc;
4154 	int i, j;
4155 	int ret;
4156 
4157 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4158 
4159 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4160 		hclge_cmd_setup_basic_desc
4161 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4162 
4163 		req->start_table_index =
4164 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4165 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4166 
4167 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4168 			req->rss_result[j] =
4169 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4170 
4171 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4172 		if (ret) {
4173 			dev_err(&hdev->pdev->dev,
4174 				"Configure rss indir table fail,status = %d\n",
4175 				ret);
4176 			return ret;
4177 		}
4178 	}
4179 	return 0;
4180 }
4181 
4182 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4183 				 u16 *tc_size, u16 *tc_offset)
4184 {
4185 	struct hclge_rss_tc_mode_cmd *req;
4186 	struct hclge_desc desc;
4187 	int ret;
4188 	int i;
4189 
4190 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4191 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4192 
4193 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4194 		u16 mode = 0;
4195 
4196 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4197 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4198 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4199 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4200 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4201 
4202 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4203 	}
4204 
4205 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4206 	if (ret)
4207 		dev_err(&hdev->pdev->dev,
4208 			"Configure rss tc mode fail, status = %d\n", ret);
4209 
4210 	return ret;
4211 }
4212 
4213 static void hclge_get_rss_type(struct hclge_vport *vport)
4214 {
4215 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4216 	    vport->rss_tuple_sets.ipv4_udp_en ||
4217 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4218 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4219 	    vport->rss_tuple_sets.ipv6_udp_en ||
4220 	    vport->rss_tuple_sets.ipv6_sctp_en)
4221 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4222 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4223 		 vport->rss_tuple_sets.ipv6_fragment_en)
4224 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4225 	else
4226 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4227 }
4228 
4229 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4230 {
4231 	struct hclge_rss_input_tuple_cmd *req;
4232 	struct hclge_desc desc;
4233 	int ret;
4234 
4235 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4236 
4237 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4238 
4239 	/* Get the tuple cfg from pf */
4240 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4241 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4242 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4243 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4244 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4245 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4246 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4247 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4248 	hclge_get_rss_type(&hdev->vport[0]);
4249 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4250 	if (ret)
4251 		dev_err(&hdev->pdev->dev,
4252 			"Configure rss input fail, status = %d\n", ret);
4253 	return ret;
4254 }
4255 
4256 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4257 			 u8 *key, u8 *hfunc)
4258 {
4259 	struct hclge_vport *vport = hclge_get_vport(handle);
4260 	int i;
4261 
4262 	/* Get hash algorithm */
4263 	if (hfunc) {
4264 		switch (vport->rss_algo) {
4265 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4266 			*hfunc = ETH_RSS_HASH_TOP;
4267 			break;
4268 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4269 			*hfunc = ETH_RSS_HASH_XOR;
4270 			break;
4271 		default:
4272 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4273 			break;
4274 		}
4275 	}
4276 
4277 	/* Get the RSS Key required by the user */
4278 	if (key)
4279 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4280 
4281 	/* Get indirect table */
4282 	if (indir)
4283 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4284 			indir[i] =  vport->rss_indirection_tbl[i];
4285 
4286 	return 0;
4287 }
4288 
4289 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4290 			 const  u8 *key, const  u8 hfunc)
4291 {
4292 	struct hclge_vport *vport = hclge_get_vport(handle);
4293 	struct hclge_dev *hdev = vport->back;
4294 	u8 hash_algo;
4295 	int ret, i;
4296 
4297 	/* Set the RSS Hash Key if specififed by the user */
4298 	if (key) {
4299 		switch (hfunc) {
4300 		case ETH_RSS_HASH_TOP:
4301 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4302 			break;
4303 		case ETH_RSS_HASH_XOR:
4304 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4305 			break;
4306 		case ETH_RSS_HASH_NO_CHANGE:
4307 			hash_algo = vport->rss_algo;
4308 			break;
4309 		default:
4310 			return -EINVAL;
4311 		}
4312 
4313 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4314 		if (ret)
4315 			return ret;
4316 
4317 		/* Update the shadow RSS key with user specified qids */
4318 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4319 		vport->rss_algo = hash_algo;
4320 	}
4321 
4322 	/* Update the shadow RSS table with user specified qids */
4323 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4324 		vport->rss_indirection_tbl[i] = indir[i];
4325 
4326 	/* Update the hardware */
4327 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4328 }
4329 
4330 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4331 {
4332 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4333 
4334 	if (nfc->data & RXH_L4_B_2_3)
4335 		hash_sets |= HCLGE_D_PORT_BIT;
4336 	else
4337 		hash_sets &= ~HCLGE_D_PORT_BIT;
4338 
4339 	if (nfc->data & RXH_IP_SRC)
4340 		hash_sets |= HCLGE_S_IP_BIT;
4341 	else
4342 		hash_sets &= ~HCLGE_S_IP_BIT;
4343 
4344 	if (nfc->data & RXH_IP_DST)
4345 		hash_sets |= HCLGE_D_IP_BIT;
4346 	else
4347 		hash_sets &= ~HCLGE_D_IP_BIT;
4348 
4349 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4350 		hash_sets |= HCLGE_V_TAG_BIT;
4351 
4352 	return hash_sets;
4353 }
4354 
4355 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4356 			       struct ethtool_rxnfc *nfc)
4357 {
4358 	struct hclge_vport *vport = hclge_get_vport(handle);
4359 	struct hclge_dev *hdev = vport->back;
4360 	struct hclge_rss_input_tuple_cmd *req;
4361 	struct hclge_desc desc;
4362 	u8 tuple_sets;
4363 	int ret;
4364 
4365 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4366 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4367 		return -EINVAL;
4368 
4369 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4370 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4371 
4372 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4373 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4374 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4375 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4376 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4377 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4378 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4379 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4380 
4381 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4382 	switch (nfc->flow_type) {
4383 	case TCP_V4_FLOW:
4384 		req->ipv4_tcp_en = tuple_sets;
4385 		break;
4386 	case TCP_V6_FLOW:
4387 		req->ipv6_tcp_en = tuple_sets;
4388 		break;
4389 	case UDP_V4_FLOW:
4390 		req->ipv4_udp_en = tuple_sets;
4391 		break;
4392 	case UDP_V6_FLOW:
4393 		req->ipv6_udp_en = tuple_sets;
4394 		break;
4395 	case SCTP_V4_FLOW:
4396 		req->ipv4_sctp_en = tuple_sets;
4397 		break;
4398 	case SCTP_V6_FLOW:
4399 		if ((nfc->data & RXH_L4_B_0_1) ||
4400 		    (nfc->data & RXH_L4_B_2_3))
4401 			return -EINVAL;
4402 
4403 		req->ipv6_sctp_en = tuple_sets;
4404 		break;
4405 	case IPV4_FLOW:
4406 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4407 		break;
4408 	case IPV6_FLOW:
4409 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4410 		break;
4411 	default:
4412 		return -EINVAL;
4413 	}
4414 
4415 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4416 	if (ret) {
4417 		dev_err(&hdev->pdev->dev,
4418 			"Set rss tuple fail, status = %d\n", ret);
4419 		return ret;
4420 	}
4421 
4422 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4423 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4424 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4425 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4426 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4427 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4428 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4429 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4430 	hclge_get_rss_type(vport);
4431 	return 0;
4432 }
4433 
4434 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4435 			       struct ethtool_rxnfc *nfc)
4436 {
4437 	struct hclge_vport *vport = hclge_get_vport(handle);
4438 	u8 tuple_sets;
4439 
4440 	nfc->data = 0;
4441 
4442 	switch (nfc->flow_type) {
4443 	case TCP_V4_FLOW:
4444 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4445 		break;
4446 	case UDP_V4_FLOW:
4447 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4448 		break;
4449 	case TCP_V6_FLOW:
4450 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4451 		break;
4452 	case UDP_V6_FLOW:
4453 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4454 		break;
4455 	case SCTP_V4_FLOW:
4456 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4457 		break;
4458 	case SCTP_V6_FLOW:
4459 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4460 		break;
4461 	case IPV4_FLOW:
4462 	case IPV6_FLOW:
4463 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4464 		break;
4465 	default:
4466 		return -EINVAL;
4467 	}
4468 
4469 	if (!tuple_sets)
4470 		return 0;
4471 
4472 	if (tuple_sets & HCLGE_D_PORT_BIT)
4473 		nfc->data |= RXH_L4_B_2_3;
4474 	if (tuple_sets & HCLGE_S_PORT_BIT)
4475 		nfc->data |= RXH_L4_B_0_1;
4476 	if (tuple_sets & HCLGE_D_IP_BIT)
4477 		nfc->data |= RXH_IP_DST;
4478 	if (tuple_sets & HCLGE_S_IP_BIT)
4479 		nfc->data |= RXH_IP_SRC;
4480 
4481 	return 0;
4482 }
4483 
4484 static int hclge_get_tc_size(struct hnae3_handle *handle)
4485 {
4486 	struct hclge_vport *vport = hclge_get_vport(handle);
4487 	struct hclge_dev *hdev = vport->back;
4488 
4489 	return hdev->rss_size_max;
4490 }
4491 
4492 int hclge_rss_init_hw(struct hclge_dev *hdev)
4493 {
4494 	struct hclge_vport *vport = hdev->vport;
4495 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4496 	u16 rss_size = vport[0].alloc_rss_size;
4497 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4498 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4499 	u8 *key = vport[0].rss_hash_key;
4500 	u8 hfunc = vport[0].rss_algo;
4501 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4502 	u16 roundup_size;
4503 	unsigned int i;
4504 	int ret;
4505 
4506 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4507 	if (ret)
4508 		return ret;
4509 
4510 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4511 	if (ret)
4512 		return ret;
4513 
4514 	ret = hclge_set_rss_input_tuple(hdev);
4515 	if (ret)
4516 		return ret;
4517 
4518 	/* Each TC have the same queue size, and tc_size set to hardware is
4519 	 * the log2 of roundup power of two of rss_size, the acutal queue
4520 	 * size is limited by indirection table.
4521 	 */
4522 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4523 		dev_err(&hdev->pdev->dev,
4524 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4525 			rss_size);
4526 		return -EINVAL;
4527 	}
4528 
4529 	roundup_size = roundup_pow_of_two(rss_size);
4530 	roundup_size = ilog2(roundup_size);
4531 
4532 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4533 		tc_valid[i] = 0;
4534 
4535 		if (!(hdev->hw_tc_map & BIT(i)))
4536 			continue;
4537 
4538 		tc_valid[i] = 1;
4539 		tc_size[i] = roundup_size;
4540 		tc_offset[i] = rss_size * i;
4541 	}
4542 
4543 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4544 }
4545 
4546 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4547 {
4548 	struct hclge_vport *vport = hdev->vport;
4549 	int i, j;
4550 
4551 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4552 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4553 			vport[j].rss_indirection_tbl[i] =
4554 				i % vport[j].alloc_rss_size;
4555 	}
4556 }
4557 
4558 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4559 {
4560 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4561 	struct hclge_vport *vport = hdev->vport;
4562 
4563 	if (hdev->pdev->revision >= 0x21)
4564 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4565 
4566 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4567 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4568 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4569 		vport[i].rss_tuple_sets.ipv4_udp_en =
4570 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4571 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4572 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4573 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4574 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4575 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4576 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4577 		vport[i].rss_tuple_sets.ipv6_udp_en =
4578 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4579 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4580 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4581 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4582 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4583 
4584 		vport[i].rss_algo = rss_algo;
4585 
4586 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4587 		       HCLGE_RSS_KEY_SIZE);
4588 	}
4589 
4590 	hclge_rss_indir_init_cfg(hdev);
4591 }
4592 
4593 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4594 				int vector_id, bool en,
4595 				struct hnae3_ring_chain_node *ring_chain)
4596 {
4597 	struct hclge_dev *hdev = vport->back;
4598 	struct hnae3_ring_chain_node *node;
4599 	struct hclge_desc desc;
4600 	struct hclge_ctrl_vector_chain_cmd *req =
4601 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4602 	enum hclge_cmd_status status;
4603 	enum hclge_opcode_type op;
4604 	u16 tqp_type_and_id;
4605 	int i;
4606 
4607 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4608 	hclge_cmd_setup_basic_desc(&desc, op, false);
4609 	req->int_vector_id = vector_id;
4610 
4611 	i = 0;
4612 	for (node = ring_chain; node; node = node->next) {
4613 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4614 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4615 				HCLGE_INT_TYPE_S,
4616 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4617 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4618 				HCLGE_TQP_ID_S, node->tqp_index);
4619 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4620 				HCLGE_INT_GL_IDX_S,
4621 				hnae3_get_field(node->int_gl_idx,
4622 						HNAE3_RING_GL_IDX_M,
4623 						HNAE3_RING_GL_IDX_S));
4624 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4625 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4626 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4627 			req->vfid = vport->vport_id;
4628 
4629 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4630 			if (status) {
4631 				dev_err(&hdev->pdev->dev,
4632 					"Map TQP fail, status is %d.\n",
4633 					status);
4634 				return -EIO;
4635 			}
4636 			i = 0;
4637 
4638 			hclge_cmd_setup_basic_desc(&desc,
4639 						   op,
4640 						   false);
4641 			req->int_vector_id = vector_id;
4642 		}
4643 	}
4644 
4645 	if (i > 0) {
4646 		req->int_cause_num = i;
4647 		req->vfid = vport->vport_id;
4648 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4649 		if (status) {
4650 			dev_err(&hdev->pdev->dev,
4651 				"Map TQP fail, status is %d.\n", status);
4652 			return -EIO;
4653 		}
4654 	}
4655 
4656 	return 0;
4657 }
4658 
4659 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4660 				    struct hnae3_ring_chain_node *ring_chain)
4661 {
4662 	struct hclge_vport *vport = hclge_get_vport(handle);
4663 	struct hclge_dev *hdev = vport->back;
4664 	int vector_id;
4665 
4666 	vector_id = hclge_get_vector_index(hdev, vector);
4667 	if (vector_id < 0) {
4668 		dev_err(&hdev->pdev->dev,
4669 			"failed to get vector index. vector=%d\n", vector);
4670 		return vector_id;
4671 	}
4672 
4673 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4674 }
4675 
4676 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4677 				       struct hnae3_ring_chain_node *ring_chain)
4678 {
4679 	struct hclge_vport *vport = hclge_get_vport(handle);
4680 	struct hclge_dev *hdev = vport->back;
4681 	int vector_id, ret;
4682 
4683 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4684 		return 0;
4685 
4686 	vector_id = hclge_get_vector_index(hdev, vector);
4687 	if (vector_id < 0) {
4688 		dev_err(&handle->pdev->dev,
4689 			"Get vector index fail. ret =%d\n", vector_id);
4690 		return vector_id;
4691 	}
4692 
4693 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4694 	if (ret)
4695 		dev_err(&handle->pdev->dev,
4696 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4697 			vector_id, ret);
4698 
4699 	return ret;
4700 }
4701 
4702 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4703 				      struct hclge_promisc_param *param)
4704 {
4705 	struct hclge_promisc_cfg_cmd *req;
4706 	struct hclge_desc desc;
4707 	int ret;
4708 
4709 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4710 
4711 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4712 	req->vf_id = param->vf_id;
4713 
4714 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4715 	 * pdev revision(0x20), new revision support them. The
4716 	 * value of this two fields will not return error when driver
4717 	 * send command to fireware in revision(0x20).
4718 	 */
4719 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4720 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4721 
4722 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4723 	if (ret)
4724 		dev_err(&hdev->pdev->dev,
4725 			"Set promisc mode fail, status is %d.\n", ret);
4726 
4727 	return ret;
4728 }
4729 
4730 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4731 				     bool en_uc, bool en_mc, bool en_bc,
4732 				     int vport_id)
4733 {
4734 	if (!param)
4735 		return;
4736 
4737 	memset(param, 0, sizeof(struct hclge_promisc_param));
4738 	if (en_uc)
4739 		param->enable = HCLGE_PROMISC_EN_UC;
4740 	if (en_mc)
4741 		param->enable |= HCLGE_PROMISC_EN_MC;
4742 	if (en_bc)
4743 		param->enable |= HCLGE_PROMISC_EN_BC;
4744 	param->vf_id = vport_id;
4745 }
4746 
4747 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4748 				 bool en_mc_pmc, bool en_bc_pmc)
4749 {
4750 	struct hclge_dev *hdev = vport->back;
4751 	struct hclge_promisc_param param;
4752 
4753 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4754 				 vport->vport_id);
4755 	return hclge_cmd_set_promisc_mode(hdev, &param);
4756 }
4757 
4758 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4759 				  bool en_mc_pmc)
4760 {
4761 	struct hclge_vport *vport = hclge_get_vport(handle);
4762 	bool en_bc_pmc = true;
4763 
4764 	/* For revision 0x20, if broadcast promisc enabled, vlan filter is
4765 	 * always bypassed. So broadcast promisc should be disabled until
4766 	 * user enable promisc mode
4767 	 */
4768 	if (handle->pdev->revision == 0x20)
4769 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4770 
4771 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4772 					    en_bc_pmc);
4773 }
4774 
4775 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4776 {
4777 	struct hclge_get_fd_mode_cmd *req;
4778 	struct hclge_desc desc;
4779 	int ret;
4780 
4781 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4782 
4783 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4784 
4785 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4786 	if (ret) {
4787 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4788 		return ret;
4789 	}
4790 
4791 	*fd_mode = req->mode;
4792 
4793 	return ret;
4794 }
4795 
4796 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4797 				   u32 *stage1_entry_num,
4798 				   u32 *stage2_entry_num,
4799 				   u16 *stage1_counter_num,
4800 				   u16 *stage2_counter_num)
4801 {
4802 	struct hclge_get_fd_allocation_cmd *req;
4803 	struct hclge_desc desc;
4804 	int ret;
4805 
4806 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4807 
4808 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4809 
4810 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4811 	if (ret) {
4812 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4813 			ret);
4814 		return ret;
4815 	}
4816 
4817 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4818 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4819 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4820 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4821 
4822 	return ret;
4823 }
4824 
4825 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4826 {
4827 	struct hclge_set_fd_key_config_cmd *req;
4828 	struct hclge_fd_key_cfg *stage;
4829 	struct hclge_desc desc;
4830 	int ret;
4831 
4832 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4833 
4834 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4835 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4836 	req->stage = stage_num;
4837 	req->key_select = stage->key_sel;
4838 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4839 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4840 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4841 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4842 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4843 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4844 
4845 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4846 	if (ret)
4847 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4848 
4849 	return ret;
4850 }
4851 
4852 static int hclge_init_fd_config(struct hclge_dev *hdev)
4853 {
4854 #define LOW_2_WORDS		0x03
4855 	struct hclge_fd_key_cfg *key_cfg;
4856 	int ret;
4857 
4858 	if (!hnae3_dev_fd_supported(hdev))
4859 		return 0;
4860 
4861 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4862 	if (ret)
4863 		return ret;
4864 
4865 	switch (hdev->fd_cfg.fd_mode) {
4866 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4867 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4868 		break;
4869 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4870 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4871 		break;
4872 	default:
4873 		dev_err(&hdev->pdev->dev,
4874 			"Unsupported flow director mode %u\n",
4875 			hdev->fd_cfg.fd_mode);
4876 		return -EOPNOTSUPP;
4877 	}
4878 
4879 	hdev->fd_cfg.proto_support =
4880 		TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4881 		UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4882 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4883 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE,
4884 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4885 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4886 	key_cfg->outer_sipv6_word_en = 0;
4887 	key_cfg->outer_dipv6_word_en = 0;
4888 
4889 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4890 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4891 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4892 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4893 
4894 	/* If use max 400bit key, we can support tuples for ether type */
4895 	if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4896 		hdev->fd_cfg.proto_support |= ETHER_FLOW;
4897 		key_cfg->tuple_active |=
4898 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4899 	}
4900 
4901 	/* roce_type is used to filter roce frames
4902 	 * dst_vport is used to specify the rule
4903 	 */
4904 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4905 
4906 	ret = hclge_get_fd_allocation(hdev,
4907 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4908 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4909 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4910 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4911 	if (ret)
4912 		return ret;
4913 
4914 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4915 }
4916 
4917 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4918 				int loc, u8 *key, bool is_add)
4919 {
4920 	struct hclge_fd_tcam_config_1_cmd *req1;
4921 	struct hclge_fd_tcam_config_2_cmd *req2;
4922 	struct hclge_fd_tcam_config_3_cmd *req3;
4923 	struct hclge_desc desc[3];
4924 	int ret;
4925 
4926 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4927 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4928 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4929 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4930 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4931 
4932 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4933 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4934 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4935 
4936 	req1->stage = stage;
4937 	req1->xy_sel = sel_x ? 1 : 0;
4938 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4939 	req1->index = cpu_to_le32(loc);
4940 	req1->entry_vld = sel_x ? is_add : 0;
4941 
4942 	if (key) {
4943 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4944 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4945 		       sizeof(req2->tcam_data));
4946 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4947 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4948 	}
4949 
4950 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4951 	if (ret)
4952 		dev_err(&hdev->pdev->dev,
4953 			"config tcam key fail, ret=%d\n",
4954 			ret);
4955 
4956 	return ret;
4957 }
4958 
4959 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4960 			      struct hclge_fd_ad_data *action)
4961 {
4962 	struct hclge_fd_ad_config_cmd *req;
4963 	struct hclge_desc desc;
4964 	u64 ad_data = 0;
4965 	int ret;
4966 
4967 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4968 
4969 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4970 	req->index = cpu_to_le32(loc);
4971 	req->stage = stage;
4972 
4973 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4974 		      action->write_rule_id_to_bd);
4975 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4976 			action->rule_id);
4977 	ad_data <<= 32;
4978 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4979 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4980 		      action->forward_to_direct_queue);
4981 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4982 			action->queue_id);
4983 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4984 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4985 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4986 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4987 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4988 			action->counter_id);
4989 
4990 	req->ad_data = cpu_to_le64(ad_data);
4991 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4992 	if (ret)
4993 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4994 
4995 	return ret;
4996 }
4997 
4998 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4999 				   struct hclge_fd_rule *rule)
5000 {
5001 	u16 tmp_x_s, tmp_y_s;
5002 	u32 tmp_x_l, tmp_y_l;
5003 	int i;
5004 
5005 	if (rule->unused_tuple & tuple_bit)
5006 		return true;
5007 
5008 	switch (tuple_bit) {
5009 	case 0:
5010 		return false;
5011 	case BIT(INNER_DST_MAC):
5012 		for (i = 0; i < ETH_ALEN; i++) {
5013 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5014 			       rule->tuples_mask.dst_mac[i]);
5015 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5016 			       rule->tuples_mask.dst_mac[i]);
5017 		}
5018 
5019 		return true;
5020 	case BIT(INNER_SRC_MAC):
5021 		for (i = 0; i < ETH_ALEN; i++) {
5022 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5023 			       rule->tuples.src_mac[i]);
5024 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5025 			       rule->tuples.src_mac[i]);
5026 		}
5027 
5028 		return true;
5029 	case BIT(INNER_VLAN_TAG_FST):
5030 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5031 		       rule->tuples_mask.vlan_tag1);
5032 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5033 		       rule->tuples_mask.vlan_tag1);
5034 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5035 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5036 
5037 		return true;
5038 	case BIT(INNER_ETH_TYPE):
5039 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5040 		       rule->tuples_mask.ether_proto);
5041 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5042 		       rule->tuples_mask.ether_proto);
5043 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5044 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5045 
5046 		return true;
5047 	case BIT(INNER_IP_TOS):
5048 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5049 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5050 
5051 		return true;
5052 	case BIT(INNER_IP_PROTO):
5053 		calc_x(*key_x, rule->tuples.ip_proto,
5054 		       rule->tuples_mask.ip_proto);
5055 		calc_y(*key_y, rule->tuples.ip_proto,
5056 		       rule->tuples_mask.ip_proto);
5057 
5058 		return true;
5059 	case BIT(INNER_SRC_IP):
5060 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5061 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5062 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5063 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5064 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5065 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5066 
5067 		return true;
5068 	case BIT(INNER_DST_IP):
5069 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5070 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5071 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5072 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5073 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5074 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5075 
5076 		return true;
5077 	case BIT(INNER_SRC_PORT):
5078 		calc_x(tmp_x_s, rule->tuples.src_port,
5079 		       rule->tuples_mask.src_port);
5080 		calc_y(tmp_y_s, rule->tuples.src_port,
5081 		       rule->tuples_mask.src_port);
5082 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5083 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5084 
5085 		return true;
5086 	case BIT(INNER_DST_PORT):
5087 		calc_x(tmp_x_s, rule->tuples.dst_port,
5088 		       rule->tuples_mask.dst_port);
5089 		calc_y(tmp_y_s, rule->tuples.dst_port,
5090 		       rule->tuples_mask.dst_port);
5091 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5092 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5093 
5094 		return true;
5095 	default:
5096 		return false;
5097 	}
5098 }
5099 
5100 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5101 				 u8 vf_id, u8 network_port_id)
5102 {
5103 	u32 port_number = 0;
5104 
5105 	if (port_type == HOST_PORT) {
5106 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5107 				pf_id);
5108 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5109 				vf_id);
5110 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5111 	} else {
5112 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5113 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5114 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5115 	}
5116 
5117 	return port_number;
5118 }
5119 
5120 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5121 				       __le32 *key_x, __le32 *key_y,
5122 				       struct hclge_fd_rule *rule)
5123 {
5124 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5125 	u8 cur_pos = 0, tuple_size, shift_bits;
5126 	unsigned int i;
5127 
5128 	for (i = 0; i < MAX_META_DATA; i++) {
5129 		tuple_size = meta_data_key_info[i].key_length;
5130 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5131 
5132 		switch (tuple_bit) {
5133 		case BIT(ROCE_TYPE):
5134 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5135 			cur_pos += tuple_size;
5136 			break;
5137 		case BIT(DST_VPORT):
5138 			port_number = hclge_get_port_number(HOST_PORT, 0,
5139 							    rule->vf_id, 0);
5140 			hnae3_set_field(meta_data,
5141 					GENMASK(cur_pos + tuple_size, cur_pos),
5142 					cur_pos, port_number);
5143 			cur_pos += tuple_size;
5144 			break;
5145 		default:
5146 			break;
5147 		}
5148 	}
5149 
5150 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5151 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5152 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5153 
5154 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5155 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5156 }
5157 
5158 /* A complete key is combined with meta data key and tuple key.
5159  * Meta data key is stored at the MSB region, and tuple key is stored at
5160  * the LSB region, unused bits will be filled 0.
5161  */
5162 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5163 			    struct hclge_fd_rule *rule)
5164 {
5165 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5166 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5167 	u8 *cur_key_x, *cur_key_y;
5168 	unsigned int i;
5169 	int ret, tuple_size;
5170 	u8 meta_data_region;
5171 
5172 	memset(key_x, 0, sizeof(key_x));
5173 	memset(key_y, 0, sizeof(key_y));
5174 	cur_key_x = key_x;
5175 	cur_key_y = key_y;
5176 
5177 	for (i = 0 ; i < MAX_TUPLE; i++) {
5178 		bool tuple_valid;
5179 		u32 check_tuple;
5180 
5181 		tuple_size = tuple_key_info[i].key_length / 8;
5182 		check_tuple = key_cfg->tuple_active & BIT(i);
5183 
5184 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5185 						     cur_key_y, rule);
5186 		if (tuple_valid) {
5187 			cur_key_x += tuple_size;
5188 			cur_key_y += tuple_size;
5189 		}
5190 	}
5191 
5192 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5193 			MAX_META_DATA_LENGTH / 8;
5194 
5195 	hclge_fd_convert_meta_data(key_cfg,
5196 				   (__le32 *)(key_x + meta_data_region),
5197 				   (__le32 *)(key_y + meta_data_region),
5198 				   rule);
5199 
5200 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5201 				   true);
5202 	if (ret) {
5203 		dev_err(&hdev->pdev->dev,
5204 			"fd key_y config fail, loc=%u, ret=%d\n",
5205 			rule->queue_id, ret);
5206 		return ret;
5207 	}
5208 
5209 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5210 				   true);
5211 	if (ret)
5212 		dev_err(&hdev->pdev->dev,
5213 			"fd key_x config fail, loc=%u, ret=%d\n",
5214 			rule->queue_id, ret);
5215 	return ret;
5216 }
5217 
5218 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5219 			       struct hclge_fd_rule *rule)
5220 {
5221 	struct hclge_fd_ad_data ad_data;
5222 
5223 	ad_data.ad_id = rule->location;
5224 
5225 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5226 		ad_data.drop_packet = true;
5227 		ad_data.forward_to_direct_queue = false;
5228 		ad_data.queue_id = 0;
5229 	} else {
5230 		ad_data.drop_packet = false;
5231 		ad_data.forward_to_direct_queue = true;
5232 		ad_data.queue_id = rule->queue_id;
5233 	}
5234 
5235 	ad_data.use_counter = false;
5236 	ad_data.counter_id = 0;
5237 
5238 	ad_data.use_next_stage = false;
5239 	ad_data.next_input_key = 0;
5240 
5241 	ad_data.write_rule_id_to_bd = true;
5242 	ad_data.rule_id = rule->location;
5243 
5244 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5245 }
5246 
5247 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5248 			       struct ethtool_rx_flow_spec *fs, u32 *unused)
5249 {
5250 	struct ethtool_tcpip4_spec *tcp_ip4_spec;
5251 	struct ethtool_usrip4_spec *usr_ip4_spec;
5252 	struct ethtool_tcpip6_spec *tcp_ip6_spec;
5253 	struct ethtool_usrip6_spec *usr_ip6_spec;
5254 	struct ethhdr *ether_spec;
5255 
5256 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5257 		return -EINVAL;
5258 
5259 	if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5260 		return -EOPNOTSUPP;
5261 
5262 	if ((fs->flow_type & FLOW_EXT) &&
5263 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5264 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5265 		return -EOPNOTSUPP;
5266 	}
5267 
5268 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5269 	case SCTP_V4_FLOW:
5270 	case TCP_V4_FLOW:
5271 	case UDP_V4_FLOW:
5272 		tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5273 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5274 
5275 		if (!tcp_ip4_spec->ip4src)
5276 			*unused |= BIT(INNER_SRC_IP);
5277 
5278 		if (!tcp_ip4_spec->ip4dst)
5279 			*unused |= BIT(INNER_DST_IP);
5280 
5281 		if (!tcp_ip4_spec->psrc)
5282 			*unused |= BIT(INNER_SRC_PORT);
5283 
5284 		if (!tcp_ip4_spec->pdst)
5285 			*unused |= BIT(INNER_DST_PORT);
5286 
5287 		if (!tcp_ip4_spec->tos)
5288 			*unused |= BIT(INNER_IP_TOS);
5289 
5290 		break;
5291 	case IP_USER_FLOW:
5292 		usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5293 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5294 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5295 
5296 		if (!usr_ip4_spec->ip4src)
5297 			*unused |= BIT(INNER_SRC_IP);
5298 
5299 		if (!usr_ip4_spec->ip4dst)
5300 			*unused |= BIT(INNER_DST_IP);
5301 
5302 		if (!usr_ip4_spec->tos)
5303 			*unused |= BIT(INNER_IP_TOS);
5304 
5305 		if (!usr_ip4_spec->proto)
5306 			*unused |= BIT(INNER_IP_PROTO);
5307 
5308 		if (usr_ip4_spec->l4_4_bytes)
5309 			return -EOPNOTSUPP;
5310 
5311 		if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5312 			return -EOPNOTSUPP;
5313 
5314 		break;
5315 	case SCTP_V6_FLOW:
5316 	case TCP_V6_FLOW:
5317 	case UDP_V6_FLOW:
5318 		tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5319 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5320 			BIT(INNER_IP_TOS);
5321 
5322 		/* check whether src/dst ip address used */
5323 		if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5324 		    !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5325 			*unused |= BIT(INNER_SRC_IP);
5326 
5327 		if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5328 		    !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5329 			*unused |= BIT(INNER_DST_IP);
5330 
5331 		if (!tcp_ip6_spec->psrc)
5332 			*unused |= BIT(INNER_SRC_PORT);
5333 
5334 		if (!tcp_ip6_spec->pdst)
5335 			*unused |= BIT(INNER_DST_PORT);
5336 
5337 		if (tcp_ip6_spec->tclass)
5338 			return -EOPNOTSUPP;
5339 
5340 		break;
5341 	case IPV6_USER_FLOW:
5342 		usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5343 		*unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5344 			BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5345 			BIT(INNER_DST_PORT);
5346 
5347 		/* check whether src/dst ip address used */
5348 		if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5349 		    !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5350 			*unused |= BIT(INNER_SRC_IP);
5351 
5352 		if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5353 		    !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5354 			*unused |= BIT(INNER_DST_IP);
5355 
5356 		if (!usr_ip6_spec->l4_proto)
5357 			*unused |= BIT(INNER_IP_PROTO);
5358 
5359 		if (usr_ip6_spec->tclass)
5360 			return -EOPNOTSUPP;
5361 
5362 		if (usr_ip6_spec->l4_4_bytes)
5363 			return -EOPNOTSUPP;
5364 
5365 		break;
5366 	case ETHER_FLOW:
5367 		ether_spec = &fs->h_u.ether_spec;
5368 		*unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5369 			BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5370 			BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5371 
5372 		if (is_zero_ether_addr(ether_spec->h_source))
5373 			*unused |= BIT(INNER_SRC_MAC);
5374 
5375 		if (is_zero_ether_addr(ether_spec->h_dest))
5376 			*unused |= BIT(INNER_DST_MAC);
5377 
5378 		if (!ether_spec->h_proto)
5379 			*unused |= BIT(INNER_ETH_TYPE);
5380 
5381 		break;
5382 	default:
5383 		return -EOPNOTSUPP;
5384 	}
5385 
5386 	if ((fs->flow_type & FLOW_EXT)) {
5387 		if (fs->h_ext.vlan_etype)
5388 			return -EOPNOTSUPP;
5389 		if (!fs->h_ext.vlan_tci)
5390 			*unused |= BIT(INNER_VLAN_TAG_FST);
5391 
5392 		if (fs->m_ext.vlan_tci) {
5393 			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5394 				return -EINVAL;
5395 		}
5396 	} else {
5397 		*unused |= BIT(INNER_VLAN_TAG_FST);
5398 	}
5399 
5400 	if (fs->flow_type & FLOW_MAC_EXT) {
5401 		if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5402 			return -EOPNOTSUPP;
5403 
5404 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5405 			*unused |= BIT(INNER_DST_MAC);
5406 		else
5407 			*unused &= ~(BIT(INNER_DST_MAC));
5408 	}
5409 
5410 	return 0;
5411 }
5412 
5413 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5414 {
5415 	struct hclge_fd_rule *rule = NULL;
5416 	struct hlist_node *node2;
5417 
5418 	spin_lock_bh(&hdev->fd_rule_lock);
5419 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5420 		if (rule->location >= location)
5421 			break;
5422 	}
5423 
5424 	spin_unlock_bh(&hdev->fd_rule_lock);
5425 
5426 	return  rule && rule->location == location;
5427 }
5428 
5429 /* make sure being called after lock up with fd_rule_lock */
5430 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5431 				     struct hclge_fd_rule *new_rule,
5432 				     u16 location,
5433 				     bool is_add)
5434 {
5435 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5436 	struct hlist_node *node2;
5437 
5438 	if (is_add && !new_rule)
5439 		return -EINVAL;
5440 
5441 	hlist_for_each_entry_safe(rule, node2,
5442 				  &hdev->fd_rule_list, rule_node) {
5443 		if (rule->location >= location)
5444 			break;
5445 		parent = rule;
5446 	}
5447 
5448 	if (rule && rule->location == location) {
5449 		hlist_del(&rule->rule_node);
5450 		kfree(rule);
5451 		hdev->hclge_fd_rule_num--;
5452 
5453 		if (!is_add) {
5454 			if (!hdev->hclge_fd_rule_num)
5455 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5456 			clear_bit(location, hdev->fd_bmap);
5457 
5458 			return 0;
5459 		}
5460 	} else if (!is_add) {
5461 		dev_err(&hdev->pdev->dev,
5462 			"delete fail, rule %u is inexistent\n",
5463 			location);
5464 		return -EINVAL;
5465 	}
5466 
5467 	INIT_HLIST_NODE(&new_rule->rule_node);
5468 
5469 	if (parent)
5470 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5471 	else
5472 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5473 
5474 	set_bit(location, hdev->fd_bmap);
5475 	hdev->hclge_fd_rule_num++;
5476 	hdev->fd_active_type = new_rule->rule_type;
5477 
5478 	return 0;
5479 }
5480 
5481 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5482 			      struct ethtool_rx_flow_spec *fs,
5483 			      struct hclge_fd_rule *rule)
5484 {
5485 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5486 
5487 	switch (flow_type) {
5488 	case SCTP_V4_FLOW:
5489 	case TCP_V4_FLOW:
5490 	case UDP_V4_FLOW:
5491 		rule->tuples.src_ip[IPV4_INDEX] =
5492 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5493 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5494 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5495 
5496 		rule->tuples.dst_ip[IPV4_INDEX] =
5497 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5498 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5499 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5500 
5501 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5502 		rule->tuples_mask.src_port =
5503 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5504 
5505 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5506 		rule->tuples_mask.dst_port =
5507 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5508 
5509 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5510 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5511 
5512 		rule->tuples.ether_proto = ETH_P_IP;
5513 		rule->tuples_mask.ether_proto = 0xFFFF;
5514 
5515 		break;
5516 	case IP_USER_FLOW:
5517 		rule->tuples.src_ip[IPV4_INDEX] =
5518 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5519 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5520 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5521 
5522 		rule->tuples.dst_ip[IPV4_INDEX] =
5523 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5524 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5525 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5526 
5527 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5528 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5529 
5530 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5531 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5532 
5533 		rule->tuples.ether_proto = ETH_P_IP;
5534 		rule->tuples_mask.ether_proto = 0xFFFF;
5535 
5536 		break;
5537 	case SCTP_V6_FLOW:
5538 	case TCP_V6_FLOW:
5539 	case UDP_V6_FLOW:
5540 		be32_to_cpu_array(rule->tuples.src_ip,
5541 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5542 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5543 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5544 
5545 		be32_to_cpu_array(rule->tuples.dst_ip,
5546 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5547 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5548 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5549 
5550 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5551 		rule->tuples_mask.src_port =
5552 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5553 
5554 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5555 		rule->tuples_mask.dst_port =
5556 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5557 
5558 		rule->tuples.ether_proto = ETH_P_IPV6;
5559 		rule->tuples_mask.ether_proto = 0xFFFF;
5560 
5561 		break;
5562 	case IPV6_USER_FLOW:
5563 		be32_to_cpu_array(rule->tuples.src_ip,
5564 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5565 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5566 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5567 
5568 		be32_to_cpu_array(rule->tuples.dst_ip,
5569 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5570 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5571 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5572 
5573 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5574 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5575 
5576 		rule->tuples.ether_proto = ETH_P_IPV6;
5577 		rule->tuples_mask.ether_proto = 0xFFFF;
5578 
5579 		break;
5580 	case ETHER_FLOW:
5581 		ether_addr_copy(rule->tuples.src_mac,
5582 				fs->h_u.ether_spec.h_source);
5583 		ether_addr_copy(rule->tuples_mask.src_mac,
5584 				fs->m_u.ether_spec.h_source);
5585 
5586 		ether_addr_copy(rule->tuples.dst_mac,
5587 				fs->h_u.ether_spec.h_dest);
5588 		ether_addr_copy(rule->tuples_mask.dst_mac,
5589 				fs->m_u.ether_spec.h_dest);
5590 
5591 		rule->tuples.ether_proto =
5592 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5593 		rule->tuples_mask.ether_proto =
5594 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5595 
5596 		break;
5597 	default:
5598 		return -EOPNOTSUPP;
5599 	}
5600 
5601 	switch (flow_type) {
5602 	case SCTP_V4_FLOW:
5603 	case SCTP_V6_FLOW:
5604 		rule->tuples.ip_proto = IPPROTO_SCTP;
5605 		rule->tuples_mask.ip_proto = 0xFF;
5606 		break;
5607 	case TCP_V4_FLOW:
5608 	case TCP_V6_FLOW:
5609 		rule->tuples.ip_proto = IPPROTO_TCP;
5610 		rule->tuples_mask.ip_proto = 0xFF;
5611 		break;
5612 	case UDP_V4_FLOW:
5613 	case UDP_V6_FLOW:
5614 		rule->tuples.ip_proto = IPPROTO_UDP;
5615 		rule->tuples_mask.ip_proto = 0xFF;
5616 		break;
5617 	default:
5618 		break;
5619 	}
5620 
5621 	if ((fs->flow_type & FLOW_EXT)) {
5622 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5623 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5624 	}
5625 
5626 	if (fs->flow_type & FLOW_MAC_EXT) {
5627 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5628 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5629 	}
5630 
5631 	return 0;
5632 }
5633 
5634 /* make sure being called after lock up with fd_rule_lock */
5635 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5636 				struct hclge_fd_rule *rule)
5637 {
5638 	int ret;
5639 
5640 	if (!rule) {
5641 		dev_err(&hdev->pdev->dev,
5642 			"The flow director rule is NULL\n");
5643 		return -EINVAL;
5644 	}
5645 
5646 	/* it will never fail here, so needn't to check return value */
5647 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5648 
5649 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5650 	if (ret)
5651 		goto clear_rule;
5652 
5653 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5654 	if (ret)
5655 		goto clear_rule;
5656 
5657 	return 0;
5658 
5659 clear_rule:
5660 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5661 	return ret;
5662 }
5663 
5664 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5665 			      struct ethtool_rxnfc *cmd)
5666 {
5667 	struct hclge_vport *vport = hclge_get_vport(handle);
5668 	struct hclge_dev *hdev = vport->back;
5669 	u16 dst_vport_id = 0, q_index = 0;
5670 	struct ethtool_rx_flow_spec *fs;
5671 	struct hclge_fd_rule *rule;
5672 	u32 unused = 0;
5673 	u8 action;
5674 	int ret;
5675 
5676 	if (!hnae3_dev_fd_supported(hdev))
5677 		return -EOPNOTSUPP;
5678 
5679 	if (!hdev->fd_en) {
5680 		dev_warn(&hdev->pdev->dev,
5681 			 "Please enable flow director first\n");
5682 		return -EOPNOTSUPP;
5683 	}
5684 
5685 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5686 
5687 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5688 	if (ret) {
5689 		dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5690 		return ret;
5691 	}
5692 
5693 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5694 		action = HCLGE_FD_ACTION_DROP_PACKET;
5695 	} else {
5696 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5697 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5698 		u16 tqps;
5699 
5700 		if (vf > hdev->num_req_vfs) {
5701 			dev_err(&hdev->pdev->dev,
5702 				"Error: vf id (%u) > max vf num (%u)\n",
5703 				vf, hdev->num_req_vfs);
5704 			return -EINVAL;
5705 		}
5706 
5707 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5708 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5709 
5710 		if (ring >= tqps) {
5711 			dev_err(&hdev->pdev->dev,
5712 				"Error: queue id (%u) > max tqp num (%u)\n",
5713 				ring, tqps - 1);
5714 			return -EINVAL;
5715 		}
5716 
5717 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5718 		q_index = ring;
5719 	}
5720 
5721 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5722 	if (!rule)
5723 		return -ENOMEM;
5724 
5725 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5726 	if (ret) {
5727 		kfree(rule);
5728 		return ret;
5729 	}
5730 
5731 	rule->flow_type = fs->flow_type;
5732 
5733 	rule->location = fs->location;
5734 	rule->unused_tuple = unused;
5735 	rule->vf_id = dst_vport_id;
5736 	rule->queue_id = q_index;
5737 	rule->action = action;
5738 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5739 
5740 	/* to avoid rule conflict, when user configure rule by ethtool,
5741 	 * we need to clear all arfs rules
5742 	 */
5743 	hclge_clear_arfs_rules(handle);
5744 
5745 	spin_lock_bh(&hdev->fd_rule_lock);
5746 	ret = hclge_fd_config_rule(hdev, rule);
5747 
5748 	spin_unlock_bh(&hdev->fd_rule_lock);
5749 
5750 	return ret;
5751 }
5752 
5753 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5754 			      struct ethtool_rxnfc *cmd)
5755 {
5756 	struct hclge_vport *vport = hclge_get_vport(handle);
5757 	struct hclge_dev *hdev = vport->back;
5758 	struct ethtool_rx_flow_spec *fs;
5759 	int ret;
5760 
5761 	if (!hnae3_dev_fd_supported(hdev))
5762 		return -EOPNOTSUPP;
5763 
5764 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5765 
5766 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5767 		return -EINVAL;
5768 
5769 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5770 		dev_err(&hdev->pdev->dev,
5771 			"Delete fail, rule %u is inexistent\n", fs->location);
5772 		return -ENOENT;
5773 	}
5774 
5775 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5776 				   NULL, false);
5777 	if (ret)
5778 		return ret;
5779 
5780 	spin_lock_bh(&hdev->fd_rule_lock);
5781 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5782 
5783 	spin_unlock_bh(&hdev->fd_rule_lock);
5784 
5785 	return ret;
5786 }
5787 
5788 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5789 				     bool clear_list)
5790 {
5791 	struct hclge_vport *vport = hclge_get_vport(handle);
5792 	struct hclge_dev *hdev = vport->back;
5793 	struct hclge_fd_rule *rule;
5794 	struct hlist_node *node;
5795 	u16 location;
5796 
5797 	if (!hnae3_dev_fd_supported(hdev))
5798 		return;
5799 
5800 	spin_lock_bh(&hdev->fd_rule_lock);
5801 	for_each_set_bit(location, hdev->fd_bmap,
5802 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5803 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5804 				     NULL, false);
5805 
5806 	if (clear_list) {
5807 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5808 					  rule_node) {
5809 			hlist_del(&rule->rule_node);
5810 			kfree(rule);
5811 		}
5812 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5813 		hdev->hclge_fd_rule_num = 0;
5814 		bitmap_zero(hdev->fd_bmap,
5815 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5816 	}
5817 
5818 	spin_unlock_bh(&hdev->fd_rule_lock);
5819 }
5820 
5821 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5822 {
5823 	struct hclge_vport *vport = hclge_get_vport(handle);
5824 	struct hclge_dev *hdev = vport->back;
5825 	struct hclge_fd_rule *rule;
5826 	struct hlist_node *node;
5827 	int ret;
5828 
5829 	/* Return ok here, because reset error handling will check this
5830 	 * return value. If error is returned here, the reset process will
5831 	 * fail.
5832 	 */
5833 	if (!hnae3_dev_fd_supported(hdev))
5834 		return 0;
5835 
5836 	/* if fd is disabled, should not restore it when reset */
5837 	if (!hdev->fd_en)
5838 		return 0;
5839 
5840 	spin_lock_bh(&hdev->fd_rule_lock);
5841 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5842 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5843 		if (!ret)
5844 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5845 
5846 		if (ret) {
5847 			dev_warn(&hdev->pdev->dev,
5848 				 "Restore rule %u failed, remove it\n",
5849 				 rule->location);
5850 			clear_bit(rule->location, hdev->fd_bmap);
5851 			hlist_del(&rule->rule_node);
5852 			kfree(rule);
5853 			hdev->hclge_fd_rule_num--;
5854 		}
5855 	}
5856 
5857 	if (hdev->hclge_fd_rule_num)
5858 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5859 
5860 	spin_unlock_bh(&hdev->fd_rule_lock);
5861 
5862 	return 0;
5863 }
5864 
5865 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5866 				 struct ethtool_rxnfc *cmd)
5867 {
5868 	struct hclge_vport *vport = hclge_get_vport(handle);
5869 	struct hclge_dev *hdev = vport->back;
5870 
5871 	if (!hnae3_dev_fd_supported(hdev))
5872 		return -EOPNOTSUPP;
5873 
5874 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5875 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5876 
5877 	return 0;
5878 }
5879 
5880 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5881 				  struct ethtool_rxnfc *cmd)
5882 {
5883 	struct hclge_vport *vport = hclge_get_vport(handle);
5884 	struct hclge_fd_rule *rule = NULL;
5885 	struct hclge_dev *hdev = vport->back;
5886 	struct ethtool_rx_flow_spec *fs;
5887 	struct hlist_node *node2;
5888 
5889 	if (!hnae3_dev_fd_supported(hdev))
5890 		return -EOPNOTSUPP;
5891 
5892 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5893 
5894 	spin_lock_bh(&hdev->fd_rule_lock);
5895 
5896 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5897 		if (rule->location >= fs->location)
5898 			break;
5899 	}
5900 
5901 	if (!rule || fs->location != rule->location) {
5902 		spin_unlock_bh(&hdev->fd_rule_lock);
5903 
5904 		return -ENOENT;
5905 	}
5906 
5907 	fs->flow_type = rule->flow_type;
5908 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5909 	case SCTP_V4_FLOW:
5910 	case TCP_V4_FLOW:
5911 	case UDP_V4_FLOW:
5912 		fs->h_u.tcp_ip4_spec.ip4src =
5913 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5914 		fs->m_u.tcp_ip4_spec.ip4src =
5915 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5916 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5917 
5918 		fs->h_u.tcp_ip4_spec.ip4dst =
5919 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5920 		fs->m_u.tcp_ip4_spec.ip4dst =
5921 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5922 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5923 
5924 		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5925 		fs->m_u.tcp_ip4_spec.psrc =
5926 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5927 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5928 
5929 		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5930 		fs->m_u.tcp_ip4_spec.pdst =
5931 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5932 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5933 
5934 		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5935 		fs->m_u.tcp_ip4_spec.tos =
5936 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5937 				0 : rule->tuples_mask.ip_tos;
5938 
5939 		break;
5940 	case IP_USER_FLOW:
5941 		fs->h_u.usr_ip4_spec.ip4src =
5942 				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5943 		fs->m_u.tcp_ip4_spec.ip4src =
5944 			rule->unused_tuple & BIT(INNER_SRC_IP) ?
5945 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5946 
5947 		fs->h_u.usr_ip4_spec.ip4dst =
5948 				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5949 		fs->m_u.usr_ip4_spec.ip4dst =
5950 			rule->unused_tuple & BIT(INNER_DST_IP) ?
5951 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5952 
5953 		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5954 		fs->m_u.usr_ip4_spec.tos =
5955 				rule->unused_tuple & BIT(INNER_IP_TOS) ?
5956 				0 : rule->tuples_mask.ip_tos;
5957 
5958 		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5959 		fs->m_u.usr_ip4_spec.proto =
5960 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5961 				0 : rule->tuples_mask.ip_proto;
5962 
5963 		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5964 
5965 		break;
5966 	case SCTP_V6_FLOW:
5967 	case TCP_V6_FLOW:
5968 	case UDP_V6_FLOW:
5969 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5970 				  rule->tuples.src_ip, IPV6_SIZE);
5971 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
5972 			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5973 			       sizeof(int) * IPV6_SIZE);
5974 		else
5975 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5976 					  rule->tuples_mask.src_ip, IPV6_SIZE);
5977 
5978 		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5979 				  rule->tuples.dst_ip, IPV6_SIZE);
5980 		if (rule->unused_tuple & BIT(INNER_DST_IP))
5981 			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5982 			       sizeof(int) * IPV6_SIZE);
5983 		else
5984 			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5985 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
5986 
5987 		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5988 		fs->m_u.tcp_ip6_spec.psrc =
5989 				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5990 				0 : cpu_to_be16(rule->tuples_mask.src_port);
5991 
5992 		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5993 		fs->m_u.tcp_ip6_spec.pdst =
5994 				rule->unused_tuple & BIT(INNER_DST_PORT) ?
5995 				0 : cpu_to_be16(rule->tuples_mask.dst_port);
5996 
5997 		break;
5998 	case IPV6_USER_FLOW:
5999 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6000 				  rule->tuples.src_ip, IPV6_SIZE);
6001 		if (rule->unused_tuple & BIT(INNER_SRC_IP))
6002 			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6003 			       sizeof(int) * IPV6_SIZE);
6004 		else
6005 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6006 					  rule->tuples_mask.src_ip, IPV6_SIZE);
6007 
6008 		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6009 				  rule->tuples.dst_ip, IPV6_SIZE);
6010 		if (rule->unused_tuple & BIT(INNER_DST_IP))
6011 			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6012 			       sizeof(int) * IPV6_SIZE);
6013 		else
6014 			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6015 					  rule->tuples_mask.dst_ip, IPV6_SIZE);
6016 
6017 		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6018 		fs->m_u.usr_ip6_spec.l4_proto =
6019 				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6020 				0 : rule->tuples_mask.ip_proto;
6021 
6022 		break;
6023 	case ETHER_FLOW:
6024 		ether_addr_copy(fs->h_u.ether_spec.h_source,
6025 				rule->tuples.src_mac);
6026 		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6027 			eth_zero_addr(fs->m_u.ether_spec.h_source);
6028 		else
6029 			ether_addr_copy(fs->m_u.ether_spec.h_source,
6030 					rule->tuples_mask.src_mac);
6031 
6032 		ether_addr_copy(fs->h_u.ether_spec.h_dest,
6033 				rule->tuples.dst_mac);
6034 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6035 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6036 		else
6037 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6038 					rule->tuples_mask.dst_mac);
6039 
6040 		fs->h_u.ether_spec.h_proto =
6041 				cpu_to_be16(rule->tuples.ether_proto);
6042 		fs->m_u.ether_spec.h_proto =
6043 				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6044 				0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6045 
6046 		break;
6047 	default:
6048 		spin_unlock_bh(&hdev->fd_rule_lock);
6049 		return -EOPNOTSUPP;
6050 	}
6051 
6052 	if (fs->flow_type & FLOW_EXT) {
6053 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6054 		fs->m_ext.vlan_tci =
6055 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6056 				cpu_to_be16(VLAN_VID_MASK) :
6057 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6058 	}
6059 
6060 	if (fs->flow_type & FLOW_MAC_EXT) {
6061 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6062 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6063 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6064 		else
6065 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6066 					rule->tuples_mask.dst_mac);
6067 	}
6068 
6069 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6070 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6071 	} else {
6072 		u64 vf_id;
6073 
6074 		fs->ring_cookie = rule->queue_id;
6075 		vf_id = rule->vf_id;
6076 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6077 		fs->ring_cookie |= vf_id;
6078 	}
6079 
6080 	spin_unlock_bh(&hdev->fd_rule_lock);
6081 
6082 	return 0;
6083 }
6084 
6085 static int hclge_get_all_rules(struct hnae3_handle *handle,
6086 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6087 {
6088 	struct hclge_vport *vport = hclge_get_vport(handle);
6089 	struct hclge_dev *hdev = vport->back;
6090 	struct hclge_fd_rule *rule;
6091 	struct hlist_node *node2;
6092 	int cnt = 0;
6093 
6094 	if (!hnae3_dev_fd_supported(hdev))
6095 		return -EOPNOTSUPP;
6096 
6097 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6098 
6099 	spin_lock_bh(&hdev->fd_rule_lock);
6100 	hlist_for_each_entry_safe(rule, node2,
6101 				  &hdev->fd_rule_list, rule_node) {
6102 		if (cnt == cmd->rule_cnt) {
6103 			spin_unlock_bh(&hdev->fd_rule_lock);
6104 			return -EMSGSIZE;
6105 		}
6106 
6107 		rule_locs[cnt] = rule->location;
6108 		cnt++;
6109 	}
6110 
6111 	spin_unlock_bh(&hdev->fd_rule_lock);
6112 
6113 	cmd->rule_cnt = cnt;
6114 
6115 	return 0;
6116 }
6117 
6118 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6119 				     struct hclge_fd_rule_tuples *tuples)
6120 {
6121 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6122 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6123 
6124 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6125 	tuples->ip_proto = fkeys->basic.ip_proto;
6126 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6127 
6128 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6129 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6130 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6131 	} else {
6132 		int i;
6133 
6134 		for (i = 0; i < IPV6_SIZE; i++) {
6135 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6136 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6137 		}
6138 	}
6139 }
6140 
6141 /* traverse all rules, check whether an existed rule has the same tuples */
6142 static struct hclge_fd_rule *
6143 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6144 			  const struct hclge_fd_rule_tuples *tuples)
6145 {
6146 	struct hclge_fd_rule *rule = NULL;
6147 	struct hlist_node *node;
6148 
6149 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6150 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6151 			return rule;
6152 	}
6153 
6154 	return NULL;
6155 }
6156 
6157 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6158 				     struct hclge_fd_rule *rule)
6159 {
6160 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6161 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6162 			     BIT(INNER_SRC_PORT);
6163 	rule->action = 0;
6164 	rule->vf_id = 0;
6165 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6166 	if (tuples->ether_proto == ETH_P_IP) {
6167 		if (tuples->ip_proto == IPPROTO_TCP)
6168 			rule->flow_type = TCP_V4_FLOW;
6169 		else
6170 			rule->flow_type = UDP_V4_FLOW;
6171 	} else {
6172 		if (tuples->ip_proto == IPPROTO_TCP)
6173 			rule->flow_type = TCP_V6_FLOW;
6174 		else
6175 			rule->flow_type = UDP_V6_FLOW;
6176 	}
6177 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6178 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6179 }
6180 
6181 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6182 				      u16 flow_id, struct flow_keys *fkeys)
6183 {
6184 	struct hclge_vport *vport = hclge_get_vport(handle);
6185 	struct hclge_fd_rule_tuples new_tuples;
6186 	struct hclge_dev *hdev = vport->back;
6187 	struct hclge_fd_rule *rule;
6188 	u16 tmp_queue_id;
6189 	u16 bit_id;
6190 	int ret;
6191 
6192 	if (!hnae3_dev_fd_supported(hdev))
6193 		return -EOPNOTSUPP;
6194 
6195 	memset(&new_tuples, 0, sizeof(new_tuples));
6196 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6197 
6198 	spin_lock_bh(&hdev->fd_rule_lock);
6199 
6200 	/* when there is already fd rule existed add by user,
6201 	 * arfs should not work
6202 	 */
6203 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6204 		spin_unlock_bh(&hdev->fd_rule_lock);
6205 
6206 		return -EOPNOTSUPP;
6207 	}
6208 
6209 	/* check is there flow director filter existed for this flow,
6210 	 * if not, create a new filter for it;
6211 	 * if filter exist with different queue id, modify the filter;
6212 	 * if filter exist with same queue id, do nothing
6213 	 */
6214 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6215 	if (!rule) {
6216 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6217 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6218 			spin_unlock_bh(&hdev->fd_rule_lock);
6219 
6220 			return -ENOSPC;
6221 		}
6222 
6223 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6224 		if (!rule) {
6225 			spin_unlock_bh(&hdev->fd_rule_lock);
6226 
6227 			return -ENOMEM;
6228 		}
6229 
6230 		set_bit(bit_id, hdev->fd_bmap);
6231 		rule->location = bit_id;
6232 		rule->flow_id = flow_id;
6233 		rule->queue_id = queue_id;
6234 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6235 		ret = hclge_fd_config_rule(hdev, rule);
6236 
6237 		spin_unlock_bh(&hdev->fd_rule_lock);
6238 
6239 		if (ret)
6240 			return ret;
6241 
6242 		return rule->location;
6243 	}
6244 
6245 	spin_unlock_bh(&hdev->fd_rule_lock);
6246 
6247 	if (rule->queue_id == queue_id)
6248 		return rule->location;
6249 
6250 	tmp_queue_id = rule->queue_id;
6251 	rule->queue_id = queue_id;
6252 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6253 	if (ret) {
6254 		rule->queue_id = tmp_queue_id;
6255 		return ret;
6256 	}
6257 
6258 	return rule->location;
6259 }
6260 
6261 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6262 {
6263 #ifdef CONFIG_RFS_ACCEL
6264 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6265 	struct hclge_fd_rule *rule;
6266 	struct hlist_node *node;
6267 	HLIST_HEAD(del_list);
6268 
6269 	spin_lock_bh(&hdev->fd_rule_lock);
6270 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6271 		spin_unlock_bh(&hdev->fd_rule_lock);
6272 		return;
6273 	}
6274 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6275 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6276 					rule->flow_id, rule->location)) {
6277 			hlist_del_init(&rule->rule_node);
6278 			hlist_add_head(&rule->rule_node, &del_list);
6279 			hdev->hclge_fd_rule_num--;
6280 			clear_bit(rule->location, hdev->fd_bmap);
6281 		}
6282 	}
6283 	spin_unlock_bh(&hdev->fd_rule_lock);
6284 
6285 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6286 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6287 				     rule->location, NULL, false);
6288 		kfree(rule);
6289 	}
6290 #endif
6291 }
6292 
6293 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6294 {
6295 #ifdef CONFIG_RFS_ACCEL
6296 	struct hclge_vport *vport = hclge_get_vport(handle);
6297 	struct hclge_dev *hdev = vport->back;
6298 
6299 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6300 		hclge_del_all_fd_entries(handle, true);
6301 #endif
6302 }
6303 
6304 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6305 {
6306 	struct hclge_vport *vport = hclge_get_vport(handle);
6307 	struct hclge_dev *hdev = vport->back;
6308 
6309 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6310 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6311 }
6312 
6313 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6314 {
6315 	struct hclge_vport *vport = hclge_get_vport(handle);
6316 	struct hclge_dev *hdev = vport->back;
6317 
6318 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6319 }
6320 
6321 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6322 {
6323 	struct hclge_vport *vport = hclge_get_vport(handle);
6324 	struct hclge_dev *hdev = vport->back;
6325 
6326 	return hdev->rst_stats.hw_reset_done_cnt;
6327 }
6328 
6329 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6330 {
6331 	struct hclge_vport *vport = hclge_get_vport(handle);
6332 	struct hclge_dev *hdev = vport->back;
6333 	bool clear;
6334 
6335 	hdev->fd_en = enable;
6336 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6337 	if (!enable)
6338 		hclge_del_all_fd_entries(handle, clear);
6339 	else
6340 		hclge_restore_fd_entries(handle);
6341 }
6342 
6343 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6344 {
6345 	struct hclge_desc desc;
6346 	struct hclge_config_mac_mode_cmd *req =
6347 		(struct hclge_config_mac_mode_cmd *)desc.data;
6348 	u32 loop_en = 0;
6349 	int ret;
6350 
6351 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6352 
6353 	if (enable) {
6354 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6355 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6356 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6357 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6358 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6359 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6360 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6361 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6362 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6363 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6364 	}
6365 
6366 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6367 
6368 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6369 	if (ret)
6370 		dev_err(&hdev->pdev->dev,
6371 			"mac enable fail, ret =%d.\n", ret);
6372 }
6373 
6374 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6375 				     u8 switch_param, u8 param_mask)
6376 {
6377 	struct hclge_mac_vlan_switch_cmd *req;
6378 	struct hclge_desc desc;
6379 	u32 func_id;
6380 	int ret;
6381 
6382 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6383 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6384 
6385 	/* read current config parameter */
6386 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6387 				   true);
6388 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6389 	req->func_id = cpu_to_le32(func_id);
6390 
6391 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6392 	if (ret) {
6393 		dev_err(&hdev->pdev->dev,
6394 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6395 		return ret;
6396 	}
6397 
6398 	/* modify and write new config parameter */
6399 	hclge_cmd_reuse_desc(&desc, false);
6400 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6401 	req->param_mask = param_mask;
6402 
6403 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6404 	if (ret)
6405 		dev_err(&hdev->pdev->dev,
6406 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6407 	return ret;
6408 }
6409 
6410 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6411 				       int link_ret)
6412 {
6413 #define HCLGE_PHY_LINK_STATUS_NUM  200
6414 
6415 	struct phy_device *phydev = hdev->hw.mac.phydev;
6416 	int i = 0;
6417 	int ret;
6418 
6419 	do {
6420 		ret = phy_read_status(phydev);
6421 		if (ret) {
6422 			dev_err(&hdev->pdev->dev,
6423 				"phy update link status fail, ret = %d\n", ret);
6424 			return;
6425 		}
6426 
6427 		if (phydev->link == link_ret)
6428 			break;
6429 
6430 		msleep(HCLGE_LINK_STATUS_MS);
6431 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6432 }
6433 
6434 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6435 {
6436 #define HCLGE_MAC_LINK_STATUS_NUM  100
6437 
6438 	int i = 0;
6439 	int ret;
6440 
6441 	do {
6442 		ret = hclge_get_mac_link_status(hdev);
6443 		if (ret < 0)
6444 			return ret;
6445 		else if (ret == link_ret)
6446 			return 0;
6447 
6448 		msleep(HCLGE_LINK_STATUS_MS);
6449 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6450 	return -EBUSY;
6451 }
6452 
6453 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6454 					  bool is_phy)
6455 {
6456 #define HCLGE_LINK_STATUS_DOWN 0
6457 #define HCLGE_LINK_STATUS_UP   1
6458 
6459 	int link_ret;
6460 
6461 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6462 
6463 	if (is_phy)
6464 		hclge_phy_link_status_wait(hdev, link_ret);
6465 
6466 	return hclge_mac_link_status_wait(hdev, link_ret);
6467 }
6468 
6469 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6470 {
6471 	struct hclge_config_mac_mode_cmd *req;
6472 	struct hclge_desc desc;
6473 	u32 loop_en;
6474 	int ret;
6475 
6476 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6477 	/* 1 Read out the MAC mode config at first */
6478 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6479 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6480 	if (ret) {
6481 		dev_err(&hdev->pdev->dev,
6482 			"mac loopback get fail, ret =%d.\n", ret);
6483 		return ret;
6484 	}
6485 
6486 	/* 2 Then setup the loopback flag */
6487 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6488 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6489 	hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6490 	hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6491 
6492 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6493 
6494 	/* 3 Config mac work mode with loopback flag
6495 	 * and its original configure parameters
6496 	 */
6497 	hclge_cmd_reuse_desc(&desc, false);
6498 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6499 	if (ret)
6500 		dev_err(&hdev->pdev->dev,
6501 			"mac loopback set fail, ret =%d.\n", ret);
6502 	return ret;
6503 }
6504 
6505 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6506 				     enum hnae3_loop loop_mode)
6507 {
6508 #define HCLGE_SERDES_RETRY_MS	10
6509 #define HCLGE_SERDES_RETRY_NUM	100
6510 
6511 	struct hclge_serdes_lb_cmd *req;
6512 	struct hclge_desc desc;
6513 	int ret, i = 0;
6514 	u8 loop_mode_b;
6515 
6516 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6517 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6518 
6519 	switch (loop_mode) {
6520 	case HNAE3_LOOP_SERIAL_SERDES:
6521 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6522 		break;
6523 	case HNAE3_LOOP_PARALLEL_SERDES:
6524 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6525 		break;
6526 	default:
6527 		dev_err(&hdev->pdev->dev,
6528 			"unsupported serdes loopback mode %d\n", loop_mode);
6529 		return -ENOTSUPP;
6530 	}
6531 
6532 	if (en) {
6533 		req->enable = loop_mode_b;
6534 		req->mask = loop_mode_b;
6535 	} else {
6536 		req->mask = loop_mode_b;
6537 	}
6538 
6539 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6540 	if (ret) {
6541 		dev_err(&hdev->pdev->dev,
6542 			"serdes loopback set fail, ret = %d\n", ret);
6543 		return ret;
6544 	}
6545 
6546 	do {
6547 		msleep(HCLGE_SERDES_RETRY_MS);
6548 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6549 					   true);
6550 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6551 		if (ret) {
6552 			dev_err(&hdev->pdev->dev,
6553 				"serdes loopback get, ret = %d\n", ret);
6554 			return ret;
6555 		}
6556 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6557 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6558 
6559 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6560 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6561 		return -EBUSY;
6562 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6563 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6564 		return -EIO;
6565 	}
6566 	return ret;
6567 }
6568 
6569 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6570 				     enum hnae3_loop loop_mode)
6571 {
6572 	int ret;
6573 
6574 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6575 	if (ret)
6576 		return ret;
6577 
6578 	hclge_cfg_mac_mode(hdev, en);
6579 
6580 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6581 	if (ret)
6582 		dev_err(&hdev->pdev->dev,
6583 			"serdes loopback config mac mode timeout\n");
6584 
6585 	return ret;
6586 }
6587 
6588 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6589 				     struct phy_device *phydev)
6590 {
6591 	int ret;
6592 
6593 	if (!phydev->suspended) {
6594 		ret = phy_suspend(phydev);
6595 		if (ret)
6596 			return ret;
6597 	}
6598 
6599 	ret = phy_resume(phydev);
6600 	if (ret)
6601 		return ret;
6602 
6603 	return phy_loopback(phydev, true);
6604 }
6605 
6606 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6607 				      struct phy_device *phydev)
6608 {
6609 	int ret;
6610 
6611 	ret = phy_loopback(phydev, false);
6612 	if (ret)
6613 		return ret;
6614 
6615 	return phy_suspend(phydev);
6616 }
6617 
6618 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6619 {
6620 	struct phy_device *phydev = hdev->hw.mac.phydev;
6621 	int ret;
6622 
6623 	if (!phydev)
6624 		return -ENOTSUPP;
6625 
6626 	if (en)
6627 		ret = hclge_enable_phy_loopback(hdev, phydev);
6628 	else
6629 		ret = hclge_disable_phy_loopback(hdev, phydev);
6630 	if (ret) {
6631 		dev_err(&hdev->pdev->dev,
6632 			"set phy loopback fail, ret = %d\n", ret);
6633 		return ret;
6634 	}
6635 
6636 	hclge_cfg_mac_mode(hdev, en);
6637 
6638 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6639 	if (ret)
6640 		dev_err(&hdev->pdev->dev,
6641 			"phy loopback config mac mode timeout\n");
6642 
6643 	return ret;
6644 }
6645 
6646 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6647 			    int stream_id, bool enable)
6648 {
6649 	struct hclge_desc desc;
6650 	struct hclge_cfg_com_tqp_queue_cmd *req =
6651 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6652 	int ret;
6653 
6654 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6655 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6656 	req->stream_id = cpu_to_le16(stream_id);
6657 	if (enable)
6658 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6659 
6660 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6661 	if (ret)
6662 		dev_err(&hdev->pdev->dev,
6663 			"Tqp enable fail, status =%d.\n", ret);
6664 	return ret;
6665 }
6666 
6667 static int hclge_set_loopback(struct hnae3_handle *handle,
6668 			      enum hnae3_loop loop_mode, bool en)
6669 {
6670 	struct hclge_vport *vport = hclge_get_vport(handle);
6671 	struct hnae3_knic_private_info *kinfo;
6672 	struct hclge_dev *hdev = vport->back;
6673 	int i, ret;
6674 
6675 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6676 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6677 	 * the same, the packets are looped back in the SSU. If SSU loopback
6678 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6679 	 */
6680 	if (hdev->pdev->revision >= 0x21) {
6681 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6682 
6683 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6684 						HCLGE_SWITCH_ALW_LPBK_MASK);
6685 		if (ret)
6686 			return ret;
6687 	}
6688 
6689 	switch (loop_mode) {
6690 	case HNAE3_LOOP_APP:
6691 		ret = hclge_set_app_loopback(hdev, en);
6692 		break;
6693 	case HNAE3_LOOP_SERIAL_SERDES:
6694 	case HNAE3_LOOP_PARALLEL_SERDES:
6695 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6696 		break;
6697 	case HNAE3_LOOP_PHY:
6698 		ret = hclge_set_phy_loopback(hdev, en);
6699 		break;
6700 	default:
6701 		ret = -ENOTSUPP;
6702 		dev_err(&hdev->pdev->dev,
6703 			"loop_mode %d is not supported\n", loop_mode);
6704 		break;
6705 	}
6706 
6707 	if (ret)
6708 		return ret;
6709 
6710 	kinfo = &vport->nic.kinfo;
6711 	for (i = 0; i < kinfo->num_tqps; i++) {
6712 		ret = hclge_tqp_enable(hdev, i, 0, en);
6713 		if (ret)
6714 			return ret;
6715 	}
6716 
6717 	return 0;
6718 }
6719 
6720 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6721 {
6722 	int ret;
6723 
6724 	ret = hclge_set_app_loopback(hdev, false);
6725 	if (ret)
6726 		return ret;
6727 
6728 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6729 	if (ret)
6730 		return ret;
6731 
6732 	return hclge_cfg_serdes_loopback(hdev, false,
6733 					 HNAE3_LOOP_PARALLEL_SERDES);
6734 }
6735 
6736 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6737 {
6738 	struct hclge_vport *vport = hclge_get_vport(handle);
6739 	struct hnae3_knic_private_info *kinfo;
6740 	struct hnae3_queue *queue;
6741 	struct hclge_tqp *tqp;
6742 	int i;
6743 
6744 	kinfo = &vport->nic.kinfo;
6745 	for (i = 0; i < kinfo->num_tqps; i++) {
6746 		queue = handle->kinfo.tqp[i];
6747 		tqp = container_of(queue, struct hclge_tqp, q);
6748 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6749 	}
6750 }
6751 
6752 static void hclge_flush_link_update(struct hclge_dev *hdev)
6753 {
6754 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6755 
6756 	unsigned long last = hdev->serv_processed_cnt;
6757 	int i = 0;
6758 
6759 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6760 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6761 	       last == hdev->serv_processed_cnt)
6762 		usleep_range(1, 1);
6763 }
6764 
6765 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6766 {
6767 	struct hclge_vport *vport = hclge_get_vport(handle);
6768 	struct hclge_dev *hdev = vport->back;
6769 
6770 	if (enable) {
6771 		hclge_task_schedule(hdev, 0);
6772 	} else {
6773 		/* Set the DOWN flag here to disable link updating */
6774 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6775 
6776 		/* flush memory to make sure DOWN is seen by service task */
6777 		smp_mb__before_atomic();
6778 		hclge_flush_link_update(hdev);
6779 	}
6780 }
6781 
6782 static int hclge_ae_start(struct hnae3_handle *handle)
6783 {
6784 	struct hclge_vport *vport = hclge_get_vport(handle);
6785 	struct hclge_dev *hdev = vport->back;
6786 
6787 	/* mac enable */
6788 	hclge_cfg_mac_mode(hdev, true);
6789 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6790 	hdev->hw.mac.link = 0;
6791 
6792 	/* reset tqp stats */
6793 	hclge_reset_tqp_stats(handle);
6794 
6795 	hclge_mac_start_phy(hdev);
6796 
6797 	return 0;
6798 }
6799 
6800 static void hclge_ae_stop(struct hnae3_handle *handle)
6801 {
6802 	struct hclge_vport *vport = hclge_get_vport(handle);
6803 	struct hclge_dev *hdev = vport->back;
6804 	int i;
6805 
6806 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6807 
6808 	hclge_clear_arfs_rules(handle);
6809 
6810 	/* If it is not PF reset, the firmware will disable the MAC,
6811 	 * so it only need to stop phy here.
6812 	 */
6813 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6814 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6815 		hclge_mac_stop_phy(hdev);
6816 		hclge_update_link_status(hdev);
6817 		return;
6818 	}
6819 
6820 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6821 		hclge_reset_tqp(handle, i);
6822 
6823 	hclge_config_mac_tnl_int(hdev, false);
6824 
6825 	/* Mac disable */
6826 	hclge_cfg_mac_mode(hdev, false);
6827 
6828 	hclge_mac_stop_phy(hdev);
6829 
6830 	/* reset tqp stats */
6831 	hclge_reset_tqp_stats(handle);
6832 	hclge_update_link_status(hdev);
6833 }
6834 
6835 int hclge_vport_start(struct hclge_vport *vport)
6836 {
6837 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6838 	vport->last_active_jiffies = jiffies;
6839 	return 0;
6840 }
6841 
6842 void hclge_vport_stop(struct hclge_vport *vport)
6843 {
6844 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6845 }
6846 
6847 static int hclge_client_start(struct hnae3_handle *handle)
6848 {
6849 	struct hclge_vport *vport = hclge_get_vport(handle);
6850 
6851 	return hclge_vport_start(vport);
6852 }
6853 
6854 static void hclge_client_stop(struct hnae3_handle *handle)
6855 {
6856 	struct hclge_vport *vport = hclge_get_vport(handle);
6857 
6858 	hclge_vport_stop(vport);
6859 }
6860 
6861 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6862 					 u16 cmdq_resp, u8  resp_code,
6863 					 enum hclge_mac_vlan_tbl_opcode op)
6864 {
6865 	struct hclge_dev *hdev = vport->back;
6866 
6867 	if (cmdq_resp) {
6868 		dev_err(&hdev->pdev->dev,
6869 			"cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6870 			cmdq_resp);
6871 		return -EIO;
6872 	}
6873 
6874 	if (op == HCLGE_MAC_VLAN_ADD) {
6875 		if ((!resp_code) || (resp_code == 1)) {
6876 			return 0;
6877 		} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6878 			dev_err(&hdev->pdev->dev,
6879 				"add mac addr failed for uc_overflow.\n");
6880 			return -ENOSPC;
6881 		} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6882 			dev_err(&hdev->pdev->dev,
6883 				"add mac addr failed for mc_overflow.\n");
6884 			return -ENOSPC;
6885 		}
6886 
6887 		dev_err(&hdev->pdev->dev,
6888 			"add mac addr failed for undefined, code=%u.\n",
6889 			resp_code);
6890 		return -EIO;
6891 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6892 		if (!resp_code) {
6893 			return 0;
6894 		} else if (resp_code == 1) {
6895 			dev_dbg(&hdev->pdev->dev,
6896 				"remove mac addr failed for miss.\n");
6897 			return -ENOENT;
6898 		}
6899 
6900 		dev_err(&hdev->pdev->dev,
6901 			"remove mac addr failed for undefined, code=%u.\n",
6902 			resp_code);
6903 		return -EIO;
6904 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
6905 		if (!resp_code) {
6906 			return 0;
6907 		} else if (resp_code == 1) {
6908 			dev_dbg(&hdev->pdev->dev,
6909 				"lookup mac addr failed for miss.\n");
6910 			return -ENOENT;
6911 		}
6912 
6913 		dev_err(&hdev->pdev->dev,
6914 			"lookup mac addr failed for undefined, code=%u.\n",
6915 			resp_code);
6916 		return -EIO;
6917 	}
6918 
6919 	dev_err(&hdev->pdev->dev,
6920 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6921 
6922 	return -EINVAL;
6923 }
6924 
6925 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6926 {
6927 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6928 
6929 	unsigned int word_num;
6930 	unsigned int bit_num;
6931 
6932 	if (vfid > 255 || vfid < 0)
6933 		return -EIO;
6934 
6935 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6936 		word_num = vfid / 32;
6937 		bit_num  = vfid % 32;
6938 		if (clr)
6939 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6940 		else
6941 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6942 	} else {
6943 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6944 		bit_num  = vfid % 32;
6945 		if (clr)
6946 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6947 		else
6948 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6949 	}
6950 
6951 	return 0;
6952 }
6953 
6954 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6955 {
6956 #define HCLGE_DESC_NUMBER 3
6957 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6958 	int i, j;
6959 
6960 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6961 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6962 			if (desc[i].data[j])
6963 				return false;
6964 
6965 	return true;
6966 }
6967 
6968 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6969 				   const u8 *addr, bool is_mc)
6970 {
6971 	const unsigned char *mac_addr = addr;
6972 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6973 		       (mac_addr[0]) | (mac_addr[1] << 8);
6974 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6975 
6976 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6977 	if (is_mc) {
6978 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6979 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6980 	}
6981 
6982 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6983 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6984 }
6985 
6986 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6987 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
6988 {
6989 	struct hclge_dev *hdev = vport->back;
6990 	struct hclge_desc desc;
6991 	u8 resp_code;
6992 	u16 retval;
6993 	int ret;
6994 
6995 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6996 
6997 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6998 
6999 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7000 	if (ret) {
7001 		dev_err(&hdev->pdev->dev,
7002 			"del mac addr failed for cmd_send, ret =%d.\n",
7003 			ret);
7004 		return ret;
7005 	}
7006 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7007 	retval = le16_to_cpu(desc.retval);
7008 
7009 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7010 					     HCLGE_MAC_VLAN_REMOVE);
7011 }
7012 
7013 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7014 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7015 				     struct hclge_desc *desc,
7016 				     bool is_mc)
7017 {
7018 	struct hclge_dev *hdev = vport->back;
7019 	u8 resp_code;
7020 	u16 retval;
7021 	int ret;
7022 
7023 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7024 	if (is_mc) {
7025 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7026 		memcpy(desc[0].data,
7027 		       req,
7028 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7029 		hclge_cmd_setup_basic_desc(&desc[1],
7030 					   HCLGE_OPC_MAC_VLAN_ADD,
7031 					   true);
7032 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7033 		hclge_cmd_setup_basic_desc(&desc[2],
7034 					   HCLGE_OPC_MAC_VLAN_ADD,
7035 					   true);
7036 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7037 	} else {
7038 		memcpy(desc[0].data,
7039 		       req,
7040 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7041 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7042 	}
7043 	if (ret) {
7044 		dev_err(&hdev->pdev->dev,
7045 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7046 			ret);
7047 		return ret;
7048 	}
7049 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7050 	retval = le16_to_cpu(desc[0].retval);
7051 
7052 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7053 					     HCLGE_MAC_VLAN_LKUP);
7054 }
7055 
7056 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7057 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7058 				  struct hclge_desc *mc_desc)
7059 {
7060 	struct hclge_dev *hdev = vport->back;
7061 	int cfg_status;
7062 	u8 resp_code;
7063 	u16 retval;
7064 	int ret;
7065 
7066 	if (!mc_desc) {
7067 		struct hclge_desc desc;
7068 
7069 		hclge_cmd_setup_basic_desc(&desc,
7070 					   HCLGE_OPC_MAC_VLAN_ADD,
7071 					   false);
7072 		memcpy(desc.data, req,
7073 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7074 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7075 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7076 		retval = le16_to_cpu(desc.retval);
7077 
7078 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7079 							   resp_code,
7080 							   HCLGE_MAC_VLAN_ADD);
7081 	} else {
7082 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7083 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7084 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7085 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7086 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7087 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7088 		memcpy(mc_desc[0].data, req,
7089 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7090 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7091 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7092 		retval = le16_to_cpu(mc_desc[0].retval);
7093 
7094 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7095 							   resp_code,
7096 							   HCLGE_MAC_VLAN_ADD);
7097 	}
7098 
7099 	if (ret) {
7100 		dev_err(&hdev->pdev->dev,
7101 			"add mac addr failed for cmd_send, ret =%d.\n",
7102 			ret);
7103 		return ret;
7104 	}
7105 
7106 	return cfg_status;
7107 }
7108 
7109 static int hclge_init_umv_space(struct hclge_dev *hdev)
7110 {
7111 	u16 allocated_size = 0;
7112 	int ret;
7113 
7114 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7115 				  true);
7116 	if (ret)
7117 		return ret;
7118 
7119 	if (allocated_size < hdev->wanted_umv_size)
7120 		dev_warn(&hdev->pdev->dev,
7121 			 "Alloc umv space failed, want %u, get %u\n",
7122 			 hdev->wanted_umv_size, allocated_size);
7123 
7124 	mutex_init(&hdev->umv_mutex);
7125 	hdev->max_umv_size = allocated_size;
7126 	/* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7127 	 * preserve some unicast mac vlan table entries shared by pf
7128 	 * and its vfs.
7129 	 */
7130 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7131 	hdev->share_umv_size = hdev->priv_umv_size +
7132 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7133 
7134 	return 0;
7135 }
7136 
7137 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7138 {
7139 	int ret;
7140 
7141 	if (hdev->max_umv_size > 0) {
7142 		ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7143 					  false);
7144 		if (ret)
7145 			return ret;
7146 		hdev->max_umv_size = 0;
7147 	}
7148 	mutex_destroy(&hdev->umv_mutex);
7149 
7150 	return 0;
7151 }
7152 
7153 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7154 			       u16 *allocated_size, bool is_alloc)
7155 {
7156 	struct hclge_umv_spc_alc_cmd *req;
7157 	struct hclge_desc desc;
7158 	int ret;
7159 
7160 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7161 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7162 	if (!is_alloc)
7163 		hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7164 
7165 	req->space_size = cpu_to_le32(space_size);
7166 
7167 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7168 	if (ret) {
7169 		dev_err(&hdev->pdev->dev,
7170 			"%s umv space failed for cmd_send, ret =%d\n",
7171 			is_alloc ? "allocate" : "free", ret);
7172 		return ret;
7173 	}
7174 
7175 	if (is_alloc && allocated_size)
7176 		*allocated_size = le32_to_cpu(desc.data[1]);
7177 
7178 	return 0;
7179 }
7180 
7181 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7182 {
7183 	struct hclge_vport *vport;
7184 	int i;
7185 
7186 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7187 		vport = &hdev->vport[i];
7188 		vport->used_umv_num = 0;
7189 	}
7190 
7191 	mutex_lock(&hdev->umv_mutex);
7192 	hdev->share_umv_size = hdev->priv_umv_size +
7193 			hdev->max_umv_size % (hdev->num_req_vfs + 2);
7194 	mutex_unlock(&hdev->umv_mutex);
7195 }
7196 
7197 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7198 {
7199 	struct hclge_dev *hdev = vport->back;
7200 	bool is_full;
7201 
7202 	mutex_lock(&hdev->umv_mutex);
7203 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7204 		   hdev->share_umv_size == 0);
7205 	mutex_unlock(&hdev->umv_mutex);
7206 
7207 	return is_full;
7208 }
7209 
7210 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7211 {
7212 	struct hclge_dev *hdev = vport->back;
7213 
7214 	mutex_lock(&hdev->umv_mutex);
7215 	if (is_free) {
7216 		if (vport->used_umv_num > hdev->priv_umv_size)
7217 			hdev->share_umv_size++;
7218 
7219 		if (vport->used_umv_num > 0)
7220 			vport->used_umv_num--;
7221 	} else {
7222 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7223 		    hdev->share_umv_size > 0)
7224 			hdev->share_umv_size--;
7225 		vport->used_umv_num++;
7226 	}
7227 	mutex_unlock(&hdev->umv_mutex);
7228 }
7229 
7230 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7231 			     const unsigned char *addr)
7232 {
7233 	struct hclge_vport *vport = hclge_get_vport(handle);
7234 
7235 	return hclge_add_uc_addr_common(vport, addr);
7236 }
7237 
7238 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7239 			     const unsigned char *addr)
7240 {
7241 	struct hclge_dev *hdev = vport->back;
7242 	struct hclge_mac_vlan_tbl_entry_cmd req;
7243 	struct hclge_desc desc;
7244 	u16 egress_port = 0;
7245 	int ret;
7246 
7247 	/* mac addr check */
7248 	if (is_zero_ether_addr(addr) ||
7249 	    is_broadcast_ether_addr(addr) ||
7250 	    is_multicast_ether_addr(addr)) {
7251 		dev_err(&hdev->pdev->dev,
7252 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7253 			 addr, is_zero_ether_addr(addr),
7254 			 is_broadcast_ether_addr(addr),
7255 			 is_multicast_ether_addr(addr));
7256 		return -EINVAL;
7257 	}
7258 
7259 	memset(&req, 0, sizeof(req));
7260 
7261 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7262 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7263 
7264 	req.egress_port = cpu_to_le16(egress_port);
7265 
7266 	hclge_prepare_mac_addr(&req, addr, false);
7267 
7268 	/* Lookup the mac address in the mac_vlan table, and add
7269 	 * it if the entry is inexistent. Repeated unicast entry
7270 	 * is not allowed in the mac vlan table.
7271 	 */
7272 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7273 	if (ret == -ENOENT) {
7274 		if (!hclge_is_umv_space_full(vport)) {
7275 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7276 			if (!ret)
7277 				hclge_update_umv_space(vport, false);
7278 			return ret;
7279 		}
7280 
7281 		dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7282 			hdev->priv_umv_size);
7283 
7284 		return -ENOSPC;
7285 	}
7286 
7287 	/* check if we just hit the duplicate */
7288 	if (!ret) {
7289 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7290 			 vport->vport_id, addr);
7291 		return 0;
7292 	}
7293 
7294 	dev_err(&hdev->pdev->dev,
7295 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7296 		addr);
7297 
7298 	return ret;
7299 }
7300 
7301 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7302 			    const unsigned char *addr)
7303 {
7304 	struct hclge_vport *vport = hclge_get_vport(handle);
7305 
7306 	return hclge_rm_uc_addr_common(vport, addr);
7307 }
7308 
7309 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7310 			    const unsigned char *addr)
7311 {
7312 	struct hclge_dev *hdev = vport->back;
7313 	struct hclge_mac_vlan_tbl_entry_cmd req;
7314 	int ret;
7315 
7316 	/* mac addr check */
7317 	if (is_zero_ether_addr(addr) ||
7318 	    is_broadcast_ether_addr(addr) ||
7319 	    is_multicast_ether_addr(addr)) {
7320 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7321 			addr);
7322 		return -EINVAL;
7323 	}
7324 
7325 	memset(&req, 0, sizeof(req));
7326 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7327 	hclge_prepare_mac_addr(&req, addr, false);
7328 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7329 	if (!ret)
7330 		hclge_update_umv_space(vport, true);
7331 
7332 	return ret;
7333 }
7334 
7335 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7336 			     const unsigned char *addr)
7337 {
7338 	struct hclge_vport *vport = hclge_get_vport(handle);
7339 
7340 	return hclge_add_mc_addr_common(vport, addr);
7341 }
7342 
7343 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7344 			     const unsigned char *addr)
7345 {
7346 	struct hclge_dev *hdev = vport->back;
7347 	struct hclge_mac_vlan_tbl_entry_cmd req;
7348 	struct hclge_desc desc[3];
7349 	int status;
7350 
7351 	/* mac addr check */
7352 	if (!is_multicast_ether_addr(addr)) {
7353 		dev_err(&hdev->pdev->dev,
7354 			"Add mc mac err! invalid mac:%pM.\n",
7355 			 addr);
7356 		return -EINVAL;
7357 	}
7358 	memset(&req, 0, sizeof(req));
7359 	hclge_prepare_mac_addr(&req, addr, true);
7360 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7361 	if (status) {
7362 		/* This mac addr do not exist, add new entry for it */
7363 		memset(desc[0].data, 0, sizeof(desc[0].data));
7364 		memset(desc[1].data, 0, sizeof(desc[0].data));
7365 		memset(desc[2].data, 0, sizeof(desc[0].data));
7366 	}
7367 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7368 	if (status)
7369 		return status;
7370 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7371 
7372 	if (status == -ENOSPC)
7373 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7374 
7375 	return status;
7376 }
7377 
7378 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7379 			    const unsigned char *addr)
7380 {
7381 	struct hclge_vport *vport = hclge_get_vport(handle);
7382 
7383 	return hclge_rm_mc_addr_common(vport, addr);
7384 }
7385 
7386 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7387 			    const unsigned char *addr)
7388 {
7389 	struct hclge_dev *hdev = vport->back;
7390 	struct hclge_mac_vlan_tbl_entry_cmd req;
7391 	enum hclge_cmd_status status;
7392 	struct hclge_desc desc[3];
7393 
7394 	/* mac addr check */
7395 	if (!is_multicast_ether_addr(addr)) {
7396 		dev_dbg(&hdev->pdev->dev,
7397 			"Remove mc mac err! invalid mac:%pM.\n",
7398 			 addr);
7399 		return -EINVAL;
7400 	}
7401 
7402 	memset(&req, 0, sizeof(req));
7403 	hclge_prepare_mac_addr(&req, addr, true);
7404 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7405 	if (!status) {
7406 		/* This mac addr exist, remove this handle's VFID for it */
7407 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7408 		if (status)
7409 			return status;
7410 
7411 		if (hclge_is_all_function_id_zero(desc))
7412 			/* All the vfid is zero, so need to delete this entry */
7413 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7414 		else
7415 			/* Not all the vfid is zero, update the vfid */
7416 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7417 
7418 	} else {
7419 		/* Maybe this mac address is in mta table, but it cannot be
7420 		 * deleted here because an entry of mta represents an address
7421 		 * range rather than a specific address. the delete action to
7422 		 * all entries will take effect in update_mta_status called by
7423 		 * hns3_nic_set_rx_mode.
7424 		 */
7425 		status = 0;
7426 	}
7427 
7428 	return status;
7429 }
7430 
7431 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7432 			       enum HCLGE_MAC_ADDR_TYPE mac_type)
7433 {
7434 	struct hclge_vport_mac_addr_cfg *mac_cfg;
7435 	struct list_head *list;
7436 
7437 	if (!vport->vport_id)
7438 		return;
7439 
7440 	mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7441 	if (!mac_cfg)
7442 		return;
7443 
7444 	mac_cfg->hd_tbl_status = true;
7445 	memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7446 
7447 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7448 	       &vport->uc_mac_list : &vport->mc_mac_list;
7449 
7450 	list_add_tail(&mac_cfg->node, list);
7451 }
7452 
7453 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7454 			      bool is_write_tbl,
7455 			      enum HCLGE_MAC_ADDR_TYPE mac_type)
7456 {
7457 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7458 	struct list_head *list;
7459 	bool uc_flag, mc_flag;
7460 
7461 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7462 	       &vport->uc_mac_list : &vport->mc_mac_list;
7463 
7464 	uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7465 	mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7466 
7467 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7468 		if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7469 			if (uc_flag && mac_cfg->hd_tbl_status)
7470 				hclge_rm_uc_addr_common(vport, mac_addr);
7471 
7472 			if (mc_flag && mac_cfg->hd_tbl_status)
7473 				hclge_rm_mc_addr_common(vport, mac_addr);
7474 
7475 			list_del(&mac_cfg->node);
7476 			kfree(mac_cfg);
7477 			break;
7478 		}
7479 	}
7480 }
7481 
7482 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7483 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7484 {
7485 	struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7486 	struct list_head *list;
7487 
7488 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7489 	       &vport->uc_mac_list : &vport->mc_mac_list;
7490 
7491 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7492 		if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7493 			hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7494 
7495 		if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7496 			hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7497 
7498 		mac_cfg->hd_tbl_status = false;
7499 		if (is_del_list) {
7500 			list_del(&mac_cfg->node);
7501 			kfree(mac_cfg);
7502 		}
7503 	}
7504 }
7505 
7506 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7507 {
7508 	struct hclge_vport_mac_addr_cfg *mac, *tmp;
7509 	struct hclge_vport *vport;
7510 	int i;
7511 
7512 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7513 		vport = &hdev->vport[i];
7514 		list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7515 			list_del(&mac->node);
7516 			kfree(mac);
7517 		}
7518 
7519 		list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7520 			list_del(&mac->node);
7521 			kfree(mac);
7522 		}
7523 	}
7524 }
7525 
7526 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7527 					      u16 cmdq_resp, u8 resp_code)
7528 {
7529 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7530 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7531 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7532 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7533 
7534 	int return_status;
7535 
7536 	if (cmdq_resp) {
7537 		dev_err(&hdev->pdev->dev,
7538 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7539 			cmdq_resp);
7540 		return -EIO;
7541 	}
7542 
7543 	switch (resp_code) {
7544 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7545 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7546 		return_status = 0;
7547 		break;
7548 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7549 		dev_err(&hdev->pdev->dev,
7550 			"add mac ethertype failed for manager table overflow.\n");
7551 		return_status = -EIO;
7552 		break;
7553 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7554 		dev_err(&hdev->pdev->dev,
7555 			"add mac ethertype failed for key conflict.\n");
7556 		return_status = -EIO;
7557 		break;
7558 	default:
7559 		dev_err(&hdev->pdev->dev,
7560 			"add mac ethertype failed for undefined, code=%u.\n",
7561 			resp_code);
7562 		return_status = -EIO;
7563 	}
7564 
7565 	return return_status;
7566 }
7567 
7568 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7569 				     u8 *mac_addr)
7570 {
7571 	struct hclge_mac_vlan_tbl_entry_cmd req;
7572 	struct hclge_dev *hdev = vport->back;
7573 	struct hclge_desc desc;
7574 	u16 egress_port = 0;
7575 	int i;
7576 
7577 	if (is_zero_ether_addr(mac_addr))
7578 		return false;
7579 
7580 	memset(&req, 0, sizeof(req));
7581 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7582 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7583 	req.egress_port = cpu_to_le16(egress_port);
7584 	hclge_prepare_mac_addr(&req, mac_addr, false);
7585 
7586 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7587 		return true;
7588 
7589 	vf_idx += HCLGE_VF_VPORT_START_NUM;
7590 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7591 		if (i != vf_idx &&
7592 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7593 			return true;
7594 
7595 	return false;
7596 }
7597 
7598 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7599 			    u8 *mac_addr)
7600 {
7601 	struct hclge_vport *vport = hclge_get_vport(handle);
7602 	struct hclge_dev *hdev = vport->back;
7603 
7604 	vport = hclge_get_vf_vport(hdev, vf);
7605 	if (!vport)
7606 		return -EINVAL;
7607 
7608 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7609 		dev_info(&hdev->pdev->dev,
7610 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
7611 			 mac_addr);
7612 		return 0;
7613 	}
7614 
7615 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7616 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7617 			mac_addr);
7618 		return -EEXIST;
7619 	}
7620 
7621 	ether_addr_copy(vport->vf_info.mac, mac_addr);
7622 
7623 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7624 		dev_info(&hdev->pdev->dev,
7625 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7626 			 vf, mac_addr);
7627 		return hclge_inform_reset_assert_to_vf(vport);
7628 	}
7629 
7630 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
7631 		 vf, mac_addr);
7632 	return 0;
7633 }
7634 
7635 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7636 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
7637 {
7638 	struct hclge_desc desc;
7639 	u8 resp_code;
7640 	u16 retval;
7641 	int ret;
7642 
7643 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7644 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7645 
7646 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7647 	if (ret) {
7648 		dev_err(&hdev->pdev->dev,
7649 			"add mac ethertype failed for cmd_send, ret =%d.\n",
7650 			ret);
7651 		return ret;
7652 	}
7653 
7654 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7655 	retval = le16_to_cpu(desc.retval);
7656 
7657 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7658 }
7659 
7660 static int init_mgr_tbl(struct hclge_dev *hdev)
7661 {
7662 	int ret;
7663 	int i;
7664 
7665 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7666 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7667 		if (ret) {
7668 			dev_err(&hdev->pdev->dev,
7669 				"add mac ethertype failed, ret =%d.\n",
7670 				ret);
7671 			return ret;
7672 		}
7673 	}
7674 
7675 	return 0;
7676 }
7677 
7678 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7679 {
7680 	struct hclge_vport *vport = hclge_get_vport(handle);
7681 	struct hclge_dev *hdev = vport->back;
7682 
7683 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
7684 }
7685 
7686 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7687 			      bool is_first)
7688 {
7689 	const unsigned char *new_addr = (const unsigned char *)p;
7690 	struct hclge_vport *vport = hclge_get_vport(handle);
7691 	struct hclge_dev *hdev = vport->back;
7692 	int ret;
7693 
7694 	/* mac addr check */
7695 	if (is_zero_ether_addr(new_addr) ||
7696 	    is_broadcast_ether_addr(new_addr) ||
7697 	    is_multicast_ether_addr(new_addr)) {
7698 		dev_err(&hdev->pdev->dev,
7699 			"Change uc mac err! invalid mac:%pM.\n",
7700 			 new_addr);
7701 		return -EINVAL;
7702 	}
7703 
7704 	if ((!is_first || is_kdump_kernel()) &&
7705 	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7706 		dev_warn(&hdev->pdev->dev,
7707 			 "remove old uc mac address fail.\n");
7708 
7709 	ret = hclge_add_uc_addr(handle, new_addr);
7710 	if (ret) {
7711 		dev_err(&hdev->pdev->dev,
7712 			"add uc mac address fail, ret =%d.\n",
7713 			ret);
7714 
7715 		if (!is_first &&
7716 		    hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7717 			dev_err(&hdev->pdev->dev,
7718 				"restore uc mac address fail.\n");
7719 
7720 		return -EIO;
7721 	}
7722 
7723 	ret = hclge_pause_addr_cfg(hdev, new_addr);
7724 	if (ret) {
7725 		dev_err(&hdev->pdev->dev,
7726 			"configure mac pause address fail, ret =%d.\n",
7727 			ret);
7728 		return -EIO;
7729 	}
7730 
7731 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7732 
7733 	return 0;
7734 }
7735 
7736 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7737 			  int cmd)
7738 {
7739 	struct hclge_vport *vport = hclge_get_vport(handle);
7740 	struct hclge_dev *hdev = vport->back;
7741 
7742 	if (!hdev->hw.mac.phydev)
7743 		return -EOPNOTSUPP;
7744 
7745 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7746 }
7747 
7748 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7749 				      u8 fe_type, bool filter_en, u8 vf_id)
7750 {
7751 	struct hclge_vlan_filter_ctrl_cmd *req;
7752 	struct hclge_desc desc;
7753 	int ret;
7754 
7755 	/* read current vlan filter parameter */
7756 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
7757 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7758 	req->vlan_type = vlan_type;
7759 	req->vf_id = vf_id;
7760 
7761 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7762 	if (ret) {
7763 		dev_err(&hdev->pdev->dev,
7764 			"failed to get vlan filter config, ret = %d.\n", ret);
7765 		return ret;
7766 	}
7767 
7768 	/* modify and write new config parameter */
7769 	hclge_cmd_reuse_desc(&desc, false);
7770 	req->vlan_fe = filter_en ?
7771 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
7772 
7773 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7774 	if (ret)
7775 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
7776 			ret);
7777 
7778 	return ret;
7779 }
7780 
7781 #define HCLGE_FILTER_TYPE_VF		0
7782 #define HCLGE_FILTER_TYPE_PORT		1
7783 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
7784 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
7785 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
7786 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
7787 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
7788 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
7789 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
7790 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
7791 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
7792 
7793 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7794 {
7795 	struct hclge_vport *vport = hclge_get_vport(handle);
7796 	struct hclge_dev *hdev = vport->back;
7797 
7798 	if (hdev->pdev->revision >= 0x21) {
7799 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7800 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
7801 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7802 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
7803 	} else {
7804 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7805 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7806 					   0);
7807 	}
7808 	if (enable)
7809 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
7810 	else
7811 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7812 }
7813 
7814 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7815 				    bool is_kill, u16 vlan,
7816 				    __be16 proto)
7817 {
7818 	struct hclge_vport *vport = &hdev->vport[vfid];
7819 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
7820 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
7821 	struct hclge_desc desc[2];
7822 	u8 vf_byte_val;
7823 	u8 vf_byte_off;
7824 	int ret;
7825 
7826 	/* if vf vlan table is full, firmware will close vf vlan filter, it
7827 	 * is unable and unnecessary to add new vlan id to vf vlan filter.
7828 	 * If spoof check is enable, and vf vlan is full, it shouldn't add
7829 	 * new vlan, because tx packets with these vlan id will be dropped.
7830 	 */
7831 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7832 		if (vport->vf_info.spoofchk && vlan) {
7833 			dev_err(&hdev->pdev->dev,
7834 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
7835 			return -EPERM;
7836 		}
7837 		return 0;
7838 	}
7839 
7840 	hclge_cmd_setup_basic_desc(&desc[0],
7841 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7842 	hclge_cmd_setup_basic_desc(&desc[1],
7843 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7844 
7845 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7846 
7847 	vf_byte_off = vfid / 8;
7848 	vf_byte_val = 1 << (vfid % 8);
7849 
7850 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7851 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7852 
7853 	req0->vlan_id  = cpu_to_le16(vlan);
7854 	req0->vlan_cfg = is_kill;
7855 
7856 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7857 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7858 	else
7859 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7860 
7861 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
7862 	if (ret) {
7863 		dev_err(&hdev->pdev->dev,
7864 			"Send vf vlan command fail, ret =%d.\n",
7865 			ret);
7866 		return ret;
7867 	}
7868 
7869 	if (!is_kill) {
7870 #define HCLGE_VF_VLAN_NO_ENTRY	2
7871 		if (!req0->resp_code || req0->resp_code == 1)
7872 			return 0;
7873 
7874 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7875 			set_bit(vfid, hdev->vf_vlan_full);
7876 			dev_warn(&hdev->pdev->dev,
7877 				 "vf vlan table is full, vf vlan filter is disabled\n");
7878 			return 0;
7879 		}
7880 
7881 		dev_err(&hdev->pdev->dev,
7882 			"Add vf vlan filter fail, ret =%u.\n",
7883 			req0->resp_code);
7884 	} else {
7885 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
7886 		if (!req0->resp_code)
7887 			return 0;
7888 
7889 		/* vf vlan filter is disabled when vf vlan table is full,
7890 		 * then new vlan id will not be added into vf vlan table.
7891 		 * Just return 0 without warning, avoid massive verbose
7892 		 * print logs when unload.
7893 		 */
7894 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7895 			return 0;
7896 
7897 		dev_err(&hdev->pdev->dev,
7898 			"Kill vf vlan filter fail, ret =%u.\n",
7899 			req0->resp_code);
7900 	}
7901 
7902 	return -EIO;
7903 }
7904 
7905 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7906 				      u16 vlan_id, bool is_kill)
7907 {
7908 	struct hclge_vlan_filter_pf_cfg_cmd *req;
7909 	struct hclge_desc desc;
7910 	u8 vlan_offset_byte_val;
7911 	u8 vlan_offset_byte;
7912 	u8 vlan_offset_160;
7913 	int ret;
7914 
7915 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7916 
7917 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7918 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7919 			   HCLGE_VLAN_BYTE_SIZE;
7920 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7921 
7922 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7923 	req->vlan_offset = vlan_offset_160;
7924 	req->vlan_cfg = is_kill;
7925 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7926 
7927 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7928 	if (ret)
7929 		dev_err(&hdev->pdev->dev,
7930 			"port vlan command, send fail, ret =%d.\n", ret);
7931 	return ret;
7932 }
7933 
7934 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7935 				    u16 vport_id, u16 vlan_id,
7936 				    bool is_kill)
7937 {
7938 	u16 vport_idx, vport_num = 0;
7939 	int ret;
7940 
7941 	if (is_kill && !vlan_id)
7942 		return 0;
7943 
7944 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7945 				       proto);
7946 	if (ret) {
7947 		dev_err(&hdev->pdev->dev,
7948 			"Set %u vport vlan filter config fail, ret =%d.\n",
7949 			vport_id, ret);
7950 		return ret;
7951 	}
7952 
7953 	/* vlan 0 may be added twice when 8021q module is enabled */
7954 	if (!is_kill && !vlan_id &&
7955 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
7956 		return 0;
7957 
7958 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7959 		dev_err(&hdev->pdev->dev,
7960 			"Add port vlan failed, vport %u is already in vlan %u\n",
7961 			vport_id, vlan_id);
7962 		return -EINVAL;
7963 	}
7964 
7965 	if (is_kill &&
7966 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7967 		dev_err(&hdev->pdev->dev,
7968 			"Delete port vlan failed, vport %u is not in vlan %u\n",
7969 			vport_id, vlan_id);
7970 		return -EINVAL;
7971 	}
7972 
7973 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7974 		vport_num++;
7975 
7976 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7977 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7978 						 is_kill);
7979 
7980 	return ret;
7981 }
7982 
7983 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7984 {
7985 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7986 	struct hclge_vport_vtag_tx_cfg_cmd *req;
7987 	struct hclge_dev *hdev = vport->back;
7988 	struct hclge_desc desc;
7989 	u16 bmap_index;
7990 	int status;
7991 
7992 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7993 
7994 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7995 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7996 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7997 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7998 		      vcfg->accept_tag1 ? 1 : 0);
7999 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8000 		      vcfg->accept_untag1 ? 1 : 0);
8001 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8002 		      vcfg->accept_tag2 ? 1 : 0);
8003 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8004 		      vcfg->accept_untag2 ? 1 : 0);
8005 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8006 		      vcfg->insert_tag1_en ? 1 : 0);
8007 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8008 		      vcfg->insert_tag2_en ? 1 : 0);
8009 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8010 
8011 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8012 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8013 			HCLGE_VF_NUM_PER_BYTE;
8014 	req->vf_bitmap[bmap_index] =
8015 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8016 
8017 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8018 	if (status)
8019 		dev_err(&hdev->pdev->dev,
8020 			"Send port txvlan cfg command fail, ret =%d\n",
8021 			status);
8022 
8023 	return status;
8024 }
8025 
8026 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8027 {
8028 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8029 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8030 	struct hclge_dev *hdev = vport->back;
8031 	struct hclge_desc desc;
8032 	u16 bmap_index;
8033 	int status;
8034 
8035 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8036 
8037 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8038 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8039 		      vcfg->strip_tag1_en ? 1 : 0);
8040 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8041 		      vcfg->strip_tag2_en ? 1 : 0);
8042 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8043 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8044 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8045 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8046 
8047 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8048 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8049 			HCLGE_VF_NUM_PER_BYTE;
8050 	req->vf_bitmap[bmap_index] =
8051 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8052 
8053 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8054 	if (status)
8055 		dev_err(&hdev->pdev->dev,
8056 			"Send port rxvlan cfg command fail, ret =%d\n",
8057 			status);
8058 
8059 	return status;
8060 }
8061 
8062 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8063 				  u16 port_base_vlan_state,
8064 				  u16 vlan_tag)
8065 {
8066 	int ret;
8067 
8068 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8069 		vport->txvlan_cfg.accept_tag1 = true;
8070 		vport->txvlan_cfg.insert_tag1_en = false;
8071 		vport->txvlan_cfg.default_tag1 = 0;
8072 	} else {
8073 		vport->txvlan_cfg.accept_tag1 = false;
8074 		vport->txvlan_cfg.insert_tag1_en = true;
8075 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8076 	}
8077 
8078 	vport->txvlan_cfg.accept_untag1 = true;
8079 
8080 	/* accept_tag2 and accept_untag2 are not supported on
8081 	 * pdev revision(0x20), new revision support them,
8082 	 * this two fields can not be configured by user.
8083 	 */
8084 	vport->txvlan_cfg.accept_tag2 = true;
8085 	vport->txvlan_cfg.accept_untag2 = true;
8086 	vport->txvlan_cfg.insert_tag2_en = false;
8087 	vport->txvlan_cfg.default_tag2 = 0;
8088 
8089 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8090 		vport->rxvlan_cfg.strip_tag1_en = false;
8091 		vport->rxvlan_cfg.strip_tag2_en =
8092 				vport->rxvlan_cfg.rx_vlan_offload_en;
8093 	} else {
8094 		vport->rxvlan_cfg.strip_tag1_en =
8095 				vport->rxvlan_cfg.rx_vlan_offload_en;
8096 		vport->rxvlan_cfg.strip_tag2_en = true;
8097 	}
8098 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8099 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8100 
8101 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8102 	if (ret)
8103 		return ret;
8104 
8105 	return hclge_set_vlan_rx_offload_cfg(vport);
8106 }
8107 
8108 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8109 {
8110 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8111 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8112 	struct hclge_desc desc;
8113 	int status;
8114 
8115 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8116 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8117 	rx_req->ot_fst_vlan_type =
8118 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8119 	rx_req->ot_sec_vlan_type =
8120 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8121 	rx_req->in_fst_vlan_type =
8122 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8123 	rx_req->in_sec_vlan_type =
8124 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8125 
8126 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8127 	if (status) {
8128 		dev_err(&hdev->pdev->dev,
8129 			"Send rxvlan protocol type command fail, ret =%d\n",
8130 			status);
8131 		return status;
8132 	}
8133 
8134 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8135 
8136 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8137 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8138 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8139 
8140 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8141 	if (status)
8142 		dev_err(&hdev->pdev->dev,
8143 			"Send txvlan protocol type command fail, ret =%d\n",
8144 			status);
8145 
8146 	return status;
8147 }
8148 
8149 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8150 {
8151 #define HCLGE_DEF_VLAN_TYPE		0x8100
8152 
8153 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8154 	struct hclge_vport *vport;
8155 	int ret;
8156 	int i;
8157 
8158 	if (hdev->pdev->revision >= 0x21) {
8159 		/* for revision 0x21, vf vlan filter is per function */
8160 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8161 			vport = &hdev->vport[i];
8162 			ret = hclge_set_vlan_filter_ctrl(hdev,
8163 							 HCLGE_FILTER_TYPE_VF,
8164 							 HCLGE_FILTER_FE_EGRESS,
8165 							 true,
8166 							 vport->vport_id);
8167 			if (ret)
8168 				return ret;
8169 		}
8170 
8171 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8172 						 HCLGE_FILTER_FE_INGRESS, true,
8173 						 0);
8174 		if (ret)
8175 			return ret;
8176 	} else {
8177 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8178 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8179 						 true, 0);
8180 		if (ret)
8181 			return ret;
8182 	}
8183 
8184 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8185 
8186 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8187 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8188 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8189 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8190 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8191 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8192 
8193 	ret = hclge_set_vlan_protocol_type(hdev);
8194 	if (ret)
8195 		return ret;
8196 
8197 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8198 		u16 vlan_tag;
8199 
8200 		vport = &hdev->vport[i];
8201 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8202 
8203 		ret = hclge_vlan_offload_cfg(vport,
8204 					     vport->port_base_vlan_cfg.state,
8205 					     vlan_tag);
8206 		if (ret)
8207 			return ret;
8208 	}
8209 
8210 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8211 }
8212 
8213 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8214 				       bool writen_to_tbl)
8215 {
8216 	struct hclge_vport_vlan_cfg *vlan;
8217 
8218 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8219 	if (!vlan)
8220 		return;
8221 
8222 	vlan->hd_tbl_status = writen_to_tbl;
8223 	vlan->vlan_id = vlan_id;
8224 
8225 	list_add_tail(&vlan->node, &vport->vlan_list);
8226 }
8227 
8228 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8229 {
8230 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8231 	struct hclge_dev *hdev = vport->back;
8232 	int ret;
8233 
8234 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8235 		if (!vlan->hd_tbl_status) {
8236 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8237 						       vport->vport_id,
8238 						       vlan->vlan_id, false);
8239 			if (ret) {
8240 				dev_err(&hdev->pdev->dev,
8241 					"restore vport vlan list failed, ret=%d\n",
8242 					ret);
8243 				return ret;
8244 			}
8245 		}
8246 		vlan->hd_tbl_status = true;
8247 	}
8248 
8249 	return 0;
8250 }
8251 
8252 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8253 				      bool is_write_tbl)
8254 {
8255 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8256 	struct hclge_dev *hdev = vport->back;
8257 
8258 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8259 		if (vlan->vlan_id == vlan_id) {
8260 			if (is_write_tbl && vlan->hd_tbl_status)
8261 				hclge_set_vlan_filter_hw(hdev,
8262 							 htons(ETH_P_8021Q),
8263 							 vport->vport_id,
8264 							 vlan_id,
8265 							 true);
8266 
8267 			list_del(&vlan->node);
8268 			kfree(vlan);
8269 			break;
8270 		}
8271 	}
8272 }
8273 
8274 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8275 {
8276 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8277 	struct hclge_dev *hdev = vport->back;
8278 
8279 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8280 		if (vlan->hd_tbl_status)
8281 			hclge_set_vlan_filter_hw(hdev,
8282 						 htons(ETH_P_8021Q),
8283 						 vport->vport_id,
8284 						 vlan->vlan_id,
8285 						 true);
8286 
8287 		vlan->hd_tbl_status = false;
8288 		if (is_del_list) {
8289 			list_del(&vlan->node);
8290 			kfree(vlan);
8291 		}
8292 	}
8293 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
8294 }
8295 
8296 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8297 {
8298 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8299 	struct hclge_vport *vport;
8300 	int i;
8301 
8302 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8303 		vport = &hdev->vport[i];
8304 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8305 			list_del(&vlan->node);
8306 			kfree(vlan);
8307 		}
8308 	}
8309 }
8310 
8311 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8312 {
8313 	struct hclge_vport *vport = hclge_get_vport(handle);
8314 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8315 	struct hclge_dev *hdev = vport->back;
8316 	u16 vlan_proto;
8317 	u16 state, vlan_id;
8318 	int i;
8319 
8320 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8321 		vport = &hdev->vport[i];
8322 		vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8323 		vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8324 		state = vport->port_base_vlan_cfg.state;
8325 
8326 		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8327 			hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8328 						 vport->vport_id, vlan_id,
8329 						 false);
8330 			continue;
8331 		}
8332 
8333 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8334 			int ret;
8335 
8336 			if (!vlan->hd_tbl_status)
8337 				continue;
8338 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8339 						       vport->vport_id,
8340 						       vlan->vlan_id, false);
8341 			if (ret)
8342 				break;
8343 		}
8344 	}
8345 }
8346 
8347 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8348 {
8349 	struct hclge_vport *vport = hclge_get_vport(handle);
8350 
8351 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8352 		vport->rxvlan_cfg.strip_tag1_en = false;
8353 		vport->rxvlan_cfg.strip_tag2_en = enable;
8354 	} else {
8355 		vport->rxvlan_cfg.strip_tag1_en = enable;
8356 		vport->rxvlan_cfg.strip_tag2_en = true;
8357 	}
8358 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8359 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8360 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8361 
8362 	return hclge_set_vlan_rx_offload_cfg(vport);
8363 }
8364 
8365 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8366 					    u16 port_base_vlan_state,
8367 					    struct hclge_vlan_info *new_info,
8368 					    struct hclge_vlan_info *old_info)
8369 {
8370 	struct hclge_dev *hdev = vport->back;
8371 	int ret;
8372 
8373 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8374 		hclge_rm_vport_all_vlan_table(vport, false);
8375 		return hclge_set_vlan_filter_hw(hdev,
8376 						 htons(new_info->vlan_proto),
8377 						 vport->vport_id,
8378 						 new_info->vlan_tag,
8379 						 false);
8380 	}
8381 
8382 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8383 				       vport->vport_id, old_info->vlan_tag,
8384 				       true);
8385 	if (ret)
8386 		return ret;
8387 
8388 	return hclge_add_vport_all_vlan_table(vport);
8389 }
8390 
8391 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8392 				    struct hclge_vlan_info *vlan_info)
8393 {
8394 	struct hnae3_handle *nic = &vport->nic;
8395 	struct hclge_vlan_info *old_vlan_info;
8396 	struct hclge_dev *hdev = vport->back;
8397 	int ret;
8398 
8399 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8400 
8401 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8402 	if (ret)
8403 		return ret;
8404 
8405 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8406 		/* add new VLAN tag */
8407 		ret = hclge_set_vlan_filter_hw(hdev,
8408 					       htons(vlan_info->vlan_proto),
8409 					       vport->vport_id,
8410 					       vlan_info->vlan_tag,
8411 					       false);
8412 		if (ret)
8413 			return ret;
8414 
8415 		/* remove old VLAN tag */
8416 		ret = hclge_set_vlan_filter_hw(hdev,
8417 					       htons(old_vlan_info->vlan_proto),
8418 					       vport->vport_id,
8419 					       old_vlan_info->vlan_tag,
8420 					       true);
8421 		if (ret)
8422 			return ret;
8423 
8424 		goto update;
8425 	}
8426 
8427 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8428 					       old_vlan_info);
8429 	if (ret)
8430 		return ret;
8431 
8432 	/* update state only when disable/enable port based VLAN */
8433 	vport->port_base_vlan_cfg.state = state;
8434 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8435 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8436 	else
8437 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8438 
8439 update:
8440 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8441 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8442 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8443 
8444 	return 0;
8445 }
8446 
8447 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8448 					  enum hnae3_port_base_vlan_state state,
8449 					  u16 vlan)
8450 {
8451 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8452 		if (!vlan)
8453 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8454 		else
8455 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8456 	} else {
8457 		if (!vlan)
8458 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8459 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8460 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8461 		else
8462 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8463 	}
8464 }
8465 
8466 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8467 				    u16 vlan, u8 qos, __be16 proto)
8468 {
8469 	struct hclge_vport *vport = hclge_get_vport(handle);
8470 	struct hclge_dev *hdev = vport->back;
8471 	struct hclge_vlan_info vlan_info;
8472 	u16 state;
8473 	int ret;
8474 
8475 	if (hdev->pdev->revision == 0x20)
8476 		return -EOPNOTSUPP;
8477 
8478 	vport = hclge_get_vf_vport(hdev, vfid);
8479 	if (!vport)
8480 		return -EINVAL;
8481 
8482 	/* qos is a 3 bits value, so can not be bigger than 7 */
8483 	if (vlan > VLAN_N_VID - 1 || qos > 7)
8484 		return -EINVAL;
8485 	if (proto != htons(ETH_P_8021Q))
8486 		return -EPROTONOSUPPORT;
8487 
8488 	state = hclge_get_port_base_vlan_state(vport,
8489 					       vport->port_base_vlan_cfg.state,
8490 					       vlan);
8491 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8492 		return 0;
8493 
8494 	vlan_info.vlan_tag = vlan;
8495 	vlan_info.qos = qos;
8496 	vlan_info.vlan_proto = ntohs(proto);
8497 
8498 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8499 		return hclge_update_port_base_vlan_cfg(vport, state,
8500 						       &vlan_info);
8501 	} else {
8502 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8503 							vport->vport_id, state,
8504 							vlan, qos,
8505 							ntohs(proto));
8506 		return ret;
8507 	}
8508 }
8509 
8510 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
8511 {
8512 	struct hclge_vlan_info *vlan_info;
8513 	struct hclge_vport *vport;
8514 	int ret;
8515 	int vf;
8516 
8517 	/* clear port base vlan for all vf */
8518 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
8519 		vport = &hdev->vport[vf];
8520 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8521 
8522 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8523 					       vport->vport_id,
8524 					       vlan_info->vlan_tag, true);
8525 		if (ret)
8526 			dev_err(&hdev->pdev->dev,
8527 				"failed to clear vf vlan for vf%d, ret = %d\n",
8528 				vf - HCLGE_VF_VPORT_START_NUM, ret);
8529 	}
8530 }
8531 
8532 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8533 			  u16 vlan_id, bool is_kill)
8534 {
8535 	struct hclge_vport *vport = hclge_get_vport(handle);
8536 	struct hclge_dev *hdev = vport->back;
8537 	bool writen_to_tbl = false;
8538 	int ret = 0;
8539 
8540 	/* When device is resetting, firmware is unable to handle
8541 	 * mailbox. Just record the vlan id, and remove it after
8542 	 * reset finished.
8543 	 */
8544 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8545 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8546 		return -EBUSY;
8547 	}
8548 
8549 	/* when port base vlan enabled, we use port base vlan as the vlan
8550 	 * filter entry. In this case, we don't update vlan filter table
8551 	 * when user add new vlan or remove exist vlan, just update the vport
8552 	 * vlan list. The vlan id in vlan list will be writen in vlan filter
8553 	 * table until port base vlan disabled
8554 	 */
8555 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8556 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8557 					       vlan_id, is_kill);
8558 		writen_to_tbl = true;
8559 	}
8560 
8561 	if (!ret) {
8562 		if (is_kill)
8563 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8564 		else
8565 			hclge_add_vport_vlan_table(vport, vlan_id,
8566 						   writen_to_tbl);
8567 	} else if (is_kill) {
8568 		/* when remove hw vlan filter failed, record the vlan id,
8569 		 * and try to remove it from hw later, to be consistence
8570 		 * with stack
8571 		 */
8572 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
8573 	}
8574 	return ret;
8575 }
8576 
8577 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8578 {
8579 #define HCLGE_MAX_SYNC_COUNT	60
8580 
8581 	int i, ret, sync_cnt = 0;
8582 	u16 vlan_id;
8583 
8584 	/* start from vport 1 for PF is always alive */
8585 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8586 		struct hclge_vport *vport = &hdev->vport[i];
8587 
8588 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8589 					 VLAN_N_VID);
8590 		while (vlan_id != VLAN_N_VID) {
8591 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8592 						       vport->vport_id, vlan_id,
8593 						       true);
8594 			if (ret && ret != -EINVAL)
8595 				return;
8596 
8597 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8598 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
8599 
8600 			sync_cnt++;
8601 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8602 				return;
8603 
8604 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8605 						 VLAN_N_VID);
8606 		}
8607 	}
8608 }
8609 
8610 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8611 {
8612 	struct hclge_config_max_frm_size_cmd *req;
8613 	struct hclge_desc desc;
8614 
8615 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8616 
8617 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8618 	req->max_frm_size = cpu_to_le16(new_mps);
8619 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8620 
8621 	return hclge_cmd_send(&hdev->hw, &desc, 1);
8622 }
8623 
8624 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8625 {
8626 	struct hclge_vport *vport = hclge_get_vport(handle);
8627 
8628 	return hclge_set_vport_mtu(vport, new_mtu);
8629 }
8630 
8631 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8632 {
8633 	struct hclge_dev *hdev = vport->back;
8634 	int i, max_frm_size, ret;
8635 
8636 	/* HW supprt 2 layer vlan */
8637 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8638 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8639 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
8640 		return -EINVAL;
8641 
8642 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8643 	mutex_lock(&hdev->vport_lock);
8644 	/* VF's mps must fit within hdev->mps */
8645 	if (vport->vport_id && max_frm_size > hdev->mps) {
8646 		mutex_unlock(&hdev->vport_lock);
8647 		return -EINVAL;
8648 	} else if (vport->vport_id) {
8649 		vport->mps = max_frm_size;
8650 		mutex_unlock(&hdev->vport_lock);
8651 		return 0;
8652 	}
8653 
8654 	/* PF's mps must be greater then VF's mps */
8655 	for (i = 1; i < hdev->num_alloc_vport; i++)
8656 		if (max_frm_size < hdev->vport[i].mps) {
8657 			mutex_unlock(&hdev->vport_lock);
8658 			return -EINVAL;
8659 		}
8660 
8661 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8662 
8663 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
8664 	if (ret) {
8665 		dev_err(&hdev->pdev->dev,
8666 			"Change mtu fail, ret =%d\n", ret);
8667 		goto out;
8668 	}
8669 
8670 	hdev->mps = max_frm_size;
8671 	vport->mps = max_frm_size;
8672 
8673 	ret = hclge_buffer_alloc(hdev);
8674 	if (ret)
8675 		dev_err(&hdev->pdev->dev,
8676 			"Allocate buffer fail, ret =%d\n", ret);
8677 
8678 out:
8679 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8680 	mutex_unlock(&hdev->vport_lock);
8681 	return ret;
8682 }
8683 
8684 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8685 				    bool enable)
8686 {
8687 	struct hclge_reset_tqp_queue_cmd *req;
8688 	struct hclge_desc desc;
8689 	int ret;
8690 
8691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8692 
8693 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8694 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8695 	if (enable)
8696 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8697 
8698 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8699 	if (ret) {
8700 		dev_err(&hdev->pdev->dev,
8701 			"Send tqp reset cmd error, status =%d\n", ret);
8702 		return ret;
8703 	}
8704 
8705 	return 0;
8706 }
8707 
8708 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8709 {
8710 	struct hclge_reset_tqp_queue_cmd *req;
8711 	struct hclge_desc desc;
8712 	int ret;
8713 
8714 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8715 
8716 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8717 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8718 
8719 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8720 	if (ret) {
8721 		dev_err(&hdev->pdev->dev,
8722 			"Get reset status error, status =%d\n", ret);
8723 		return ret;
8724 	}
8725 
8726 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8727 }
8728 
8729 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8730 {
8731 	struct hnae3_queue *queue;
8732 	struct hclge_tqp *tqp;
8733 
8734 	queue = handle->kinfo.tqp[queue_id];
8735 	tqp = container_of(queue, struct hclge_tqp, q);
8736 
8737 	return tqp->index;
8738 }
8739 
8740 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8741 {
8742 	struct hclge_vport *vport = hclge_get_vport(handle);
8743 	struct hclge_dev *hdev = vport->back;
8744 	int reset_try_times = 0;
8745 	int reset_status;
8746 	u16 queue_gid;
8747 	int ret;
8748 
8749 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8750 
8751 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8752 	if (ret) {
8753 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8754 		return ret;
8755 	}
8756 
8757 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8758 	if (ret) {
8759 		dev_err(&hdev->pdev->dev,
8760 			"Send reset tqp cmd fail, ret = %d\n", ret);
8761 		return ret;
8762 	}
8763 
8764 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8765 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8766 		if (reset_status)
8767 			break;
8768 
8769 		/* Wait for tqp hw reset */
8770 		usleep_range(1000, 1200);
8771 	}
8772 
8773 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8774 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8775 		return ret;
8776 	}
8777 
8778 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8779 	if (ret)
8780 		dev_err(&hdev->pdev->dev,
8781 			"Deassert the soft reset fail, ret = %d\n", ret);
8782 
8783 	return ret;
8784 }
8785 
8786 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8787 {
8788 	struct hclge_dev *hdev = vport->back;
8789 	int reset_try_times = 0;
8790 	int reset_status;
8791 	u16 queue_gid;
8792 	int ret;
8793 
8794 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8795 
8796 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8797 	if (ret) {
8798 		dev_warn(&hdev->pdev->dev,
8799 			 "Send reset tqp cmd fail, ret = %d\n", ret);
8800 		return;
8801 	}
8802 
8803 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8804 		reset_status = hclge_get_reset_status(hdev, queue_gid);
8805 		if (reset_status)
8806 			break;
8807 
8808 		/* Wait for tqp hw reset */
8809 		usleep_range(1000, 1200);
8810 	}
8811 
8812 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8813 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8814 		return;
8815 	}
8816 
8817 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8818 	if (ret)
8819 		dev_warn(&hdev->pdev->dev,
8820 			 "Deassert the soft reset fail, ret = %d\n", ret);
8821 }
8822 
8823 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8824 {
8825 	struct hclge_vport *vport = hclge_get_vport(handle);
8826 	struct hclge_dev *hdev = vport->back;
8827 
8828 	return hdev->fw_version;
8829 }
8830 
8831 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8832 {
8833 	struct phy_device *phydev = hdev->hw.mac.phydev;
8834 
8835 	if (!phydev)
8836 		return;
8837 
8838 	phy_set_asym_pause(phydev, rx_en, tx_en);
8839 }
8840 
8841 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8842 {
8843 	int ret;
8844 
8845 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8846 		return 0;
8847 
8848 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8849 	if (ret)
8850 		dev_err(&hdev->pdev->dev,
8851 			"configure pauseparam error, ret = %d.\n", ret);
8852 
8853 	return ret;
8854 }
8855 
8856 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8857 {
8858 	struct phy_device *phydev = hdev->hw.mac.phydev;
8859 	u16 remote_advertising = 0;
8860 	u16 local_advertising;
8861 	u32 rx_pause, tx_pause;
8862 	u8 flowctl;
8863 
8864 	if (!phydev->link || !phydev->autoneg)
8865 		return 0;
8866 
8867 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8868 
8869 	if (phydev->pause)
8870 		remote_advertising = LPA_PAUSE_CAP;
8871 
8872 	if (phydev->asym_pause)
8873 		remote_advertising |= LPA_PAUSE_ASYM;
8874 
8875 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8876 					   remote_advertising);
8877 	tx_pause = flowctl & FLOW_CTRL_TX;
8878 	rx_pause = flowctl & FLOW_CTRL_RX;
8879 
8880 	if (phydev->duplex == HCLGE_MAC_HALF) {
8881 		tx_pause = 0;
8882 		rx_pause = 0;
8883 	}
8884 
8885 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8886 }
8887 
8888 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8889 				 u32 *rx_en, u32 *tx_en)
8890 {
8891 	struct hclge_vport *vport = hclge_get_vport(handle);
8892 	struct hclge_dev *hdev = vport->back;
8893 	struct phy_device *phydev = hdev->hw.mac.phydev;
8894 
8895 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8896 
8897 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8898 		*rx_en = 0;
8899 		*tx_en = 0;
8900 		return;
8901 	}
8902 
8903 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8904 		*rx_en = 1;
8905 		*tx_en = 0;
8906 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8907 		*tx_en = 1;
8908 		*rx_en = 0;
8909 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8910 		*rx_en = 1;
8911 		*tx_en = 1;
8912 	} else {
8913 		*rx_en = 0;
8914 		*tx_en = 0;
8915 	}
8916 }
8917 
8918 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8919 					 u32 rx_en, u32 tx_en)
8920 {
8921 	if (rx_en && tx_en)
8922 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
8923 	else if (rx_en && !tx_en)
8924 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8925 	else if (!rx_en && tx_en)
8926 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8927 	else
8928 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
8929 
8930 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8931 }
8932 
8933 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8934 				u32 rx_en, u32 tx_en)
8935 {
8936 	struct hclge_vport *vport = hclge_get_vport(handle);
8937 	struct hclge_dev *hdev = vport->back;
8938 	struct phy_device *phydev = hdev->hw.mac.phydev;
8939 	u32 fc_autoneg;
8940 
8941 	if (phydev) {
8942 		fc_autoneg = hclge_get_autoneg(handle);
8943 		if (auto_neg != fc_autoneg) {
8944 			dev_info(&hdev->pdev->dev,
8945 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8946 			return -EOPNOTSUPP;
8947 		}
8948 	}
8949 
8950 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8951 		dev_info(&hdev->pdev->dev,
8952 			 "Priority flow control enabled. Cannot set link flow control.\n");
8953 		return -EOPNOTSUPP;
8954 	}
8955 
8956 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8957 
8958 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8959 
8960 	if (!auto_neg)
8961 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8962 
8963 	if (phydev)
8964 		return phy_start_aneg(phydev);
8965 
8966 	return -EOPNOTSUPP;
8967 }
8968 
8969 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8970 					  u8 *auto_neg, u32 *speed, u8 *duplex)
8971 {
8972 	struct hclge_vport *vport = hclge_get_vport(handle);
8973 	struct hclge_dev *hdev = vport->back;
8974 
8975 	if (speed)
8976 		*speed = hdev->hw.mac.speed;
8977 	if (duplex)
8978 		*duplex = hdev->hw.mac.duplex;
8979 	if (auto_neg)
8980 		*auto_neg = hdev->hw.mac.autoneg;
8981 }
8982 
8983 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8984 				 u8 *module_type)
8985 {
8986 	struct hclge_vport *vport = hclge_get_vport(handle);
8987 	struct hclge_dev *hdev = vport->back;
8988 
8989 	/* When nic is down, the service task is not running, doesn't update
8990 	 * the port information per second. Query the port information before
8991 	 * return the media type, ensure getting the correct media information.
8992 	 */
8993 	hclge_update_port_info(hdev);
8994 
8995 	if (media_type)
8996 		*media_type = hdev->hw.mac.media_type;
8997 
8998 	if (module_type)
8999 		*module_type = hdev->hw.mac.module_type;
9000 }
9001 
9002 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9003 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
9004 {
9005 	struct hclge_vport *vport = hclge_get_vport(handle);
9006 	struct hclge_dev *hdev = vport->back;
9007 	struct phy_device *phydev = hdev->hw.mac.phydev;
9008 	int mdix_ctrl, mdix, is_resolved;
9009 	unsigned int retval;
9010 
9011 	if (!phydev) {
9012 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9013 		*tp_mdix = ETH_TP_MDI_INVALID;
9014 		return;
9015 	}
9016 
9017 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9018 
9019 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9020 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9021 				    HCLGE_PHY_MDIX_CTRL_S);
9022 
9023 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9024 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9025 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9026 
9027 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9028 
9029 	switch (mdix_ctrl) {
9030 	case 0x0:
9031 		*tp_mdix_ctrl = ETH_TP_MDI;
9032 		break;
9033 	case 0x1:
9034 		*tp_mdix_ctrl = ETH_TP_MDI_X;
9035 		break;
9036 	case 0x3:
9037 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9038 		break;
9039 	default:
9040 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9041 		break;
9042 	}
9043 
9044 	if (!is_resolved)
9045 		*tp_mdix = ETH_TP_MDI_INVALID;
9046 	else if (mdix)
9047 		*tp_mdix = ETH_TP_MDI_X;
9048 	else
9049 		*tp_mdix = ETH_TP_MDI;
9050 }
9051 
9052 static void hclge_info_show(struct hclge_dev *hdev)
9053 {
9054 	struct device *dev = &hdev->pdev->dev;
9055 
9056 	dev_info(dev, "PF info begin:\n");
9057 
9058 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9059 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9060 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9061 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9062 	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9063 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9064 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9065 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9066 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9067 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9068 	dev_info(dev, "This is %s PF\n",
9069 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9070 	dev_info(dev, "DCB %s\n",
9071 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9072 	dev_info(dev, "MQPRIO %s\n",
9073 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9074 
9075 	dev_info(dev, "PF info end.\n");
9076 }
9077 
9078 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9079 					  struct hclge_vport *vport)
9080 {
9081 	struct hnae3_client *client = vport->nic.client;
9082 	struct hclge_dev *hdev = ae_dev->priv;
9083 	int rst_cnt = hdev->rst_stats.reset_cnt;
9084 	int ret;
9085 
9086 	ret = client->ops->init_instance(&vport->nic);
9087 	if (ret)
9088 		return ret;
9089 
9090 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9091 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9092 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9093 		ret = -EBUSY;
9094 		goto init_nic_err;
9095 	}
9096 
9097 	/* Enable nic hw error interrupts */
9098 	ret = hclge_config_nic_hw_error(hdev, true);
9099 	if (ret) {
9100 		dev_err(&ae_dev->pdev->dev,
9101 			"fail(%d) to enable hw error interrupts\n", ret);
9102 		goto init_nic_err;
9103 	}
9104 
9105 	hnae3_set_client_init_flag(client, ae_dev, 1);
9106 
9107 	if (netif_msg_drv(&hdev->vport->nic))
9108 		hclge_info_show(hdev);
9109 
9110 	return ret;
9111 
9112 init_nic_err:
9113 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9114 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9115 		msleep(HCLGE_WAIT_RESET_DONE);
9116 
9117 	client->ops->uninit_instance(&vport->nic, 0);
9118 
9119 	return ret;
9120 }
9121 
9122 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9123 					   struct hclge_vport *vport)
9124 {
9125 	struct hclge_dev *hdev = ae_dev->priv;
9126 	struct hnae3_client *client;
9127 	int rst_cnt;
9128 	int ret;
9129 
9130 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9131 	    !hdev->nic_client)
9132 		return 0;
9133 
9134 	client = hdev->roce_client;
9135 	ret = hclge_init_roce_base_info(vport);
9136 	if (ret)
9137 		return ret;
9138 
9139 	rst_cnt = hdev->rst_stats.reset_cnt;
9140 	ret = client->ops->init_instance(&vport->roce);
9141 	if (ret)
9142 		return ret;
9143 
9144 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9145 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9146 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9147 		ret = -EBUSY;
9148 		goto init_roce_err;
9149 	}
9150 
9151 	/* Enable roce ras interrupts */
9152 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9153 	if (ret) {
9154 		dev_err(&ae_dev->pdev->dev,
9155 			"fail(%d) to enable roce ras interrupts\n", ret);
9156 		goto init_roce_err;
9157 	}
9158 
9159 	hnae3_set_client_init_flag(client, ae_dev, 1);
9160 
9161 	return 0;
9162 
9163 init_roce_err:
9164 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9165 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9166 		msleep(HCLGE_WAIT_RESET_DONE);
9167 
9168 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9169 
9170 	return ret;
9171 }
9172 
9173 static int hclge_init_client_instance(struct hnae3_client *client,
9174 				      struct hnae3_ae_dev *ae_dev)
9175 {
9176 	struct hclge_dev *hdev = ae_dev->priv;
9177 	struct hclge_vport *vport;
9178 	int i, ret;
9179 
9180 	for (i = 0; i <  hdev->num_vmdq_vport + 1; i++) {
9181 		vport = &hdev->vport[i];
9182 
9183 		switch (client->type) {
9184 		case HNAE3_CLIENT_KNIC:
9185 			hdev->nic_client = client;
9186 			vport->nic.client = client;
9187 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9188 			if (ret)
9189 				goto clear_nic;
9190 
9191 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9192 			if (ret)
9193 				goto clear_roce;
9194 
9195 			break;
9196 		case HNAE3_CLIENT_ROCE:
9197 			if (hnae3_dev_roce_supported(hdev)) {
9198 				hdev->roce_client = client;
9199 				vport->roce.client = client;
9200 			}
9201 
9202 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9203 			if (ret)
9204 				goto clear_roce;
9205 
9206 			break;
9207 		default:
9208 			return -EINVAL;
9209 		}
9210 	}
9211 
9212 	return 0;
9213 
9214 clear_nic:
9215 	hdev->nic_client = NULL;
9216 	vport->nic.client = NULL;
9217 	return ret;
9218 clear_roce:
9219 	hdev->roce_client = NULL;
9220 	vport->roce.client = NULL;
9221 	return ret;
9222 }
9223 
9224 static void hclge_uninit_client_instance(struct hnae3_client *client,
9225 					 struct hnae3_ae_dev *ae_dev)
9226 {
9227 	struct hclge_dev *hdev = ae_dev->priv;
9228 	struct hclge_vport *vport;
9229 	int i;
9230 
9231 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9232 		vport = &hdev->vport[i];
9233 		if (hdev->roce_client) {
9234 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9235 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9236 				msleep(HCLGE_WAIT_RESET_DONE);
9237 
9238 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9239 								0);
9240 			hdev->roce_client = NULL;
9241 			vport->roce.client = NULL;
9242 		}
9243 		if (client->type == HNAE3_CLIENT_ROCE)
9244 			return;
9245 		if (hdev->nic_client && client->ops->uninit_instance) {
9246 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9247 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9248 				msleep(HCLGE_WAIT_RESET_DONE);
9249 
9250 			client->ops->uninit_instance(&vport->nic, 0);
9251 			hdev->nic_client = NULL;
9252 			vport->nic.client = NULL;
9253 		}
9254 	}
9255 }
9256 
9257 static int hclge_pci_init(struct hclge_dev *hdev)
9258 {
9259 	struct pci_dev *pdev = hdev->pdev;
9260 	struct hclge_hw *hw;
9261 	int ret;
9262 
9263 	ret = pci_enable_device(pdev);
9264 	if (ret) {
9265 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9266 		return ret;
9267 	}
9268 
9269 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9270 	if (ret) {
9271 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9272 		if (ret) {
9273 			dev_err(&pdev->dev,
9274 				"can't set consistent PCI DMA");
9275 			goto err_disable_device;
9276 		}
9277 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9278 	}
9279 
9280 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9281 	if (ret) {
9282 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9283 		goto err_disable_device;
9284 	}
9285 
9286 	pci_set_master(pdev);
9287 	hw = &hdev->hw;
9288 	hw->io_base = pcim_iomap(pdev, 2, 0);
9289 	if (!hw->io_base) {
9290 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9291 		ret = -ENOMEM;
9292 		goto err_clr_master;
9293 	}
9294 
9295 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9296 
9297 	return 0;
9298 err_clr_master:
9299 	pci_clear_master(pdev);
9300 	pci_release_regions(pdev);
9301 err_disable_device:
9302 	pci_disable_device(pdev);
9303 
9304 	return ret;
9305 }
9306 
9307 static void hclge_pci_uninit(struct hclge_dev *hdev)
9308 {
9309 	struct pci_dev *pdev = hdev->pdev;
9310 
9311 	pcim_iounmap(pdev, hdev->hw.io_base);
9312 	pci_free_irq_vectors(pdev);
9313 	pci_clear_master(pdev);
9314 	pci_release_mem_regions(pdev);
9315 	pci_disable_device(pdev);
9316 }
9317 
9318 static void hclge_state_init(struct hclge_dev *hdev)
9319 {
9320 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9321 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9322 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9323 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9324 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9325 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9326 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9327 }
9328 
9329 static void hclge_state_uninit(struct hclge_dev *hdev)
9330 {
9331 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9332 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9333 
9334 	if (hdev->reset_timer.function)
9335 		del_timer_sync(&hdev->reset_timer);
9336 	if (hdev->service_task.work.func)
9337 		cancel_delayed_work_sync(&hdev->service_task);
9338 }
9339 
9340 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9341 {
9342 #define HCLGE_FLR_RETRY_WAIT_MS	500
9343 #define HCLGE_FLR_RETRY_CNT	5
9344 
9345 	struct hclge_dev *hdev = ae_dev->priv;
9346 	int retry_cnt = 0;
9347 	int ret;
9348 
9349 retry:
9350 	down(&hdev->reset_sem);
9351 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9352 	hdev->reset_type = HNAE3_FLR_RESET;
9353 	ret = hclge_reset_prepare(hdev);
9354 	if (ret) {
9355 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9356 			ret);
9357 		if (hdev->reset_pending ||
9358 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9359 			dev_err(&hdev->pdev->dev,
9360 				"reset_pending:0x%lx, retry_cnt:%d\n",
9361 				hdev->reset_pending, retry_cnt);
9362 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9363 			up(&hdev->reset_sem);
9364 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
9365 			goto retry;
9366 		}
9367 	}
9368 
9369 	/* disable misc vector before FLR done */
9370 	hclge_enable_vector(&hdev->misc_vector, false);
9371 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9372 	hdev->rst_stats.flr_rst_cnt++;
9373 }
9374 
9375 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9376 {
9377 	struct hclge_dev *hdev = ae_dev->priv;
9378 	int ret;
9379 
9380 	hclge_enable_vector(&hdev->misc_vector, true);
9381 
9382 	ret = hclge_reset_rebuild(hdev);
9383 	if (ret)
9384 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9385 
9386 	hdev->reset_type = HNAE3_NONE_RESET;
9387 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9388 	up(&hdev->reset_sem);
9389 }
9390 
9391 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9392 {
9393 	u16 i;
9394 
9395 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9396 		struct hclge_vport *vport = &hdev->vport[i];
9397 		int ret;
9398 
9399 		 /* Send cmd to clear VF's FUNC_RST_ING */
9400 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9401 		if (ret)
9402 			dev_warn(&hdev->pdev->dev,
9403 				 "clear vf(%u) rst failed %d!\n",
9404 				 vport->vport_id, ret);
9405 	}
9406 }
9407 
9408 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9409 {
9410 	struct pci_dev *pdev = ae_dev->pdev;
9411 	struct hclge_dev *hdev;
9412 	int ret;
9413 
9414 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9415 	if (!hdev) {
9416 		ret = -ENOMEM;
9417 		goto out;
9418 	}
9419 
9420 	hdev->pdev = pdev;
9421 	hdev->ae_dev = ae_dev;
9422 	hdev->reset_type = HNAE3_NONE_RESET;
9423 	hdev->reset_level = HNAE3_FUNC_RESET;
9424 	ae_dev->priv = hdev;
9425 
9426 	/* HW supprt 2 layer vlan */
9427 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9428 
9429 	mutex_init(&hdev->vport_lock);
9430 	spin_lock_init(&hdev->fd_rule_lock);
9431 	sema_init(&hdev->reset_sem, 1);
9432 
9433 	ret = hclge_pci_init(hdev);
9434 	if (ret)
9435 		goto out;
9436 
9437 	/* Firmware command queue initialize */
9438 	ret = hclge_cmd_queue_init(hdev);
9439 	if (ret)
9440 		goto err_pci_uninit;
9441 
9442 	/* Firmware command initialize */
9443 	ret = hclge_cmd_init(hdev);
9444 	if (ret)
9445 		goto err_cmd_uninit;
9446 
9447 	ret = hclge_get_cap(hdev);
9448 	if (ret)
9449 		goto err_cmd_uninit;
9450 
9451 	ret = hclge_configure(hdev);
9452 	if (ret) {
9453 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9454 		goto err_cmd_uninit;
9455 	}
9456 
9457 	ret = hclge_init_msi(hdev);
9458 	if (ret) {
9459 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9460 		goto err_cmd_uninit;
9461 	}
9462 
9463 	ret = hclge_misc_irq_init(hdev);
9464 	if (ret)
9465 		goto err_msi_uninit;
9466 
9467 	ret = hclge_alloc_tqps(hdev);
9468 	if (ret) {
9469 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9470 		goto err_msi_irq_uninit;
9471 	}
9472 
9473 	ret = hclge_alloc_vport(hdev);
9474 	if (ret)
9475 		goto err_msi_irq_uninit;
9476 
9477 	ret = hclge_map_tqp(hdev);
9478 	if (ret)
9479 		goto err_msi_irq_uninit;
9480 
9481 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9482 		ret = hclge_mac_mdio_config(hdev);
9483 		if (ret)
9484 			goto err_msi_irq_uninit;
9485 	}
9486 
9487 	ret = hclge_init_umv_space(hdev);
9488 	if (ret)
9489 		goto err_mdiobus_unreg;
9490 
9491 	ret = hclge_mac_init(hdev);
9492 	if (ret) {
9493 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9494 		goto err_mdiobus_unreg;
9495 	}
9496 
9497 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9498 	if (ret) {
9499 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9500 		goto err_mdiobus_unreg;
9501 	}
9502 
9503 	ret = hclge_config_gro(hdev, true);
9504 	if (ret)
9505 		goto err_mdiobus_unreg;
9506 
9507 	ret = hclge_init_vlan_config(hdev);
9508 	if (ret) {
9509 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9510 		goto err_mdiobus_unreg;
9511 	}
9512 
9513 	ret = hclge_tm_schd_init(hdev);
9514 	if (ret) {
9515 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9516 		goto err_mdiobus_unreg;
9517 	}
9518 
9519 	hclge_rss_init_cfg(hdev);
9520 	ret = hclge_rss_init_hw(hdev);
9521 	if (ret) {
9522 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9523 		goto err_mdiobus_unreg;
9524 	}
9525 
9526 	ret = init_mgr_tbl(hdev);
9527 	if (ret) {
9528 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9529 		goto err_mdiobus_unreg;
9530 	}
9531 
9532 	ret = hclge_init_fd_config(hdev);
9533 	if (ret) {
9534 		dev_err(&pdev->dev,
9535 			"fd table init fail, ret=%d\n", ret);
9536 		goto err_mdiobus_unreg;
9537 	}
9538 
9539 	INIT_KFIFO(hdev->mac_tnl_log);
9540 
9541 	hclge_dcb_ops_set(hdev);
9542 
9543 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9544 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9545 
9546 	/* Setup affinity after service timer setup because add_timer_on
9547 	 * is called in affinity notify.
9548 	 */
9549 	hclge_misc_affinity_setup(hdev);
9550 
9551 	hclge_clear_all_event_cause(hdev);
9552 	hclge_clear_resetting_state(hdev);
9553 
9554 	/* Log and clear the hw errors those already occurred */
9555 	hclge_handle_all_hns_hw_errors(ae_dev);
9556 
9557 	/* request delayed reset for the error recovery because an immediate
9558 	 * global reset on a PF affecting pending initialization of other PFs
9559 	 */
9560 	if (ae_dev->hw_err_reset_req) {
9561 		enum hnae3_reset_type reset_level;
9562 
9563 		reset_level = hclge_get_reset_level(ae_dev,
9564 						    &ae_dev->hw_err_reset_req);
9565 		hclge_set_def_reset_request(ae_dev, reset_level);
9566 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9567 	}
9568 
9569 	/* Enable MISC vector(vector0) */
9570 	hclge_enable_vector(&hdev->misc_vector, true);
9571 
9572 	hclge_state_init(hdev);
9573 	hdev->last_reset_time = jiffies;
9574 
9575 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9576 		 HCLGE_DRIVER_NAME);
9577 
9578 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9579 
9580 	return 0;
9581 
9582 err_mdiobus_unreg:
9583 	if (hdev->hw.mac.phydev)
9584 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
9585 err_msi_irq_uninit:
9586 	hclge_misc_irq_uninit(hdev);
9587 err_msi_uninit:
9588 	pci_free_irq_vectors(pdev);
9589 err_cmd_uninit:
9590 	hclge_cmd_uninit(hdev);
9591 err_pci_uninit:
9592 	pcim_iounmap(pdev, hdev->hw.io_base);
9593 	pci_clear_master(pdev);
9594 	pci_release_regions(pdev);
9595 	pci_disable_device(pdev);
9596 out:
9597 	return ret;
9598 }
9599 
9600 static void hclge_stats_clear(struct hclge_dev *hdev)
9601 {
9602 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9603 }
9604 
9605 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9606 {
9607 	return hclge_config_switch_param(hdev, vf, enable,
9608 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9609 }
9610 
9611 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9612 {
9613 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9614 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
9615 					  enable, vf);
9616 }
9617 
9618 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9619 {
9620 	int ret;
9621 
9622 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9623 	if (ret) {
9624 		dev_err(&hdev->pdev->dev,
9625 			"Set vf %d mac spoof check %s failed, ret=%d\n",
9626 			vf, enable ? "on" : "off", ret);
9627 		return ret;
9628 	}
9629 
9630 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9631 	if (ret)
9632 		dev_err(&hdev->pdev->dev,
9633 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
9634 			vf, enable ? "on" : "off", ret);
9635 
9636 	return ret;
9637 }
9638 
9639 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9640 				 bool enable)
9641 {
9642 	struct hclge_vport *vport = hclge_get_vport(handle);
9643 	struct hclge_dev *hdev = vport->back;
9644 	u32 new_spoofchk = enable ? 1 : 0;
9645 	int ret;
9646 
9647 	if (hdev->pdev->revision == 0x20)
9648 		return -EOPNOTSUPP;
9649 
9650 	vport = hclge_get_vf_vport(hdev, vf);
9651 	if (!vport)
9652 		return -EINVAL;
9653 
9654 	if (vport->vf_info.spoofchk == new_spoofchk)
9655 		return 0;
9656 
9657 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9658 		dev_warn(&hdev->pdev->dev,
9659 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9660 			 vf);
9661 	else if (enable && hclge_is_umv_space_full(vport))
9662 		dev_warn(&hdev->pdev->dev,
9663 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9664 			 vf);
9665 
9666 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9667 	if (ret)
9668 		return ret;
9669 
9670 	vport->vf_info.spoofchk = new_spoofchk;
9671 	return 0;
9672 }
9673 
9674 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9675 {
9676 	struct hclge_vport *vport = hdev->vport;
9677 	int ret;
9678 	int i;
9679 
9680 	if (hdev->pdev->revision == 0x20)
9681 		return 0;
9682 
9683 	/* resume the vf spoof check state after reset */
9684 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9685 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9686 					       vport->vf_info.spoofchk);
9687 		if (ret)
9688 			return ret;
9689 
9690 		vport++;
9691 	}
9692 
9693 	return 0;
9694 }
9695 
9696 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9697 {
9698 	struct hclge_vport *vport = hclge_get_vport(handle);
9699 	struct hclge_dev *hdev = vport->back;
9700 	u32 new_trusted = enable ? 1 : 0;
9701 	bool en_bc_pmc;
9702 	int ret;
9703 
9704 	vport = hclge_get_vf_vport(hdev, vf);
9705 	if (!vport)
9706 		return -EINVAL;
9707 
9708 	if (vport->vf_info.trusted == new_trusted)
9709 		return 0;
9710 
9711 	/* Disable promisc mode for VF if it is not trusted any more. */
9712 	if (!enable && vport->vf_info.promisc_enable) {
9713 		en_bc_pmc = hdev->pdev->revision != 0x20;
9714 		ret = hclge_set_vport_promisc_mode(vport, false, false,
9715 						   en_bc_pmc);
9716 		if (ret)
9717 			return ret;
9718 		vport->vf_info.promisc_enable = 0;
9719 		hclge_inform_vf_promisc_info(vport);
9720 	}
9721 
9722 	vport->vf_info.trusted = new_trusted;
9723 
9724 	return 0;
9725 }
9726 
9727 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9728 {
9729 	int ret;
9730 	int vf;
9731 
9732 	/* reset vf rate to default value */
9733 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9734 		struct hclge_vport *vport = &hdev->vport[vf];
9735 
9736 		vport->vf_info.max_tx_rate = 0;
9737 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9738 		if (ret)
9739 			dev_err(&hdev->pdev->dev,
9740 				"vf%d failed to reset to default, ret=%d\n",
9741 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9742 	}
9743 }
9744 
9745 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9746 				     int min_tx_rate, int max_tx_rate)
9747 {
9748 	if (min_tx_rate != 0 ||
9749 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9750 		dev_err(&hdev->pdev->dev,
9751 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9752 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9753 		return -EINVAL;
9754 	}
9755 
9756 	return 0;
9757 }
9758 
9759 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9760 			     int min_tx_rate, int max_tx_rate, bool force)
9761 {
9762 	struct hclge_vport *vport = hclge_get_vport(handle);
9763 	struct hclge_dev *hdev = vport->back;
9764 	int ret;
9765 
9766 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9767 	if (ret)
9768 		return ret;
9769 
9770 	vport = hclge_get_vf_vport(hdev, vf);
9771 	if (!vport)
9772 		return -EINVAL;
9773 
9774 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9775 		return 0;
9776 
9777 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9778 	if (ret)
9779 		return ret;
9780 
9781 	vport->vf_info.max_tx_rate = max_tx_rate;
9782 
9783 	return 0;
9784 }
9785 
9786 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9787 {
9788 	struct hnae3_handle *handle = &hdev->vport->nic;
9789 	struct hclge_vport *vport;
9790 	int ret;
9791 	int vf;
9792 
9793 	/* resume the vf max_tx_rate after reset */
9794 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9795 		vport = hclge_get_vf_vport(hdev, vf);
9796 		if (!vport)
9797 			return -EINVAL;
9798 
9799 		/* zero means max rate, after reset, firmware already set it to
9800 		 * max rate, so just continue.
9801 		 */
9802 		if (!vport->vf_info.max_tx_rate)
9803 			continue;
9804 
9805 		ret = hclge_set_vf_rate(handle, vf, 0,
9806 					vport->vf_info.max_tx_rate, true);
9807 		if (ret) {
9808 			dev_err(&hdev->pdev->dev,
9809 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
9810 				vf, vport->vf_info.max_tx_rate, ret);
9811 			return ret;
9812 		}
9813 	}
9814 
9815 	return 0;
9816 }
9817 
9818 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9819 {
9820 	struct hclge_vport *vport = hdev->vport;
9821 	int i;
9822 
9823 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9824 		hclge_vport_stop(vport);
9825 		vport++;
9826 	}
9827 }
9828 
9829 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9830 {
9831 	struct hclge_dev *hdev = ae_dev->priv;
9832 	struct pci_dev *pdev = ae_dev->pdev;
9833 	int ret;
9834 
9835 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9836 
9837 	hclge_stats_clear(hdev);
9838 	memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9839 	memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9840 
9841 	ret = hclge_cmd_init(hdev);
9842 	if (ret) {
9843 		dev_err(&pdev->dev, "Cmd queue init failed\n");
9844 		return ret;
9845 	}
9846 
9847 	ret = hclge_map_tqp(hdev);
9848 	if (ret) {
9849 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9850 		return ret;
9851 	}
9852 
9853 	hclge_reset_umv_space(hdev);
9854 
9855 	ret = hclge_mac_init(hdev);
9856 	if (ret) {
9857 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9858 		return ret;
9859 	}
9860 
9861 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9862 	if (ret) {
9863 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9864 		return ret;
9865 	}
9866 
9867 	ret = hclge_config_gro(hdev, true);
9868 	if (ret)
9869 		return ret;
9870 
9871 	ret = hclge_init_vlan_config(hdev);
9872 	if (ret) {
9873 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9874 		return ret;
9875 	}
9876 
9877 	ret = hclge_tm_init_hw(hdev, true);
9878 	if (ret) {
9879 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9880 		return ret;
9881 	}
9882 
9883 	ret = hclge_rss_init_hw(hdev);
9884 	if (ret) {
9885 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9886 		return ret;
9887 	}
9888 
9889 	ret = init_mgr_tbl(hdev);
9890 	if (ret) {
9891 		dev_err(&pdev->dev,
9892 			"failed to reinit manager table, ret = %d\n", ret);
9893 		return ret;
9894 	}
9895 
9896 	ret = hclge_init_fd_config(hdev);
9897 	if (ret) {
9898 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9899 		return ret;
9900 	}
9901 
9902 	/* Log and clear the hw errors those already occurred */
9903 	hclge_handle_all_hns_hw_errors(ae_dev);
9904 
9905 	/* Re-enable the hw error interrupts because
9906 	 * the interrupts get disabled on global reset.
9907 	 */
9908 	ret = hclge_config_nic_hw_error(hdev, true);
9909 	if (ret) {
9910 		dev_err(&pdev->dev,
9911 			"fail(%d) to re-enable NIC hw error interrupts\n",
9912 			ret);
9913 		return ret;
9914 	}
9915 
9916 	if (hdev->roce_client) {
9917 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
9918 		if (ret) {
9919 			dev_err(&pdev->dev,
9920 				"fail(%d) to re-enable roce ras interrupts\n",
9921 				ret);
9922 			return ret;
9923 		}
9924 	}
9925 
9926 	hclge_reset_vport_state(hdev);
9927 	ret = hclge_reset_vport_spoofchk(hdev);
9928 	if (ret)
9929 		return ret;
9930 
9931 	ret = hclge_resume_vf_rate(hdev);
9932 	if (ret)
9933 		return ret;
9934 
9935 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9936 		 HCLGE_DRIVER_NAME);
9937 
9938 	return 0;
9939 }
9940 
9941 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9942 {
9943 	struct hclge_dev *hdev = ae_dev->priv;
9944 	struct hclge_mac *mac = &hdev->hw.mac;
9945 
9946 	hclge_reset_vf_rate(hdev);
9947 	hclge_clear_vf_vlan(hdev);
9948 	hclge_misc_affinity_teardown(hdev);
9949 	hclge_state_uninit(hdev);
9950 
9951 	if (mac->phydev)
9952 		mdiobus_unregister(mac->mdio_bus);
9953 
9954 	hclge_uninit_umv_space(hdev);
9955 
9956 	/* Disable MISC vector(vector0) */
9957 	hclge_enable_vector(&hdev->misc_vector, false);
9958 	synchronize_irq(hdev->misc_vector.vector_irq);
9959 
9960 	/* Disable all hw interrupts */
9961 	hclge_config_mac_tnl_int(hdev, false);
9962 	hclge_config_nic_hw_error(hdev, false);
9963 	hclge_config_rocee_ras_interrupt(hdev, false);
9964 
9965 	hclge_cmd_uninit(hdev);
9966 	hclge_misc_irq_uninit(hdev);
9967 	hclge_pci_uninit(hdev);
9968 	mutex_destroy(&hdev->vport_lock);
9969 	hclge_uninit_vport_mac_table(hdev);
9970 	hclge_uninit_vport_vlan_table(hdev);
9971 	ae_dev->priv = NULL;
9972 }
9973 
9974 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9975 {
9976 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9977 	struct hclge_vport *vport = hclge_get_vport(handle);
9978 	struct hclge_dev *hdev = vport->back;
9979 
9980 	return min_t(u32, hdev->rss_size_max,
9981 		     vport->alloc_tqps / kinfo->num_tc);
9982 }
9983 
9984 static void hclge_get_channels(struct hnae3_handle *handle,
9985 			       struct ethtool_channels *ch)
9986 {
9987 	ch->max_combined = hclge_get_max_channels(handle);
9988 	ch->other_count = 1;
9989 	ch->max_other = 1;
9990 	ch->combined_count = handle->kinfo.rss_size;
9991 }
9992 
9993 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9994 					u16 *alloc_tqps, u16 *max_rss_size)
9995 {
9996 	struct hclge_vport *vport = hclge_get_vport(handle);
9997 	struct hclge_dev *hdev = vport->back;
9998 
9999 	*alloc_tqps = vport->alloc_tqps;
10000 	*max_rss_size = hdev->rss_size_max;
10001 }
10002 
10003 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10004 			      bool rxfh_configured)
10005 {
10006 	struct hclge_vport *vport = hclge_get_vport(handle);
10007 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10008 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10009 	struct hclge_dev *hdev = vport->back;
10010 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10011 	u16 cur_rss_size = kinfo->rss_size;
10012 	u16 cur_tqps = kinfo->num_tqps;
10013 	u16 tc_valid[HCLGE_MAX_TC_NUM];
10014 	u16 roundup_size;
10015 	u32 *rss_indir;
10016 	unsigned int i;
10017 	int ret;
10018 
10019 	kinfo->req_rss_size = new_tqps_num;
10020 
10021 	ret = hclge_tm_vport_map_update(hdev);
10022 	if (ret) {
10023 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10024 		return ret;
10025 	}
10026 
10027 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
10028 	roundup_size = ilog2(roundup_size);
10029 	/* Set the RSS TC mode according to the new RSS size */
10030 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10031 		tc_valid[i] = 0;
10032 
10033 		if (!(hdev->hw_tc_map & BIT(i)))
10034 			continue;
10035 
10036 		tc_valid[i] = 1;
10037 		tc_size[i] = roundup_size;
10038 		tc_offset[i] = kinfo->rss_size * i;
10039 	}
10040 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10041 	if (ret)
10042 		return ret;
10043 
10044 	/* RSS indirection table has been configuared by user */
10045 	if (rxfh_configured)
10046 		goto out;
10047 
10048 	/* Reinitializes the rss indirect table according to the new RSS size */
10049 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10050 	if (!rss_indir)
10051 		return -ENOMEM;
10052 
10053 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10054 		rss_indir[i] = i % kinfo->rss_size;
10055 
10056 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10057 	if (ret)
10058 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10059 			ret);
10060 
10061 	kfree(rss_indir);
10062 
10063 out:
10064 	if (!ret)
10065 		dev_info(&hdev->pdev->dev,
10066 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10067 			 cur_rss_size, kinfo->rss_size,
10068 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10069 
10070 	return ret;
10071 }
10072 
10073 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10074 			      u32 *regs_num_64_bit)
10075 {
10076 	struct hclge_desc desc;
10077 	u32 total_num;
10078 	int ret;
10079 
10080 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10081 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10082 	if (ret) {
10083 		dev_err(&hdev->pdev->dev,
10084 			"Query register number cmd failed, ret = %d.\n", ret);
10085 		return ret;
10086 	}
10087 
10088 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10089 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10090 
10091 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10092 	if (!total_num)
10093 		return -EINVAL;
10094 
10095 	return 0;
10096 }
10097 
10098 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10099 				 void *data)
10100 {
10101 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10102 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10103 
10104 	struct hclge_desc *desc;
10105 	u32 *reg_val = data;
10106 	__le32 *desc_data;
10107 	int nodata_num;
10108 	int cmd_num;
10109 	int i, k, n;
10110 	int ret;
10111 
10112 	if (regs_num == 0)
10113 		return 0;
10114 
10115 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10116 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10117 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10118 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10119 	if (!desc)
10120 		return -ENOMEM;
10121 
10122 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10123 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10124 	if (ret) {
10125 		dev_err(&hdev->pdev->dev,
10126 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10127 		kfree(desc);
10128 		return ret;
10129 	}
10130 
10131 	for (i = 0; i < cmd_num; i++) {
10132 		if (i == 0) {
10133 			desc_data = (__le32 *)(&desc[i].data[0]);
10134 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10135 		} else {
10136 			desc_data = (__le32 *)(&desc[i]);
10137 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10138 		}
10139 		for (k = 0; k < n; k++) {
10140 			*reg_val++ = le32_to_cpu(*desc_data++);
10141 
10142 			regs_num--;
10143 			if (!regs_num)
10144 				break;
10145 		}
10146 	}
10147 
10148 	kfree(desc);
10149 	return 0;
10150 }
10151 
10152 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10153 				 void *data)
10154 {
10155 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10156 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10157 
10158 	struct hclge_desc *desc;
10159 	u64 *reg_val = data;
10160 	__le64 *desc_data;
10161 	int nodata_len;
10162 	int cmd_num;
10163 	int i, k, n;
10164 	int ret;
10165 
10166 	if (regs_num == 0)
10167 		return 0;
10168 
10169 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10170 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10171 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10172 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10173 	if (!desc)
10174 		return -ENOMEM;
10175 
10176 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10177 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10178 	if (ret) {
10179 		dev_err(&hdev->pdev->dev,
10180 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10181 		kfree(desc);
10182 		return ret;
10183 	}
10184 
10185 	for (i = 0; i < cmd_num; i++) {
10186 		if (i == 0) {
10187 			desc_data = (__le64 *)(&desc[i].data[0]);
10188 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10189 		} else {
10190 			desc_data = (__le64 *)(&desc[i]);
10191 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10192 		}
10193 		for (k = 0; k < n; k++) {
10194 			*reg_val++ = le64_to_cpu(*desc_data++);
10195 
10196 			regs_num--;
10197 			if (!regs_num)
10198 				break;
10199 		}
10200 	}
10201 
10202 	kfree(desc);
10203 	return 0;
10204 }
10205 
10206 #define MAX_SEPARATE_NUM	4
10207 #define SEPARATOR_VALUE		0xFDFCFBFA
10208 #define REG_NUM_PER_LINE	4
10209 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10210 #define REG_SEPARATOR_LINE	1
10211 #define REG_NUM_REMAIN_MASK	3
10212 #define BD_LIST_MAX_NUM		30
10213 
10214 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10215 {
10216 	/*prepare 4 commands to query DFX BD number*/
10217 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10218 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10219 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10220 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10221 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10222 	desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10223 	hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10224 
10225 	return hclge_cmd_send(&hdev->hw, desc, 4);
10226 }
10227 
10228 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10229 				    int *bd_num_list,
10230 				    u32 type_num)
10231 {
10232 	u32 entries_per_desc, desc_index, index, offset, i;
10233 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10234 	int ret;
10235 
10236 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10237 	if (ret) {
10238 		dev_err(&hdev->pdev->dev,
10239 			"Get dfx bd num fail, status is %d.\n", ret);
10240 		return ret;
10241 	}
10242 
10243 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10244 	for (i = 0; i < type_num; i++) {
10245 		offset = hclge_dfx_bd_offset_list[i];
10246 		index = offset % entries_per_desc;
10247 		desc_index = offset / entries_per_desc;
10248 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10249 	}
10250 
10251 	return ret;
10252 }
10253 
10254 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10255 				  struct hclge_desc *desc_src, int bd_num,
10256 				  enum hclge_opcode_type cmd)
10257 {
10258 	struct hclge_desc *desc = desc_src;
10259 	int i, ret;
10260 
10261 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10262 	for (i = 0; i < bd_num - 1; i++) {
10263 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10264 		desc++;
10265 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10266 	}
10267 
10268 	desc = desc_src;
10269 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10270 	if (ret)
10271 		dev_err(&hdev->pdev->dev,
10272 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10273 			cmd, ret);
10274 
10275 	return ret;
10276 }
10277 
10278 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10279 				    void *data)
10280 {
10281 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10282 	struct hclge_desc *desc = desc_src;
10283 	u32 *reg = data;
10284 
10285 	entries_per_desc = ARRAY_SIZE(desc->data);
10286 	reg_num = entries_per_desc * bd_num;
10287 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10288 	for (i = 0; i < reg_num; i++) {
10289 		index = i % entries_per_desc;
10290 		desc_index = i / entries_per_desc;
10291 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10292 	}
10293 	for (i = 0; i < separator_num; i++)
10294 		*reg++ = SEPARATOR_VALUE;
10295 
10296 	return reg_num + separator_num;
10297 }
10298 
10299 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10300 {
10301 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10302 	int data_len_per_desc, bd_num, i;
10303 	int bd_num_list[BD_LIST_MAX_NUM];
10304 	u32 data_len;
10305 	int ret;
10306 
10307 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10308 	if (ret) {
10309 		dev_err(&hdev->pdev->dev,
10310 			"Get dfx reg bd num fail, status is %d.\n", ret);
10311 		return ret;
10312 	}
10313 
10314 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
10315 	*len = 0;
10316 	for (i = 0; i < dfx_reg_type_num; i++) {
10317 		bd_num = bd_num_list[i];
10318 		data_len = data_len_per_desc * bd_num;
10319 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10320 	}
10321 
10322 	return ret;
10323 }
10324 
10325 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10326 {
10327 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10328 	int bd_num, bd_num_max, buf_len, i;
10329 	int bd_num_list[BD_LIST_MAX_NUM];
10330 	struct hclge_desc *desc_src;
10331 	u32 *reg = data;
10332 	int ret;
10333 
10334 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10335 	if (ret) {
10336 		dev_err(&hdev->pdev->dev,
10337 			"Get dfx reg bd num fail, status is %d.\n", ret);
10338 		return ret;
10339 	}
10340 
10341 	bd_num_max = bd_num_list[0];
10342 	for (i = 1; i < dfx_reg_type_num; i++)
10343 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10344 
10345 	buf_len = sizeof(*desc_src) * bd_num_max;
10346 	desc_src = kzalloc(buf_len, GFP_KERNEL);
10347 	if (!desc_src)
10348 		return -ENOMEM;
10349 
10350 	for (i = 0; i < dfx_reg_type_num; i++) {
10351 		bd_num = bd_num_list[i];
10352 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10353 					     hclge_dfx_reg_opcode_list[i]);
10354 		if (ret) {
10355 			dev_err(&hdev->pdev->dev,
10356 				"Get dfx reg fail, status is %d.\n", ret);
10357 			break;
10358 		}
10359 
10360 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10361 	}
10362 
10363 	kfree(desc_src);
10364 	return ret;
10365 }
10366 
10367 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10368 			      struct hnae3_knic_private_info *kinfo)
10369 {
10370 #define HCLGE_RING_REG_OFFSET		0x200
10371 #define HCLGE_RING_INT_REG_OFFSET	0x4
10372 
10373 	int i, j, reg_num, separator_num;
10374 	int data_num_sum;
10375 	u32 *reg = data;
10376 
10377 	/* fetching per-PF registers valus from PF PCIe register space */
10378 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10379 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10380 	for (i = 0; i < reg_num; i++)
10381 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10382 	for (i = 0; i < separator_num; i++)
10383 		*reg++ = SEPARATOR_VALUE;
10384 	data_num_sum = reg_num + separator_num;
10385 
10386 	reg_num = ARRAY_SIZE(common_reg_addr_list);
10387 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10388 	for (i = 0; i < reg_num; i++)
10389 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10390 	for (i = 0; i < separator_num; i++)
10391 		*reg++ = SEPARATOR_VALUE;
10392 	data_num_sum += reg_num + separator_num;
10393 
10394 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
10395 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10396 	for (j = 0; j < kinfo->num_tqps; j++) {
10397 		for (i = 0; i < reg_num; i++)
10398 			*reg++ = hclge_read_dev(&hdev->hw,
10399 						ring_reg_addr_list[i] +
10400 						HCLGE_RING_REG_OFFSET * j);
10401 		for (i = 0; i < separator_num; i++)
10402 			*reg++ = SEPARATOR_VALUE;
10403 	}
10404 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10405 
10406 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10407 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10408 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
10409 		for (i = 0; i < reg_num; i++)
10410 			*reg++ = hclge_read_dev(&hdev->hw,
10411 						tqp_intr_reg_addr_list[i] +
10412 						HCLGE_RING_INT_REG_OFFSET * j);
10413 		for (i = 0; i < separator_num; i++)
10414 			*reg++ = SEPARATOR_VALUE;
10415 	}
10416 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10417 
10418 	return data_num_sum;
10419 }
10420 
10421 static int hclge_get_regs_len(struct hnae3_handle *handle)
10422 {
10423 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10424 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10425 	struct hclge_vport *vport = hclge_get_vport(handle);
10426 	struct hclge_dev *hdev = vport->back;
10427 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10428 	int regs_lines_32_bit, regs_lines_64_bit;
10429 	int ret;
10430 
10431 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10432 	if (ret) {
10433 		dev_err(&hdev->pdev->dev,
10434 			"Get register number failed, ret = %d.\n", ret);
10435 		return ret;
10436 	}
10437 
10438 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10439 	if (ret) {
10440 		dev_err(&hdev->pdev->dev,
10441 			"Get dfx reg len failed, ret = %d.\n", ret);
10442 		return ret;
10443 	}
10444 
10445 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10446 		REG_SEPARATOR_LINE;
10447 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10448 		REG_SEPARATOR_LINE;
10449 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10450 		REG_SEPARATOR_LINE;
10451 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10452 		REG_SEPARATOR_LINE;
10453 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10454 		REG_SEPARATOR_LINE;
10455 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10456 		REG_SEPARATOR_LINE;
10457 
10458 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10459 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10460 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10461 }
10462 
10463 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10464 			   void *data)
10465 {
10466 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10467 	struct hclge_vport *vport = hclge_get_vport(handle);
10468 	struct hclge_dev *hdev = vport->back;
10469 	u32 regs_num_32_bit, regs_num_64_bit;
10470 	int i, reg_num, separator_num, ret;
10471 	u32 *reg = data;
10472 
10473 	*version = hdev->fw_version;
10474 
10475 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10476 	if (ret) {
10477 		dev_err(&hdev->pdev->dev,
10478 			"Get register number failed, ret = %d.\n", ret);
10479 		return;
10480 	}
10481 
10482 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10483 
10484 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10485 	if (ret) {
10486 		dev_err(&hdev->pdev->dev,
10487 			"Get 32 bit register failed, ret = %d.\n", ret);
10488 		return;
10489 	}
10490 	reg_num = regs_num_32_bit;
10491 	reg += reg_num;
10492 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10493 	for (i = 0; i < separator_num; i++)
10494 		*reg++ = SEPARATOR_VALUE;
10495 
10496 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10497 	if (ret) {
10498 		dev_err(&hdev->pdev->dev,
10499 			"Get 64 bit register failed, ret = %d.\n", ret);
10500 		return;
10501 	}
10502 	reg_num = regs_num_64_bit * 2;
10503 	reg += reg_num;
10504 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10505 	for (i = 0; i < separator_num; i++)
10506 		*reg++ = SEPARATOR_VALUE;
10507 
10508 	ret = hclge_get_dfx_reg(hdev, reg);
10509 	if (ret)
10510 		dev_err(&hdev->pdev->dev,
10511 			"Get dfx register failed, ret = %d.\n", ret);
10512 }
10513 
10514 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10515 {
10516 	struct hclge_set_led_state_cmd *req;
10517 	struct hclge_desc desc;
10518 	int ret;
10519 
10520 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10521 
10522 	req = (struct hclge_set_led_state_cmd *)desc.data;
10523 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10524 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10525 
10526 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10527 	if (ret)
10528 		dev_err(&hdev->pdev->dev,
10529 			"Send set led state cmd error, ret =%d\n", ret);
10530 
10531 	return ret;
10532 }
10533 
10534 enum hclge_led_status {
10535 	HCLGE_LED_OFF,
10536 	HCLGE_LED_ON,
10537 	HCLGE_LED_NO_CHANGE = 0xFF,
10538 };
10539 
10540 static int hclge_set_led_id(struct hnae3_handle *handle,
10541 			    enum ethtool_phys_id_state status)
10542 {
10543 	struct hclge_vport *vport = hclge_get_vport(handle);
10544 	struct hclge_dev *hdev = vport->back;
10545 
10546 	switch (status) {
10547 	case ETHTOOL_ID_ACTIVE:
10548 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
10549 	case ETHTOOL_ID_INACTIVE:
10550 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10551 	default:
10552 		return -EINVAL;
10553 	}
10554 }
10555 
10556 static void hclge_get_link_mode(struct hnae3_handle *handle,
10557 				unsigned long *supported,
10558 				unsigned long *advertising)
10559 {
10560 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10561 	struct hclge_vport *vport = hclge_get_vport(handle);
10562 	struct hclge_dev *hdev = vport->back;
10563 	unsigned int idx = 0;
10564 
10565 	for (; idx < size; idx++) {
10566 		supported[idx] = hdev->hw.mac.supported[idx];
10567 		advertising[idx] = hdev->hw.mac.advertising[idx];
10568 	}
10569 }
10570 
10571 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10572 {
10573 	struct hclge_vport *vport = hclge_get_vport(handle);
10574 	struct hclge_dev *hdev = vport->back;
10575 
10576 	return hclge_config_gro(hdev, enable);
10577 }
10578 
10579 static const struct hnae3_ae_ops hclge_ops = {
10580 	.init_ae_dev = hclge_init_ae_dev,
10581 	.uninit_ae_dev = hclge_uninit_ae_dev,
10582 	.flr_prepare = hclge_flr_prepare,
10583 	.flr_done = hclge_flr_done,
10584 	.init_client_instance = hclge_init_client_instance,
10585 	.uninit_client_instance = hclge_uninit_client_instance,
10586 	.map_ring_to_vector = hclge_map_ring_to_vector,
10587 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10588 	.get_vector = hclge_get_vector,
10589 	.put_vector = hclge_put_vector,
10590 	.set_promisc_mode = hclge_set_promisc_mode,
10591 	.set_loopback = hclge_set_loopback,
10592 	.start = hclge_ae_start,
10593 	.stop = hclge_ae_stop,
10594 	.client_start = hclge_client_start,
10595 	.client_stop = hclge_client_stop,
10596 	.get_status = hclge_get_status,
10597 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
10598 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10599 	.get_media_type = hclge_get_media_type,
10600 	.check_port_speed = hclge_check_port_speed,
10601 	.get_fec = hclge_get_fec,
10602 	.set_fec = hclge_set_fec,
10603 	.get_rss_key_size = hclge_get_rss_key_size,
10604 	.get_rss_indir_size = hclge_get_rss_indir_size,
10605 	.get_rss = hclge_get_rss,
10606 	.set_rss = hclge_set_rss,
10607 	.set_rss_tuple = hclge_set_rss_tuple,
10608 	.get_rss_tuple = hclge_get_rss_tuple,
10609 	.get_tc_size = hclge_get_tc_size,
10610 	.get_mac_addr = hclge_get_mac_addr,
10611 	.set_mac_addr = hclge_set_mac_addr,
10612 	.do_ioctl = hclge_do_ioctl,
10613 	.add_uc_addr = hclge_add_uc_addr,
10614 	.rm_uc_addr = hclge_rm_uc_addr,
10615 	.add_mc_addr = hclge_add_mc_addr,
10616 	.rm_mc_addr = hclge_rm_mc_addr,
10617 	.set_autoneg = hclge_set_autoneg,
10618 	.get_autoneg = hclge_get_autoneg,
10619 	.restart_autoneg = hclge_restart_autoneg,
10620 	.halt_autoneg = hclge_halt_autoneg,
10621 	.get_pauseparam = hclge_get_pauseparam,
10622 	.set_pauseparam = hclge_set_pauseparam,
10623 	.set_mtu = hclge_set_mtu,
10624 	.reset_queue = hclge_reset_tqp,
10625 	.get_stats = hclge_get_stats,
10626 	.get_mac_stats = hclge_get_mac_stat,
10627 	.update_stats = hclge_update_stats,
10628 	.get_strings = hclge_get_strings,
10629 	.get_sset_count = hclge_get_sset_count,
10630 	.get_fw_version = hclge_get_fw_version,
10631 	.get_mdix_mode = hclge_get_mdix_mode,
10632 	.enable_vlan_filter = hclge_enable_vlan_filter,
10633 	.set_vlan_filter = hclge_set_vlan_filter,
10634 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10635 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10636 	.reset_event = hclge_reset_event,
10637 	.get_reset_level = hclge_get_reset_level,
10638 	.set_default_reset_request = hclge_set_def_reset_request,
10639 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10640 	.set_channels = hclge_set_channels,
10641 	.get_channels = hclge_get_channels,
10642 	.get_regs_len = hclge_get_regs_len,
10643 	.get_regs = hclge_get_regs,
10644 	.set_led_id = hclge_set_led_id,
10645 	.get_link_mode = hclge_get_link_mode,
10646 	.add_fd_entry = hclge_add_fd_entry,
10647 	.del_fd_entry = hclge_del_fd_entry,
10648 	.del_all_fd_entries = hclge_del_all_fd_entries,
10649 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10650 	.get_fd_rule_info = hclge_get_fd_rule_info,
10651 	.get_fd_all_rules = hclge_get_all_rules,
10652 	.restore_fd_rules = hclge_restore_fd_entries,
10653 	.enable_fd = hclge_enable_fd,
10654 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
10655 	.dbg_run_cmd = hclge_dbg_run_cmd,
10656 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
10657 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
10658 	.ae_dev_resetting = hclge_ae_dev_resetting,
10659 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10660 	.set_gro_en = hclge_gro_en,
10661 	.get_global_queue_id = hclge_covert_handle_qid_global,
10662 	.set_timer_task = hclge_set_timer_task,
10663 	.mac_connect_phy = hclge_mac_connect_phy,
10664 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
10665 	.restore_vlan_table = hclge_restore_vlan_table,
10666 	.get_vf_config = hclge_get_vf_config,
10667 	.set_vf_link_state = hclge_set_vf_link_state,
10668 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
10669 	.set_vf_trust = hclge_set_vf_trust,
10670 	.set_vf_rate = hclge_set_vf_rate,
10671 	.set_vf_mac = hclge_set_vf_mac,
10672 };
10673 
10674 static struct hnae3_ae_algo ae_algo = {
10675 	.ops = &hclge_ops,
10676 	.pdev_id_table = ae_algo_pci_tbl,
10677 };
10678 
10679 static int hclge_init(void)
10680 {
10681 	pr_info("%s is initializing\n", HCLGE_NAME);
10682 
10683 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
10684 	if (!hclge_wq) {
10685 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10686 		return -ENOMEM;
10687 	}
10688 
10689 	hnae3_register_ae_algo(&ae_algo);
10690 
10691 	return 0;
10692 }
10693 
10694 static void hclge_exit(void)
10695 {
10696 	hnae3_unregister_ae_algo(&ae_algo);
10697 	destroy_workqueue(hclge_wq);
10698 }
10699 module_init(hclge_init);
10700 module_exit(hclge_exit);
10701 
10702 MODULE_LICENSE("GPL");
10703 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10704 MODULE_DESCRIPTION("HCLGE Driver");
10705 MODULE_VERSION(HCLGE_MOD_VERSION);
10706