1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25 
26 #define HCLGE_NAME			"hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
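/* Usage example (field names taken from g_mac_stats_string below):
 * HCLGE_STATS_READ(&hdev->mac_stats,
 *		    HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num))
 * reads the mac_tx_good_pkt_num counter from struct hclge_mac_stats.
 */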
29 
30 #define HCLGE_BUF_SIZE_UNIT	256U
31 #define HCLGE_BUF_MUL_BY	2
32 #define HCLGE_BUF_DIV_BY	2
33 #define NEED_RESERVE_TC_NUM	2
34 #define BUF_MAX_PERCENT		100
35 #define BUF_RESERVE_PERCENT	90
36 
37 #define HCLGE_RESET_MAX_FAIL_CNT	5
38 #define HCLGE_RESET_SYNC_TIME		100
39 #define HCLGE_PF_RESET_SYNC_TIME	20
40 #define HCLGE_PF_RESET_SYNC_CNT		1500
41 
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55 
56 #define HCLGE_LINK_STATUS_MS	10
57 
58 #define HCLGE_VF_VPORT_START_NUM	1
59 
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68 						   unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70 
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 
75 static struct hnae3_ae_algo ae_algo;
76 
77 static struct workqueue_struct *hclge_wq;
78 
79 static const struct pci_device_id ae_algo_pci_tbl[] = {
80 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
81 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
82 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
83 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
84 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
85 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
86 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
87 	/* required last entry */
88 	{0, }
89 };
90 
91 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
92 
93 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
94 					 HCLGE_CMDQ_TX_ADDR_H_REG,
95 					 HCLGE_CMDQ_TX_DEPTH_REG,
96 					 HCLGE_CMDQ_TX_TAIL_REG,
97 					 HCLGE_CMDQ_TX_HEAD_REG,
98 					 HCLGE_CMDQ_RX_ADDR_L_REG,
99 					 HCLGE_CMDQ_RX_ADDR_H_REG,
100 					 HCLGE_CMDQ_RX_DEPTH_REG,
101 					 HCLGE_CMDQ_RX_TAIL_REG,
102 					 HCLGE_CMDQ_RX_HEAD_REG,
103 					 HCLGE_VECTOR0_CMDQ_SRC_REG,
104 					 HCLGE_CMDQ_INTR_STS_REG,
105 					 HCLGE_CMDQ_INTR_EN_REG,
106 					 HCLGE_CMDQ_INTR_GEN_REG};
107 
108 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
109 					   HCLGE_VECTOR0_OTER_EN_REG,
110 					   HCLGE_MISC_RESET_STS_REG,
111 					   HCLGE_MISC_VECTOR_INT_STS,
112 					   HCLGE_GLOBAL_RESET_REG,
113 					   HCLGE_FUN_RST_ING,
114 					   HCLGE_GRO_EN_REG};
115 
116 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
117 					 HCLGE_RING_RX_ADDR_H_REG,
118 					 HCLGE_RING_RX_BD_NUM_REG,
119 					 HCLGE_RING_RX_BD_LENGTH_REG,
120 					 HCLGE_RING_RX_MERGE_EN_REG,
121 					 HCLGE_RING_RX_TAIL_REG,
122 					 HCLGE_RING_RX_HEAD_REG,
123 					 HCLGE_RING_RX_FBD_NUM_REG,
124 					 HCLGE_RING_RX_OFFSET_REG,
125 					 HCLGE_RING_RX_FBD_OFFSET_REG,
126 					 HCLGE_RING_RX_STASH_REG,
127 					 HCLGE_RING_RX_BD_ERR_REG,
128 					 HCLGE_RING_TX_ADDR_L_REG,
129 					 HCLGE_RING_TX_ADDR_H_REG,
130 					 HCLGE_RING_TX_BD_NUM_REG,
131 					 HCLGE_RING_TX_PRIORITY_REG,
132 					 HCLGE_RING_TX_TC_REG,
133 					 HCLGE_RING_TX_MERGE_EN_REG,
134 					 HCLGE_RING_TX_TAIL_REG,
135 					 HCLGE_RING_TX_HEAD_REG,
136 					 HCLGE_RING_TX_FBD_NUM_REG,
137 					 HCLGE_RING_TX_OFFSET_REG,
138 					 HCLGE_RING_TX_EBD_NUM_REG,
139 					 HCLGE_RING_TX_EBD_OFFSET_REG,
140 					 HCLGE_RING_TX_BD_ERR_REG,
141 					 HCLGE_RING_EN_REG};
142 
143 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
144 					     HCLGE_TQP_INTR_GL0_REG,
145 					     HCLGE_TQP_INTR_GL1_REG,
146 					     HCLGE_TQP_INTR_GL2_REG,
147 					     HCLGE_TQP_INTR_RL_REG};
148 
149 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
150 	"App    Loopback test",
151 	"Serdes serial Loopback test",
152 	"Serdes parallel Loopback test",
153 	"Phy    Loopback test"
154 };
155 
156 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
157 	{"mac_tx_mac_pause_num",
158 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
159 	{"mac_rx_mac_pause_num",
160 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
161 	{"mac_tx_control_pkt_num",
162 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
163 	{"mac_rx_control_pkt_num",
164 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
165 	{"mac_tx_pfc_pkt_num",
166 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
167 	{"mac_tx_pfc_pri0_pkt_num",
168 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
169 	{"mac_tx_pfc_pri1_pkt_num",
170 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
171 	{"mac_tx_pfc_pri2_pkt_num",
172 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
173 	{"mac_tx_pfc_pri3_pkt_num",
174 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
175 	{"mac_tx_pfc_pri4_pkt_num",
176 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
177 	{"mac_tx_pfc_pri5_pkt_num",
178 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
179 	{"mac_tx_pfc_pri6_pkt_num",
180 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
181 	{"mac_tx_pfc_pri7_pkt_num",
182 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
183 	{"mac_rx_pfc_pkt_num",
184 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
185 	{"mac_rx_pfc_pri0_pkt_num",
186 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
187 	{"mac_rx_pfc_pri1_pkt_num",
188 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
189 	{"mac_rx_pfc_pri2_pkt_num",
190 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
191 	{"mac_rx_pfc_pri3_pkt_num",
192 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
193 	{"mac_rx_pfc_pri4_pkt_num",
194 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
195 	{"mac_rx_pfc_pri5_pkt_num",
196 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
197 	{"mac_rx_pfc_pri6_pkt_num",
198 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
199 	{"mac_rx_pfc_pri7_pkt_num",
200 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
201 	{"mac_tx_total_pkt_num",
202 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
203 	{"mac_tx_total_oct_num",
204 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
205 	{"mac_tx_good_pkt_num",
206 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
207 	{"mac_tx_bad_pkt_num",
208 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
209 	{"mac_tx_good_oct_num",
210 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
211 	{"mac_tx_bad_oct_num",
212 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
213 	{"mac_tx_uni_pkt_num",
214 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
215 	{"mac_tx_multi_pkt_num",
216 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
217 	{"mac_tx_broad_pkt_num",
218 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
219 	{"mac_tx_undersize_pkt_num",
220 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
221 	{"mac_tx_oversize_pkt_num",
222 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
223 	{"mac_tx_64_oct_pkt_num",
224 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
225 	{"mac_tx_65_127_oct_pkt_num",
226 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
227 	{"mac_tx_128_255_oct_pkt_num",
228 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
229 	{"mac_tx_256_511_oct_pkt_num",
230 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
231 	{"mac_tx_512_1023_oct_pkt_num",
232 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
233 	{"mac_tx_1024_1518_oct_pkt_num",
234 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
235 	{"mac_tx_1519_2047_oct_pkt_num",
236 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
237 	{"mac_tx_2048_4095_oct_pkt_num",
238 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
239 	{"mac_tx_4096_8191_oct_pkt_num",
240 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
241 	{"mac_tx_8192_9216_oct_pkt_num",
242 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
243 	{"mac_tx_9217_12287_oct_pkt_num",
244 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
245 	{"mac_tx_12288_16383_oct_pkt_num",
246 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
247 	{"mac_tx_1519_max_good_pkt_num",
248 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
249 	{"mac_tx_1519_max_bad_pkt_num",
250 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
251 	{"mac_rx_total_pkt_num",
252 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
253 	{"mac_rx_total_oct_num",
254 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
255 	{"mac_rx_good_pkt_num",
256 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
257 	{"mac_rx_bad_pkt_num",
258 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
259 	{"mac_rx_good_oct_num",
260 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
261 	{"mac_rx_bad_oct_num",
262 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
263 	{"mac_rx_uni_pkt_num",
264 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
265 	{"mac_rx_multi_pkt_num",
266 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
267 	{"mac_rx_broad_pkt_num",
268 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
269 	{"mac_rx_undersize_pkt_num",
270 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
271 	{"mac_rx_oversize_pkt_num",
272 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
273 	{"mac_rx_64_oct_pkt_num",
274 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
275 	{"mac_rx_65_127_oct_pkt_num",
276 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
277 	{"mac_rx_128_255_oct_pkt_num",
278 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
279 	{"mac_rx_256_511_oct_pkt_num",
280 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
281 	{"mac_rx_512_1023_oct_pkt_num",
282 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
283 	{"mac_rx_1024_1518_oct_pkt_num",
284 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
285 	{"mac_rx_1519_2047_oct_pkt_num",
286 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
287 	{"mac_rx_2048_4095_oct_pkt_num",
288 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
289 	{"mac_rx_4096_8191_oct_pkt_num",
290 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
291 	{"mac_rx_8192_9216_oct_pkt_num",
292 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
293 	{"mac_rx_9217_12287_oct_pkt_num",
294 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
295 	{"mac_rx_12288_16383_oct_pkt_num",
296 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
297 	{"mac_rx_1519_max_good_pkt_num",
298 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
299 	{"mac_rx_1519_max_bad_pkt_num",
300 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
301 
302 	{"mac_tx_fragment_pkt_num",
303 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
304 	{"mac_tx_undermin_pkt_num",
305 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
306 	{"mac_tx_jabber_pkt_num",
307 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
308 	{"mac_tx_err_all_pkt_num",
309 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
310 	{"mac_tx_from_app_good_pkt_num",
311 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
312 	{"mac_tx_from_app_bad_pkt_num",
313 		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
314 	{"mac_rx_fragment_pkt_num",
315 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
316 	{"mac_rx_undermin_pkt_num",
317 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
318 	{"mac_rx_jabber_pkt_num",
319 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
320 	{"mac_rx_fcs_err_pkt_num",
321 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
322 	{"mac_rx_send_app_good_pkt_num",
323 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
324 	{"mac_rx_send_app_bad_pkt_num",
325 		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 };
327 
328 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
329 	{
330 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
331 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
332 		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
333 		.i_port_bitmap = 0x1,
334 	},
335 };
336 
337 static const u8 hclge_hash_key[] = {
338 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
339 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
340 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
341 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
342 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 };
344 
345 static const u32 hclge_dfx_bd_offset_list[] = {
346 	HCLGE_DFX_BIOS_BD_OFFSET,
347 	HCLGE_DFX_SSU_0_BD_OFFSET,
348 	HCLGE_DFX_SSU_1_BD_OFFSET,
349 	HCLGE_DFX_IGU_BD_OFFSET,
350 	HCLGE_DFX_RPU_0_BD_OFFSET,
351 	HCLGE_DFX_RPU_1_BD_OFFSET,
352 	HCLGE_DFX_NCSI_BD_OFFSET,
353 	HCLGE_DFX_RTC_BD_OFFSET,
354 	HCLGE_DFX_PPP_BD_OFFSET,
355 	HCLGE_DFX_RCB_BD_OFFSET,
356 	HCLGE_DFX_TQP_BD_OFFSET,
357 	HCLGE_DFX_SSU_2_BD_OFFSET
358 };
359 
360 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
361 	HCLGE_OPC_DFX_BIOS_COMMON_REG,
362 	HCLGE_OPC_DFX_SSU_REG_0,
363 	HCLGE_OPC_DFX_SSU_REG_1,
364 	HCLGE_OPC_DFX_IGU_EGU_REG,
365 	HCLGE_OPC_DFX_RPU_REG_0,
366 	HCLGE_OPC_DFX_RPU_REG_1,
367 	HCLGE_OPC_DFX_NCSI_REG,
368 	HCLGE_OPC_DFX_RTC_REG,
369 	HCLGE_OPC_DFX_PPP_REG,
370 	HCLGE_OPC_DFX_RCB_REG,
371 	HCLGE_OPC_DFX_TQP_REG,
372 	HCLGE_OPC_DFX_SSU_REG_2
373 };
374 
375 static const struct key_info meta_data_key_info[] = {
376 	{ PACKET_TYPE_ID, 6},
377 	{ IP_FRAGEMENT, 1},
378 	{ ROCE_TYPE, 1},
379 	{ NEXT_KEY, 5},
380 	{ VLAN_NUMBER, 2},
381 	{ SRC_VPORT, 12},
382 	{ DST_VPORT, 12},
383 	{ TUNNEL_PACKET, 1},
384 };
385 
386 static const struct key_info tuple_key_info[] = {
387 	{ OUTER_DST_MAC, 48},
388 	{ OUTER_SRC_MAC, 48},
389 	{ OUTER_VLAN_TAG_FST, 16},
390 	{ OUTER_VLAN_TAG_SEC, 16},
391 	{ OUTER_ETH_TYPE, 16},
392 	{ OUTER_L2_RSV, 16},
393 	{ OUTER_IP_TOS, 8},
394 	{ OUTER_IP_PROTO, 8},
395 	{ OUTER_SRC_IP, 32},
396 	{ OUTER_DST_IP, 32},
397 	{ OUTER_L3_RSV, 16},
398 	{ OUTER_SRC_PORT, 16},
399 	{ OUTER_DST_PORT, 16},
400 	{ OUTER_L4_RSV, 32},
401 	{ OUTER_TUN_VNI, 24},
402 	{ OUTER_TUN_FLOW_ID, 8},
403 	{ INNER_DST_MAC, 48},
404 	{ INNER_SRC_MAC, 48},
405 	{ INNER_VLAN_TAG_FST, 16},
406 	{ INNER_VLAN_TAG_SEC, 16},
407 	{ INNER_ETH_TYPE, 16},
408 	{ INNER_L2_RSV, 16},
409 	{ INNER_IP_TOS, 8},
410 	{ INNER_IP_PROTO, 8},
411 	{ INNER_SRC_IP, 32},
412 	{ INNER_DST_IP, 32},
413 	{ INNER_L3_RSV, 16},
414 	{ INNER_SRC_PORT, 16},
415 	{ INNER_DST_PORT, 16},
416 	{ INNER_L4_RSV, 32},
417 };
418 
419 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
420 {
421 #define HCLGE_MAC_CMD_NUM 21
422 
423 	u64 *data = (u64 *)(&hdev->mac_stats);
424 	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
425 	__le64 *desc_data;
426 	int i, k, n;
427 	int ret;
428 
429 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
430 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
431 	if (ret) {
432 		dev_err(&hdev->pdev->dev,
433 			"Get MAC pkt stats fail, status = %d.\n", ret);
434 
435 		return ret;
436 	}
437 
438 	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
439 		/* for special opcode 0032, only the first desc has the head */
440 		if (unlikely(i == 0)) {
441 			desc_data = (__le64 *)(&desc[i].data[0]);
442 			n = HCLGE_RD_FIRST_STATS_NUM;
443 		} else {
444 			desc_data = (__le64 *)(&desc[i]);
445 			n = HCLGE_RD_OTHER_STATS_NUM;
446 		}
447 
448 		for (k = 0; k < n; k++) {
449 			*data += le64_to_cpu(*desc_data);
450 			data++;
451 			desc_data++;
452 		}
453 	}
454 
455 	return 0;
456 }
457 
458 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
459 {
460 	u64 *data = (u64 *)(&hdev->mac_stats);
461 	struct hclge_desc *desc;
462 	__le64 *desc_data;
463 	u16 i, k, n;
464 	int ret;
465 
466 	/* This may be called inside atomic sections,
467 	 * so GFP_ATOMIC is more suitable here
468 	 */
469 	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
470 	if (!desc)
471 		return -ENOMEM;
472 
473 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
474 	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
475 	if (ret) {
476 		kfree(desc);
477 		return ret;
478 	}
479 
480 	for (i = 0; i < desc_num; i++) {
481 		/* for special opcode 0034, only the first desc has the head */
482 		if (i == 0) {
483 			desc_data = (__le64 *)(&desc[i].data[0]);
484 			n = HCLGE_RD_FIRST_STATS_NUM;
485 		} else {
486 			desc_data = (__le64 *)(&desc[i]);
487 			n = HCLGE_RD_OTHER_STATS_NUM;
488 		}
489 
490 		for (k = 0; k < n; k++) {
491 			*data += le64_to_cpu(*desc_data);
492 			data++;
493 			desc_data++;
494 		}
495 	}
496 
497 	kfree(desc);
498 
499 	return 0;
500 }
501 
502 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
503 {
504 	struct hclge_desc desc;
505 	__le32 *desc_data;
506 	u32 reg_num;
507 	int ret;
508 
509 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
510 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
511 	if (ret)
512 		return ret;
513 
514 	desc_data = (__le32 *)(&desc.data[0]);
515 	reg_num = le32_to_cpu(*desc_data);
516 
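	/* Equivalent to desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4): one head
	 * descriptor plus one more descriptor for every four remaining 64-bit
	 * register values, rounded up.
	 */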
517 	*desc_num = 1 + ((reg_num - 3) >> 2) +
518 		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
519 
520 	return 0;
521 }
522 
523 static int hclge_mac_update_stats(struct hclge_dev *hdev)
524 {
525 	u32 desc_num;
526 	int ret;
527 
528 	ret = hclge_mac_query_reg_num(hdev, &desc_num);
529 
530 	/* The firmware supports the new statistics acquisition method */
531 	if (!ret)
532 		ret = hclge_mac_update_stats_complete(hdev, desc_num);
533 	else if (ret == -EOPNOTSUPP)
534 		ret = hclge_mac_update_stats_defective(hdev);
535 	else
536 		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
537 
538 	return ret;
539 }
540 
541 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
542 {
543 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
544 	struct hclge_vport *vport = hclge_get_vport(handle);
545 	struct hclge_dev *hdev = vport->back;
546 	struct hnae3_queue *queue;
547 	struct hclge_desc desc[1];
548 	struct hclge_tqp *tqp;
549 	int ret, i;
550 
551 	for (i = 0; i < kinfo->num_tqps; i++) {
552 		queue = handle->kinfo.tqp[i];
553 		tqp = container_of(queue, struct hclge_tqp, q);
554 		/* command : HCLGE_OPC_QUERY_RX_STATS */
555 		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
556 					   true);
557 
558 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
559 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
560 		if (ret) {
561 			dev_err(&hdev->pdev->dev,
562 				"Query tqp stat fail, status = %d, queue = %d\n",
563 				ret, i);
564 			return ret;
565 		}
566 		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
567 			le32_to_cpu(desc[0].data[1]);
568 	}
569 
570 	for (i = 0; i < kinfo->num_tqps; i++) {
571 		queue = handle->kinfo.tqp[i];
572 		tqp = container_of(queue, struct hclge_tqp, q);
573 		/* command : HCLGE_OPC_QUERY_TX_STATS */
574 		hclge_cmd_setup_basic_desc(&desc[0],
575 					   HCLGE_OPC_QUERY_TX_STATS,
576 					   true);
577 
578 		desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
579 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
580 		if (ret) {
581 			dev_err(&hdev->pdev->dev,
582 				"Query tqp stat fail, status = %d, queue = %d\n",
583 				ret, i);
584 			return ret;
585 		}
586 		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
587 			le32_to_cpu(desc[0].data[1]);
588 	}
589 
590 	return 0;
591 }
592 
593 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
594 {
595 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
596 	struct hclge_tqp *tqp;
597 	u64 *buff = data;
598 	int i;
599 
600 	for (i = 0; i < kinfo->num_tqps; i++) {
601 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
602 		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603 	}
604 
605 	for (i = 0; i < kinfo->num_tqps; i++) {
606 		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
607 		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
608 	}
609 
610 	return buff;
611 }
612 
613 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
614 {
615 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
616 
617 	/* each tqp has both a TX and an RX queue */
618 	return kinfo->num_tqps * 2;
619 }
620 
621 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
622 {
623 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
624 	u8 *buff = data;
625 	int i = 0;
626 
627 	for (i = 0; i < kinfo->num_tqps; i++) {
628 		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
629 			struct hclge_tqp, q);
630 		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
631 			 tqp->index);
632 		buff = buff + ETH_GSTRING_LEN;
633 	}
634 
635 	for (i = 0; i < kinfo->num_tqps; i++) {
636 		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
637 			struct hclge_tqp, q);
638 		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
639 			 tqp->index);
640 		buff = buff + ETH_GSTRING_LEN;
641 	}
642 
643 	return buff;
644 }
645 
646 static u64 *hclge_comm_get_stats(const void *comm_stats,
647 				 const struct hclge_comm_stats_str strs[],
648 				 int size, u64 *data)
649 {
650 	u64 *buf = data;
651 	u32 i;
652 
653 	for (i = 0; i < size; i++)
654 		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
655 
656 	return buf + size;
657 }
658 
659 static u8 *hclge_comm_get_strings(u32 stringset,
660 				  const struct hclge_comm_stats_str strs[],
661 				  int size, u8 *data)
662 {
663 	char *buff = (char *)data;
664 	u32 i;
665 
666 	if (stringset != ETH_SS_STATS)
667 		return buff;
668 
669 	for (i = 0; i < size; i++) {
670 		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
671 		buff = buff + ETH_GSTRING_LEN;
672 	}
673 
674 	return (u8 *)buff;
675 }
676 
677 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
678 {
679 	struct hnae3_handle *handle;
680 	int status;
681 
682 	handle = &hdev->vport[0].nic;
683 	if (handle->client) {
684 		status = hclge_tqps_update_stats(handle);
685 		if (status) {
686 			dev_err(&hdev->pdev->dev,
687 				"Update TQPS stats fail, status = %d.\n",
688 				status);
689 		}
690 	}
691 
692 	status = hclge_mac_update_stats(hdev);
693 	if (status)
694 		dev_err(&hdev->pdev->dev,
695 			"Update MAC stats fail, status = %d.\n", status);
696 }
697 
698 static void hclge_update_stats(struct hnae3_handle *handle,
699 			       struct net_device_stats *net_stats)
700 {
701 	struct hclge_vport *vport = hclge_get_vport(handle);
702 	struct hclge_dev *hdev = vport->back;
703 	int status;
704 
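	/* skip if another statistics update is already in progress */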
705 	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706 		return;
707 
708 	status = hclge_mac_update_stats(hdev);
709 	if (status)
710 		dev_err(&hdev->pdev->dev,
711 			"Update MAC stats fail, status = %d.\n",
712 			status);
713 
714 	status = hclge_tqps_update_stats(handle);
715 	if (status)
716 		dev_err(&hdev->pdev->dev,
717 			"Update TQPS stats fail, status = %d.\n",
718 			status);
719 
720 	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 }
722 
723 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
724 {
725 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
726 		HNAE3_SUPPORT_PHY_LOOPBACK |\
727 		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
728 		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
729 
730 	struct hclge_vport *vport = hclge_get_vport(handle);
731 	struct hclge_dev *hdev = vport->back;
732 	int count = 0;
733 
734 	/* Loopback test support rules:
735 	 * mac: only supported in GE mode
736 	 * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
737 	 * phy: only supported when a PHY device exists on the board
738 	 */
739 	if (stringset == ETH_SS_TEST) {
740 		/* clear loopback bit flags at first */
741 		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
742 		if (hdev->pdev->revision >= 0x21 ||
743 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
744 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
745 		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
746 			count += 1;
747 			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
748 		}
749 
750 		count += 2;
751 		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
752 		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
753 
754 		if (hdev->hw.mac.phydev) {
755 			count += 1;
756 			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757 		}
758 
759 	} else if (stringset == ETH_SS_STATS) {
760 		count = ARRAY_SIZE(g_mac_stats_string) +
761 			hclge_tqps_get_sset_count(handle, stringset);
762 	}
763 
764 	return count;
765 }
766 
767 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768 			      u8 *data)
769 {
770 	u8 *p = data;
771 	int size;
772 
773 	if (stringset == ETH_SS_STATS) {
774 		size = ARRAY_SIZE(g_mac_stats_string);
775 		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
776 					   size, p);
777 		p = hclge_tqps_get_strings(handle, p);
778 	} else if (stringset == ETH_SS_TEST) {
779 		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
780 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
781 			       ETH_GSTRING_LEN);
782 			p += ETH_GSTRING_LEN;
783 		}
784 		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
785 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
786 			       ETH_GSTRING_LEN);
787 			p += ETH_GSTRING_LEN;
788 		}
789 		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
790 			memcpy(p,
791 			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
792 			       ETH_GSTRING_LEN);
793 			p += ETH_GSTRING_LEN;
794 		}
795 		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
796 			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
797 			       ETH_GSTRING_LEN);
798 			p += ETH_GSTRING_LEN;
799 		}
800 	}
801 }
802 
803 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
804 {
805 	struct hclge_vport *vport = hclge_get_vport(handle);
806 	struct hclge_dev *hdev = vport->back;
807 	u64 *p;
808 
809 	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
810 				 ARRAY_SIZE(g_mac_stats_string), data);
811 	p = hclge_tqps_get_stats(handle, p);
812 }
813 
814 static void hclge_get_mac_stat(struct hnae3_handle *handle,
815 			       struct hns3_mac_stats *mac_stats)
816 {
817 	struct hclge_vport *vport = hclge_get_vport(handle);
818 	struct hclge_dev *hdev = vport->back;
819 
820 	hclge_update_stats(handle, NULL);
821 
822 	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
823 	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824 }
825 
826 static int hclge_parse_func_status(struct hclge_dev *hdev,
827 				   struct hclge_func_status_cmd *status)
828 {
829 #define HCLGE_MAC_ID_MASK	0xF
830 
831 	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
832 		return -EINVAL;
833 
834 	/* Set the PF as the main PF */
835 	if (status->pf_state & HCLGE_PF_STATE_MAIN)
836 		hdev->flag |= HCLGE_FLAG_MAIN;
837 	else
838 		hdev->flag &= ~HCLGE_FLAG_MAIN;
839 
840 	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
841 	return 0;
842 }
843 
844 static int hclge_query_function_status(struct hclge_dev *hdev)
845 {
846 #define HCLGE_QUERY_MAX_CNT	5
847 
848 	struct hclge_func_status_cmd *req;
849 	struct hclge_desc desc;
850 	int timeout = 0;
851 	int ret;
852 
853 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
854 	req = (struct hclge_func_status_cmd *)desc.data;
855 
856 	do {
857 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
858 		if (ret) {
859 			dev_err(&hdev->pdev->dev,
860 				"query function status failed %d.\n", ret);
861 			return ret;
862 		}
863 
864 		/* Check if PF reset is done */
865 		if (req->pf_state)
866 			break;
867 		usleep_range(1000, 2000);
868 	} while (timeout++ < HCLGE_QUERY_MAX_CNT);
869 
870 	return hclge_parse_func_status(hdev, req);
871 }
872 
873 static int hclge_query_pf_resource(struct hclge_dev *hdev)
874 {
875 	struct hclge_pf_res_cmd *req;
876 	struct hclge_desc desc;
877 	int ret;
878 
879 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
880 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
881 	if (ret) {
882 		dev_err(&hdev->pdev->dev,
883 			"query pf resource failed %d.\n", ret);
884 		return ret;
885 	}
886 
887 	req = (struct hclge_pf_res_cmd *)desc.data;
888 	hdev->num_tqps = le16_to_cpu(req->tqp_num);
889 	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
890 
891 	if (req->tx_buf_size)
892 		hdev->tx_buf_size =
893 			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
894 	else
895 		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
896 
897 	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
898 
899 	if (req->dv_buf_size)
900 		hdev->dv_buf_size =
901 			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
902 	else
903 		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
904 
905 	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
906 
907 	if (hnae3_dev_roce_supported(hdev)) {
908 		hdev->roce_base_msix_offset =
909 		hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
910 				HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
911 		hdev->num_roce_msi =
912 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
913 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
914 
915 		/* the NIC's MSI-X vector number is always equal to RoCE's. */
916 		hdev->num_nic_msi = hdev->num_roce_msi;
917 
918 		/* The PF should have both NIC and RoCE vectors;
919 		 * NIC vectors are queued before RoCE vectors.
920 		 */
921 		hdev->num_msi = hdev->num_roce_msi +
922 				hdev->roce_base_msix_offset;
923 	} else {
924 		hdev->num_msi =
925 		hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
926 				HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
927 
928 		hdev->num_nic_msi = hdev->num_msi;
929 	}
930 
931 	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
932 		dev_err(&hdev->pdev->dev,
933 			"Just %u msi resources, not enough for pf (min: 2).\n",
934 			hdev->num_nic_msi);
935 		return -EINVAL;
936 	}
937 
938 	return 0;
939 }
940 
941 static int hclge_parse_speed(int speed_cmd, int *speed)
942 {
943 	switch (speed_cmd) {
944 	case 6:
945 		*speed = HCLGE_MAC_SPEED_10M;
946 		break;
947 	case 7:
948 		*speed = HCLGE_MAC_SPEED_100M;
949 		break;
950 	case 0:
951 		*speed = HCLGE_MAC_SPEED_1G;
952 		break;
953 	case 1:
954 		*speed = HCLGE_MAC_SPEED_10G;
955 		break;
956 	case 2:
957 		*speed = HCLGE_MAC_SPEED_25G;
958 		break;
959 	case 3:
960 		*speed = HCLGE_MAC_SPEED_40G;
961 		break;
962 	case 4:
963 		*speed = HCLGE_MAC_SPEED_50G;
964 		break;
965 	case 5:
966 		*speed = HCLGE_MAC_SPEED_100G;
967 		break;
968 	default:
969 		return -EINVAL;
970 	}
971 
972 	return 0;
973 }
974 
975 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
976 {
977 	struct hclge_vport *vport = hclge_get_vport(handle);
978 	struct hclge_dev *hdev = vport->back;
979 	u32 speed_ability = hdev->hw.mac.speed_ability;
980 	u32 speed_bit = 0;
981 
982 	switch (speed) {
983 	case HCLGE_MAC_SPEED_10M:
984 		speed_bit = HCLGE_SUPPORT_10M_BIT;
985 		break;
986 	case HCLGE_MAC_SPEED_100M:
987 		speed_bit = HCLGE_SUPPORT_100M_BIT;
988 		break;
989 	case HCLGE_MAC_SPEED_1G:
990 		speed_bit = HCLGE_SUPPORT_1G_BIT;
991 		break;
992 	case HCLGE_MAC_SPEED_10G:
993 		speed_bit = HCLGE_SUPPORT_10G_BIT;
994 		break;
995 	case HCLGE_MAC_SPEED_25G:
996 		speed_bit = HCLGE_SUPPORT_25G_BIT;
997 		break;
998 	case HCLGE_MAC_SPEED_40G:
999 		speed_bit = HCLGE_SUPPORT_40G_BIT;
1000 		break;
1001 	case HCLGE_MAC_SPEED_50G:
1002 		speed_bit = HCLGE_SUPPORT_50G_BIT;
1003 		break;
1004 	case HCLGE_MAC_SPEED_100G:
1005 		speed_bit = HCLGE_SUPPORT_100G_BIT;
1006 		break;
1007 	default:
1008 		return -EINVAL;
1009 	}
1010 
1011 	if (speed_bit & speed_ability)
1012 		return 0;
1013 
1014 	return -EINVAL;
1015 }
1016 
1017 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1018 {
1019 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1020 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1021 				 mac->supported);
1022 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1023 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1024 				 mac->supported);
1025 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1026 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1027 				 mac->supported);
1028 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1029 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1030 				 mac->supported);
1031 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1032 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1033 				 mac->supported);
1034 }
1035 
1036 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1037 {
1038 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1039 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1040 				 mac->supported);
1041 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1042 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1043 				 mac->supported);
1044 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1045 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1046 				 mac->supported);
1047 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1048 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1049 				 mac->supported);
1050 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1051 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 				 mac->supported);
1053 }
1054 
1055 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1056 {
1057 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1058 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1059 				 mac->supported);
1060 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1061 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1062 				 mac->supported);
1063 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1064 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1065 				 mac->supported);
1066 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1067 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1068 				 mac->supported);
1069 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1070 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1071 				 mac->supported);
1072 }
1073 
1074 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1075 {
1076 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1077 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1078 				 mac->supported);
1079 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1080 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1081 				 mac->supported);
1082 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1083 		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1084 				 mac->supported);
1085 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1086 		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1087 				 mac->supported);
1088 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1089 		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1090 				 mac->supported);
1091 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1092 		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1093 				 mac->supported);
1094 }
1095 
1096 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1097 {
1098 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1099 	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1100 
1101 	switch (mac->speed) {
1102 	case HCLGE_MAC_SPEED_10G:
1103 	case HCLGE_MAC_SPEED_40G:
1104 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1105 				 mac->supported);
1106 		mac->fec_ability =
1107 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1108 		break;
1109 	case HCLGE_MAC_SPEED_25G:
1110 	case HCLGE_MAC_SPEED_50G:
1111 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1112 				 mac->supported);
1113 		mac->fec_ability =
1114 			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1115 			BIT(HNAE3_FEC_AUTO);
1116 		break;
1117 	case HCLGE_MAC_SPEED_100G:
1118 		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1119 		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1120 		break;
1121 	default:
1122 		mac->fec_ability = 0;
1123 		break;
1124 	}
1125 }
1126 
1127 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1128 					u8 speed_ability)
1129 {
1130 	struct hclge_mac *mac = &hdev->hw.mac;
1131 
1132 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1133 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1134 				 mac->supported);
1135 
1136 	hclge_convert_setting_sr(mac, speed_ability);
1137 	hclge_convert_setting_lr(mac, speed_ability);
1138 	hclge_convert_setting_cr(mac, speed_ability);
1139 	if (hdev->pdev->revision >= 0x21)
1140 		hclge_convert_setting_fec(mac);
1141 
1142 	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1143 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1144 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1145 }
1146 
1147 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1148 					    u8 speed_ability)
1149 {
1150 	struct hclge_mac *mac = &hdev->hw.mac;
1151 
1152 	hclge_convert_setting_kr(mac, speed_ability);
1153 	if (hdev->pdev->revision >= 0x21)
1154 		hclge_convert_setting_fec(mac);
1155 	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1156 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1157 	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1158 }
1159 
1160 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1161 					 u8 speed_ability)
1162 {
1163 	unsigned long *supported = hdev->hw.mac.supported;
1164 
1165 	/* default to supporting all speeds for a GE port */
1166 	if (!speed_ability)
1167 		speed_ability = HCLGE_SUPPORT_GE;
1168 
1169 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1171 				 supported);
1172 
1173 	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1174 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1175 				 supported);
1176 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1177 				 supported);
1178 	}
1179 
1180 	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1181 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1182 		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1183 	}
1184 
1185 	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1186 	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1187 	linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1188 	linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1189 }
1190 
1191 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1192 {
1193 	u8 media_type = hdev->hw.mac.media_type;
1194 
1195 	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1196 		hclge_parse_fiber_link_mode(hdev, speed_ability);
1197 	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1198 		hclge_parse_copper_link_mode(hdev, speed_ability);
1199 	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1200 		hclge_parse_backplane_link_mode(hdev, speed_ability);
1201 }
1202 
1203 static u32 hclge_get_max_speed(u8 speed_ability)
1204 {
1205 	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1206 		return HCLGE_MAC_SPEED_100G;
1207 
1208 	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1209 		return HCLGE_MAC_SPEED_50G;
1210 
1211 	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1212 		return HCLGE_MAC_SPEED_40G;
1213 
1214 	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1215 		return HCLGE_MAC_SPEED_25G;
1216 
1217 	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1218 		return HCLGE_MAC_SPEED_10G;
1219 
1220 	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1221 		return HCLGE_MAC_SPEED_1G;
1222 
1223 	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1224 		return HCLGE_MAC_SPEED_100M;
1225 
1226 	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1227 		return HCLGE_MAC_SPEED_10M;
1228 
1229 	return HCLGE_MAC_SPEED_1G;
1230 }
1231 
1232 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1233 {
1234 	struct hclge_cfg_param_cmd *req;
1235 	u64 mac_addr_tmp_high;
1236 	u64 mac_addr_tmp;
1237 	unsigned int i;
1238 
1239 	req = (struct hclge_cfg_param_cmd *)desc[0].data;
1240 
1241 	/* get the configuration */
1242 	cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 					      HCLGE_CFG_VMDQ_M,
1244 					      HCLGE_CFG_VMDQ_S);
1245 	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1246 				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1247 	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1248 					    HCLGE_CFG_TQP_DESC_N_M,
1249 					    HCLGE_CFG_TQP_DESC_N_S);
1250 
1251 	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252 					HCLGE_CFG_PHY_ADDR_M,
1253 					HCLGE_CFG_PHY_ADDR_S);
1254 	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255 					  HCLGE_CFG_MEDIA_TP_M,
1256 					  HCLGE_CFG_MEDIA_TP_S);
1257 	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1258 					  HCLGE_CFG_RX_BUF_LEN_M,
1259 					  HCLGE_CFG_RX_BUF_LEN_S);
1260 	/* get mac_address */
1261 	mac_addr_tmp = __le32_to_cpu(req->param[2]);
1262 	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1263 					    HCLGE_CFG_MAC_ADDR_H_M,
1264 					    HCLGE_CFG_MAC_ADDR_H_S);
1265 
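	/* param[2] carries the low 32 bits of the MAC address and param[3]
	 * carries the upper bits; (<< 31) << 1 is simply a two-step << 32.
	 */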
1266 	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1267 
1268 	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269 					     HCLGE_CFG_DEFAULT_SPEED_M,
1270 					     HCLGE_CFG_DEFAULT_SPEED_S);
1271 	cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1272 					    HCLGE_CFG_RSS_SIZE_M,
1273 					    HCLGE_CFG_RSS_SIZE_S);
1274 
1275 	for (i = 0; i < ETH_ALEN; i++)
1276 		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1277 
1278 	req = (struct hclge_cfg_param_cmd *)desc[1].data;
1279 	cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1280 
1281 	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 					     HCLGE_CFG_SPEED_ABILITY_M,
1283 					     HCLGE_CFG_SPEED_ABILITY_S);
1284 	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1285 					 HCLGE_CFG_UMV_TBL_SPACE_M,
1286 					 HCLGE_CFG_UMV_TBL_SPACE_S);
1287 	if (!cfg->umv_space)
1288 		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1289 }
1290 
1291 /* hclge_get_cfg: query the static parameters from flash
1292  * @hdev: pointer to struct hclge_dev
1293  * @hcfg: the config structure to be filled in
1294  */
1295 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1296 {
1297 	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1298 	struct hclge_cfg_param_cmd *req;
1299 	unsigned int i;
1300 	int ret;
1301 
1302 	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1303 		u32 offset = 0;
1304 
1305 		req = (struct hclge_cfg_param_cmd *)desc[i].data;
1306 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1307 					   true);
1308 		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1309 				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1310 		/* Length is in units of 4 bytes when sent to the hardware */
1311 		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1312 				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1313 		req->offset = cpu_to_le32(offset);
1314 	}
1315 
1316 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1317 	if (ret) {
1318 		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319 		return ret;
1320 	}
1321 
1322 	hclge_parse_cfg(hcfg, desc);
1323 
1324 	return 0;
1325 }
1326 
1327 static int hclge_get_cap(struct hclge_dev *hdev)
1328 {
1329 	int ret;
1330 
1331 	ret = hclge_query_function_status(hdev);
1332 	if (ret) {
1333 		dev_err(&hdev->pdev->dev,
1334 			"query function status error %d.\n", ret);
1335 		return ret;
1336 	}
1337 
1338 	/* get pf resource */
1339 	return hclge_query_pf_resource(hdev);
1340 }
1341 
1342 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1343 {
1344 #define HCLGE_MIN_TX_DESC	64
1345 #define HCLGE_MIN_RX_DESC	64
1346 
1347 	if (!is_kdump_kernel())
1348 		return;
1349 
1350 	dev_info(&hdev->pdev->dev,
1351 		 "Running kdump kernel. Using minimal resources\n");
1352 
1353 	/* the minimum number of queue pairs equals the number of vports */
1354 	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1355 	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1356 	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1357 }
1358 
1359 static int hclge_configure(struct hclge_dev *hdev)
1360 {
1361 	struct hclge_cfg cfg;
1362 	unsigned int i;
1363 	int ret;
1364 
1365 	ret = hclge_get_cfg(hdev, &cfg);
1366 	if (ret)
1367 		return ret;
1368 
1369 	hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1370 	hdev->base_tqp_pid = 0;
1371 	hdev->rss_size_max = cfg.rss_size_max;
1372 	hdev->rx_buf_len = cfg.rx_buf_len;
1373 	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1374 	hdev->hw.mac.media_type = cfg.media_type;
1375 	hdev->hw.mac.phy_addr = cfg.phy_addr;
1376 	hdev->num_tx_desc = cfg.tqp_desc_num;
1377 	hdev->num_rx_desc = cfg.tqp_desc_num;
1378 	hdev->tm_info.num_pg = 1;
1379 	hdev->tc_max = cfg.tc_num;
1380 	hdev->tm_info.hw_pfc_map = 0;
1381 	hdev->wanted_umv_size = cfg.umv_space;
1382 
1383 	if (hnae3_dev_fd_supported(hdev)) {
1384 		hdev->fd_en = true;
1385 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1386 	}
1387 
1388 	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1389 	if (ret) {
1390 		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1391 			cfg.default_speed, ret);
1392 		return ret;
1393 	}
1394 
1395 	hclge_parse_link_mode(hdev, cfg.speed_ability);
1396 
1397 	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1398 
1399 	if ((hdev->tc_max > HNAE3_MAX_TC) ||
1400 	    (hdev->tc_max < 1)) {
1401 		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1402 			 hdev->tc_max);
1403 		hdev->tc_max = 1;
1404 	}
1405 
1406 	/* Dev does not support DCB */
1407 	if (!hnae3_dev_dcb_supported(hdev)) {
1408 		hdev->tc_max = 1;
1409 		hdev->pfc_max = 0;
1410 	} else {
1411 		hdev->pfc_max = hdev->tc_max;
1412 	}
1413 
1414 	hdev->tm_info.num_tc = 1;
1415 
1416 	/* Non-contiguous TCs are currently not supported */
1417 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1418 		hnae3_set_bit(hdev->hw_tc_map, i, 1);
1419 
1420 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1421 
1422 	hclge_init_kdump_kernel_config(hdev);
1423 
1424 	/* Set the initial affinity based on the PCI function number */
1425 	i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1426 	i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1427 	cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1428 			&hdev->affinity_mask);
1429 
1430 	return ret;
1431 }
1432 
1433 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1434 			    u16 tso_mss_max)
1435 {
1436 	struct hclge_cfg_tso_status_cmd *req;
1437 	struct hclge_desc desc;
1438 
1439 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1440 
1441 	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1442 	req->tso_mss_min = cpu_to_le16(tso_mss_min);
1443 	req->tso_mss_max = cpu_to_le16(tso_mss_max);
1444 
1445 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1446 }
1447 
1448 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1449 {
1450 	struct hclge_cfg_gro_status_cmd *req;
1451 	struct hclge_desc desc;
1452 	int ret;
1453 
1454 	if (!hnae3_dev_gro_supported(hdev))
1455 		return 0;
1456 
1457 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1458 	req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1459 
1460 	req->gro_en = en ? 1 : 0;
1461 
1462 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1463 	if (ret)
1464 		dev_err(&hdev->pdev->dev,
1465 			"GRO hardware config cmd failed, ret = %d\n", ret);
1466 
1467 	return ret;
1468 }
1469 
1470 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1471 {
1472 	struct hclge_tqp *tqp;
1473 	int i;
1474 
1475 	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1476 				  sizeof(struct hclge_tqp), GFP_KERNEL);
1477 	if (!hdev->htqp)
1478 		return -ENOMEM;
1479 
1480 	tqp = hdev->htqp;
1481 
1482 	for (i = 0; i < hdev->num_tqps; i++) {
1483 		tqp->dev = &hdev->pdev->dev;
1484 		tqp->index = i;
1485 
1486 		tqp->q.ae_algo = &ae_algo;
1487 		tqp->q.buf_size = hdev->rx_buf_len;
1488 		tqp->q.tx_desc_num = hdev->num_tx_desc;
1489 		tqp->q.rx_desc_num = hdev->num_rx_desc;
1490 		tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1491 			i * HCLGE_TQP_REG_SIZE;
1492 
1493 		tqp++;
1494 	}
1495 
1496 	return 0;
1497 }
1498 
1499 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1500 				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
1501 {
1502 	struct hclge_tqp_map_cmd *req;
1503 	struct hclge_desc desc;
1504 	int ret;
1505 
1506 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1507 
1508 	req = (struct hclge_tqp_map_cmd *)desc.data;
1509 	req->tqp_id = cpu_to_le16(tqp_pid);
1510 	req->tqp_vf = func_id;
1511 	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1512 	if (!is_pf)
1513 		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1514 	req->tqp_vid = cpu_to_le16(tqp_vid);
1515 
1516 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1517 	if (ret)
1518 		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1519 
1520 	return ret;
1521 }
1522 
1523 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1524 {
1525 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1526 	struct hclge_dev *hdev = vport->back;
1527 	int i, alloced;
1528 
1529 	for (i = 0, alloced = 0; i < hdev->num_tqps &&
1530 	     alloced < num_tqps; i++) {
1531 		if (!hdev->htqp[i].alloced) {
1532 			hdev->htqp[i].q.handle = &vport->nic;
1533 			hdev->htqp[i].q.tqp_index = alloced;
1534 			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1535 			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1536 			kinfo->tqp[alloced] = &hdev->htqp[i].q;
1537 			hdev->htqp[i].alloced = true;
1538 			alloced++;
1539 		}
1540 	}
1541 	vport->alloc_tqps = alloced;
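	/* rss_size is the per-TC queue count, limited by both the hardware
	 * maximum and the TQPs actually allocated to this vport.
	 */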
1542 	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1543 				vport->alloc_tqps / hdev->tm_info.num_tc);
1544 
1545 	/* ensure a one-to-one mapping between IRQ and queue by default */
1546 	kinfo->rss_size = min_t(u16, kinfo->rss_size,
1547 				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1548 
1549 	return 0;
1550 }
1551 
1552 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1553 			    u16 num_tx_desc, u16 num_rx_desc)
1554 
1555 {
1556 	struct hnae3_handle *nic = &vport->nic;
1557 	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1558 	struct hclge_dev *hdev = vport->back;
1559 	int ret;
1560 
1561 	kinfo->num_tx_desc = num_tx_desc;
1562 	kinfo->num_rx_desc = num_rx_desc;
1563 
1564 	kinfo->rx_buf_len = hdev->rx_buf_len;
1565 
1566 	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1567 				  sizeof(struct hnae3_queue *), GFP_KERNEL);
1568 	if (!kinfo->tqp)
1569 		return -ENOMEM;
1570 
1571 	ret = hclge_assign_tqp(vport, num_tqps);
1572 	if (ret)
1573 		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1574 
1575 	return ret;
1576 }
1577 
1578 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1579 				  struct hclge_vport *vport)
1580 {
1581 	struct hnae3_handle *nic = &vport->nic;
1582 	struct hnae3_knic_private_info *kinfo;
1583 	u16 i;
1584 
1585 	kinfo = &nic->kinfo;
1586 	for (i = 0; i < vport->alloc_tqps; i++) {
1587 		struct hclge_tqp *q =
1588 			container_of(kinfo->tqp[i], struct hclge_tqp, q);
1589 		bool is_pf;
1590 		int ret;
1591 
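		/* vport 0 is the PF itself; VF vports start from 1 */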
1592 		is_pf = !(vport->vport_id);
1593 		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1594 					     i, is_pf);
1595 		if (ret)
1596 			return ret;
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 static int hclge_map_tqp(struct hclge_dev *hdev)
1603 {
1604 	struct hclge_vport *vport = hdev->vport;
1605 	u16 i, num_vport;
1606 
1607 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1608 	for (i = 0; i < num_vport; i++)	{
1609 		int ret;
1610 
1611 		ret = hclge_map_tqp_to_vport(hdev, vport);
1612 		if (ret)
1613 			return ret;
1614 
1615 		vport++;
1616 	}
1617 
1618 	return 0;
1619 }
1620 
1621 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1622 {
1623 	struct hnae3_handle *nic = &vport->nic;
1624 	struct hclge_dev *hdev = vport->back;
1625 	int ret;
1626 
1627 	nic->pdev = hdev->pdev;
1628 	nic->ae_algo = &ae_algo;
1629 	nic->numa_node_mask = hdev->numa_node_mask;
1630 
1631 	ret = hclge_knic_setup(vport, num_tqps,
1632 			       hdev->num_tx_desc, hdev->num_rx_desc);
1633 	if (ret)
1634 		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1635 
1636 	return ret;
1637 }
1638 
1639 static int hclge_alloc_vport(struct hclge_dev *hdev)
1640 {
1641 	struct pci_dev *pdev = hdev->pdev;
1642 	struct hclge_vport *vport;
1643 	u32 tqp_main_vport;
1644 	u32 tqp_per_vport;
1645 	int num_vport, i;
1646 	int ret;
1647 
1648 	/* We need to alloc a vport for the main NIC of the PF */
1649 	num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1650 
1651 	if (hdev->num_tqps < num_vport) {
1652 		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1653 			hdev->num_tqps, num_vport);
1654 		return -EINVAL;
1655 	}
1656 
1657 	/* Alloc the same number of TQPs for every vport; the main vport gets the remainder */
1658 	tqp_per_vport = hdev->num_tqps / num_vport;
1659 	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1660 
1661 	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1662 			     GFP_KERNEL);
1663 	if (!vport)
1664 		return -ENOMEM;
1665 
1666 	hdev->vport = vport;
1667 	hdev->num_alloc_vport = num_vport;
1668 
1669 	if (IS_ENABLED(CONFIG_PCI_IOV))
1670 		hdev->num_alloc_vfs = hdev->num_req_vfs;
1671 
1672 	for (i = 0; i < num_vport; i++) {
1673 		vport->back = hdev;
1674 		vport->vport_id = i;
1675 		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1676 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1677 		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1678 		vport->rxvlan_cfg.rx_vlan_offload_en = true;
1679 		INIT_LIST_HEAD(&vport->vlan_list);
1680 		INIT_LIST_HEAD(&vport->uc_mac_list);
1681 		INIT_LIST_HEAD(&vport->mc_mac_list);
1682 		spin_lock_init(&vport->mac_list_lock);
1683 
1684 		if (i == 0)
1685 			ret = hclge_vport_setup(vport, tqp_main_vport);
1686 		else
1687 			ret = hclge_vport_setup(vport, tqp_per_vport);
1688 		if (ret) {
1689 			dev_err(&pdev->dev,
1690 				"vport setup failed for vport %d, %d\n",
1691 				i, ret);
1692 			return ret;
1693 		}
1694 
1695 		vport++;
1696 	}
1697 
1698 	return 0;
1699 }
1700 
1701 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1702 				    struct hclge_pkt_buf_alloc *buf_alloc)
1703 {
1704 /* TX buffer size is in units of 128 bytes */
1705 #define HCLGE_BUF_SIZE_UNIT_SHIFT	7
1706 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
1707 	struct hclge_tx_buff_alloc_cmd *req;
1708 	struct hclge_desc desc;
1709 	int ret;
1710 	u8 i;
1711 
1712 	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1713 
1714 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1715 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1716 		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1717 
1718 		req->tx_pkt_buff[i] =
1719 			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1720 				     HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1721 	}
1722 
1723 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1724 	if (ret)
1725 		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1726 			ret);
1727 
1728 	return ret;
1729 }
1730 
1731 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1732 				 struct hclge_pkt_buf_alloc *buf_alloc)
1733 {
1734 	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1735 
1736 	if (ret)
1737 		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1738 
1739 	return ret;
1740 }
1741 
1742 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1743 {
1744 	unsigned int i;
1745 	u32 cnt = 0;
1746 
1747 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1748 		if (hdev->hw_tc_map & BIT(i))
1749 			cnt++;
1750 	return cnt;
1751 }
1752 
/* Get the number of PFC-enabled TCs that have a private buffer */
1754 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1755 				  struct hclge_pkt_buf_alloc *buf_alloc)
1756 {
1757 	struct hclge_priv_buf *priv;
1758 	unsigned int i;
1759 	int cnt = 0;
1760 
1761 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1762 		priv = &buf_alloc->priv_buf[i];
1763 		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1764 		    priv->enable)
1765 			cnt++;
1766 	}
1767 
1768 	return cnt;
1769 }
1770 
/* Get the number of PFC-disabled TCs that have a private buffer */
1772 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1773 				     struct hclge_pkt_buf_alloc *buf_alloc)
1774 {
1775 	struct hclge_priv_buf *priv;
1776 	unsigned int i;
1777 	int cnt = 0;
1778 
1779 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1780 		priv = &buf_alloc->priv_buf[i];
1781 		if (hdev->hw_tc_map & BIT(i) &&
1782 		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1783 		    priv->enable)
1784 			cnt++;
1785 	}
1786 
1787 	return cnt;
1788 }
1789 
1790 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1791 {
1792 	struct hclge_priv_buf *priv;
1793 	u32 rx_priv = 0;
1794 	int i;
1795 
1796 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1797 		priv = &buf_alloc->priv_buf[i];
1798 		if (priv->enable)
1799 			rx_priv += priv->buf_size;
1800 	}
1801 	return rx_priv;
1802 }
1803 
1804 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1805 {
1806 	u32 i, total_tx_size = 0;
1807 
1808 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1809 		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1810 
1811 	return total_tx_size;
1812 }
1813 
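/* Check whether the total rx buffer (rx_all) can hold the private buffers
 * already assigned plus a minimum shared buffer. If it can, fill in the
 * shared buffer size and the per-TC high/low thresholds and waterlines.
 */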
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
1817 {
1818 	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1819 	u32 tc_num = hclge_get_tc_num(hdev);
1820 	u32 shared_buf, aligned_mps;
1821 	u32 rx_priv;
1822 	int i;
1823 
1824 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1825 
1826 	if (hnae3_dev_dcb_supported(hdev))
1827 		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1828 					hdev->dv_buf_size;
1829 	else
1830 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1831 					+ hdev->dv_buf_size;
1832 
1833 	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1834 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1835 			     HCLGE_BUF_SIZE_UNIT);
1836 
1837 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1838 	if (rx_all < rx_priv + shared_std)
1839 		return false;
1840 
1841 	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1842 	buf_alloc->s_buf.buf_size = shared_buf;
1843 	if (hnae3_dev_dcb_supported(hdev)) {
1844 		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1845 		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1846 			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1847 				  HCLGE_BUF_SIZE_UNIT);
1848 	} else {
1849 		buf_alloc->s_buf.self.high = aligned_mps +
1850 						HCLGE_NON_DCB_ADDITIONAL_BUF;
1851 		buf_alloc->s_buf.self.low = aligned_mps;
1852 	}
1853 
1854 	if (hnae3_dev_dcb_supported(hdev)) {
1855 		hi_thrd = shared_buf - hdev->dv_buf_size;
1856 
1857 		if (tc_num <= NEED_RESERVE_TC_NUM)
1858 			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1859 					/ BUF_MAX_PERCENT;
1860 
1861 		if (tc_num)
1862 			hi_thrd = hi_thrd / tc_num;
1863 
1864 		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1865 		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1866 		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1867 	} else {
1868 		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1869 		lo_thrd = aligned_mps;
1870 	}
1871 
1872 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1873 		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1874 		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1875 	}
1876 
1877 	return true;
1878 }
1879 
1880 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1881 				struct hclge_pkt_buf_alloc *buf_alloc)
1882 {
1883 	u32 i, total_size;
1884 
1885 	total_size = hdev->pkt_buf_size;
1886 
	/* alloc tx buffer for all enabled TCs */
1888 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1889 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1890 
1891 		if (hdev->hw_tc_map & BIT(i)) {
1892 			if (total_size < hdev->tx_buf_size)
1893 				return -ENOMEM;
1894 
1895 			priv->tx_buf_size = hdev->tx_buf_size;
1896 		} else {
1897 			priv->tx_buf_size = 0;
1898 		}
1899 
1900 		total_size -= priv->tx_buf_size;
1901 	}
1902 
1903 	return 0;
1904 }
1905 
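/* Assign each enabled TC a private buffer with waterlines derived from the
 * aligned MPS (@max selects the larger waterline scheme), then check whether
 * the remaining packet buffer is enough to serve as the shared buffer.
 */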
1906 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1907 				  struct hclge_pkt_buf_alloc *buf_alloc)
1908 {
1909 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1910 	u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1911 	unsigned int i;
1912 
1913 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1914 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1915 
1916 		priv->enable = 0;
1917 		priv->wl.low = 0;
1918 		priv->wl.high = 0;
1919 		priv->buf_size = 0;
1920 
1921 		if (!(hdev->hw_tc_map & BIT(i)))
1922 			continue;
1923 
1924 		priv->enable = 1;
1925 
1926 		if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1927 			priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1928 			priv->wl.high = roundup(priv->wl.low + aligned_mps,
1929 						HCLGE_BUF_SIZE_UNIT);
1930 		} else {
1931 			priv->wl.low = 0;
1932 			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1933 					aligned_mps;
1934 		}
1935 
1936 		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1937 	}
1938 
1939 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1940 }
1941 
1942 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1943 					  struct hclge_pkt_buf_alloc *buf_alloc)
1944 {
1945 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1946 	int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1947 	int i;
1948 
	/* clear from the last TC backwards */
1950 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1951 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1952 		unsigned int mask = BIT((unsigned int)i);
1953 
1954 		if (hdev->hw_tc_map & mask &&
1955 		    !(hdev->tm_info.hw_pfc_map & mask)) {
			/* Clear the private buffer of a TC without PFC */
1957 			priv->wl.low = 0;
1958 			priv->wl.high = 0;
1959 			priv->buf_size = 0;
1960 			priv->enable = 0;
1961 			no_pfc_priv_num--;
1962 		}
1963 
1964 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1965 		    no_pfc_priv_num == 0)
1966 			break;
1967 	}
1968 
1969 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1970 }
1971 
1972 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1973 					struct hclge_pkt_buf_alloc *buf_alloc)
1974 {
1975 	u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1976 	int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1977 	int i;
1978 
	/* clear from the last TC backwards */
1980 	for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1981 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1982 		unsigned int mask = BIT((unsigned int)i);
1983 
1984 		if (hdev->hw_tc_map & mask &&
1985 		    hdev->tm_info.hw_pfc_map & mask) {
			/* Reduce the number of PFC TCs with a private buffer */
1987 			priv->wl.low = 0;
1988 			priv->enable = 0;
1989 			priv->wl.high = 0;
1990 			priv->buf_size = 0;
1991 			pfc_priv_num--;
1992 		}
1993 
1994 		if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1995 		    pfc_priv_num == 0)
1996 			break;
1997 	}
1998 
1999 	return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2000 }
2001 
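/* Try to divide the whole rx packet buffer among the enabled TCs as private
 * buffers, leaving no shared buffer. This only succeeds when each TC's share
 * is at least the minimum private buffer size.
 */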
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
				       struct hclge_pkt_buf_alloc *buf_alloc)
2004 {
2005 #define COMPENSATE_BUFFER	0x3C00
2006 #define COMPENSATE_HALF_MPS_NUM	5
2007 #define PRIV_WL_GAP		0x1800
2008 
2009 	u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2010 	u32 tc_num = hclge_get_tc_num(hdev);
2011 	u32 half_mps = hdev->mps >> 1;
2012 	u32 min_rx_priv;
2013 	unsigned int i;
2014 
2015 	if (tc_num)
2016 		rx_priv = rx_priv / tc_num;
2017 
2018 	if (tc_num <= NEED_RESERVE_TC_NUM)
2019 		rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2020 
2021 	min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2022 			COMPENSATE_HALF_MPS_NUM * half_mps;
2023 	min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2024 	rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2025 
2026 	if (rx_priv < min_rx_priv)
2027 		return false;
2028 
2029 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2030 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2031 
2032 		priv->enable = 0;
2033 		priv->wl.low = 0;
2034 		priv->wl.high = 0;
2035 		priv->buf_size = 0;
2036 
2037 		if (!(hdev->hw_tc_map & BIT(i)))
2038 			continue;
2039 
2040 		priv->enable = 1;
2041 		priv->buf_size = rx_priv;
2042 		priv->wl.high = rx_priv - hdev->dv_buf_size;
2043 		priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2044 	}
2045 
2046 	buf_alloc->s_buf.buf_size = 0;
2047 
2048 	return true;
2049 }
2050 
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: fail
 */
2056 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2057 				struct hclge_pkt_buf_alloc *buf_alloc)
2058 {
2059 	/* When DCB is not supported, rx private buffer is not allocated. */
2060 	if (!hnae3_dev_dcb_supported(hdev)) {
2061 		u32 rx_all = hdev->pkt_buf_size;
2062 
2063 		rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2064 		if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2065 			return -ENOMEM;
2066 
2067 		return 0;
2068 	}
2069 
2070 	if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2071 		return 0;
2072 
2073 	if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2074 		return 0;
2075 
2076 	/* try to decrease the buffer size */
2077 	if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2078 		return 0;
2079 
2080 	if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2081 		return 0;
2082 
2083 	if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2084 		return 0;
2085 
2086 	return -ENOMEM;
2087 }
2088 
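/* Write the calculated per-TC private buffer sizes and the shared buffer
 * size to hardware via the RX_PRIV_BUFF_ALLOC command.
 */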
2089 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2090 				   struct hclge_pkt_buf_alloc *buf_alloc)
2091 {
2092 	struct hclge_rx_priv_buff_cmd *req;
2093 	struct hclge_desc desc;
2094 	int ret;
2095 	int i;
2096 
2097 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2098 	req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2099 
	/* Alloc private buffer for each TC */
2101 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2102 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2103 
2104 		req->buf_num[i] =
2105 			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2106 		req->buf_num[i] |=
2107 			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2108 	}
2109 
2110 	req->shared_buf =
2111 		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2112 			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
2113 
2114 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2115 	if (ret)
2116 		dev_err(&hdev->pdev->dev,
2117 			"rx private buffer alloc cmd failed %d\n", ret);
2118 
2119 	return ret;
2120 }
2121 
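/* Configure the rx private waterlines. All TCs are covered by two
 * descriptors, each carrying HCLGE_TC_NUM_ONE_DESC TCs, with the waterlines
 * converted to buffer units by the HCLGE_BUF_UNIT_S shift.
 */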
2122 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2123 				   struct hclge_pkt_buf_alloc *buf_alloc)
2124 {
2125 	struct hclge_rx_priv_wl_buf *req;
2126 	struct hclge_priv_buf *priv;
2127 	struct hclge_desc desc[2];
2128 	int i, j;
2129 	int ret;
2130 
2131 	for (i = 0; i < 2; i++) {
2132 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2133 					   false);
2134 		req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2135 
		/* The first descriptor sets the NEXT bit to 1 */
2137 		if (i == 0)
2138 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2139 		else
2140 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2141 
2142 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2143 			u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2144 
2145 			priv = &buf_alloc->priv_buf[idx];
2146 			req->tc_wl[j].high =
2147 				cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2148 			req->tc_wl[j].high |=
2149 				cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2150 			req->tc_wl[j].low =
2151 				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2152 			req->tc_wl[j].low |=
2153 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2154 		}
2155 	}
2156 
	/* Send 2 descriptors at one time */
2158 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2159 	if (ret)
2160 		dev_err(&hdev->pdev->dev,
2161 			"rx private waterline config cmd failed %d\n",
2162 			ret);
2163 	return ret;
2164 }
2165 
2166 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2167 				    struct hclge_pkt_buf_alloc *buf_alloc)
2168 {
2169 	struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2170 	struct hclge_rx_com_thrd *req;
2171 	struct hclge_desc desc[2];
2172 	struct hclge_tc_thrd *tc;
2173 	int i, j;
2174 	int ret;
2175 
2176 	for (i = 0; i < 2; i++) {
2177 		hclge_cmd_setup_basic_desc(&desc[i],
2178 					   HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2179 		req = (struct hclge_rx_com_thrd *)&desc[i].data;
2180 
		/* The first descriptor sets the NEXT bit to 1 */
2182 		if (i == 0)
2183 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2184 		else
2185 			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2186 
2187 		for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2188 			tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2189 
2190 			req->com_thrd[j].high =
2191 				cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2192 			req->com_thrd[j].high |=
2193 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2194 			req->com_thrd[j].low =
2195 				cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2196 			req->com_thrd[j].low |=
2197 				 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2198 		}
2199 	}
2200 
2201 	/* Send 2 descriptors at one time */
2202 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
2203 	if (ret)
2204 		dev_err(&hdev->pdev->dev,
2205 			"common threshold config cmd failed %d\n", ret);
2206 	return ret;
2207 }
2208 
2209 static int hclge_common_wl_config(struct hclge_dev *hdev,
2210 				  struct hclge_pkt_buf_alloc *buf_alloc)
2211 {
2212 	struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2213 	struct hclge_rx_com_wl *req;
2214 	struct hclge_desc desc;
2215 	int ret;
2216 
2217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2218 
2219 	req = (struct hclge_rx_com_wl *)desc.data;
2220 	req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
	req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2222 
2223 	req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
	req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2225 
2226 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2227 	if (ret)
2228 		dev_err(&hdev->pdev->dev,
2229 			"common waterline config cmd failed %d\n", ret);
2230 
2231 	return ret;
2232 }
2233 
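/* Calculate and program the whole packet buffer layout: tx buffers per TC,
 * rx private buffers, and (when DCB is supported) the rx private waterlines
 * and common thresholds, followed by the common waterline.
 */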
2234 int hclge_buffer_alloc(struct hclge_dev *hdev)
2235 {
2236 	struct hclge_pkt_buf_alloc *pkt_buf;
2237 	int ret;
2238 
2239 	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2240 	if (!pkt_buf)
2241 		return -ENOMEM;
2242 
2243 	ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2244 	if (ret) {
2245 		dev_err(&hdev->pdev->dev,
2246 			"could not calc tx buffer size for all TCs %d\n", ret);
2247 		goto out;
2248 	}
2249 
2250 	ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2251 	if (ret) {
2252 		dev_err(&hdev->pdev->dev,
2253 			"could not alloc tx buffers %d\n", ret);
2254 		goto out;
2255 	}
2256 
2257 	ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2258 	if (ret) {
2259 		dev_err(&hdev->pdev->dev,
2260 			"could not calc rx priv buffer size for all TCs %d\n",
2261 			ret);
2262 		goto out;
2263 	}
2264 
2265 	ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2266 	if (ret) {
2267 		dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2268 			ret);
2269 		goto out;
2270 	}
2271 
2272 	if (hnae3_dev_dcb_supported(hdev)) {
2273 		ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2274 		if (ret) {
2275 			dev_err(&hdev->pdev->dev,
2276 				"could not configure rx private waterline %d\n",
2277 				ret);
2278 			goto out;
2279 		}
2280 
2281 		ret = hclge_common_thrd_config(hdev, pkt_buf);
2282 		if (ret) {
2283 			dev_err(&hdev->pdev->dev,
2284 				"could not configure common threshold %d\n",
2285 				ret);
2286 			goto out;
2287 		}
2288 	}
2289 
2290 	ret = hclge_common_wl_config(hdev, pkt_buf);
2291 	if (ret)
2292 		dev_err(&hdev->pdev->dev,
2293 			"could not configure common waterline %d\n", ret);
2294 
2295 out:
2296 	kfree(pkt_buf);
2297 	return ret;
2298 }
2299 
2300 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2301 {
2302 	struct hnae3_handle *roce = &vport->roce;
2303 	struct hnae3_handle *nic = &vport->nic;
2304 
2305 	roce->rinfo.num_vectors = vport->back->num_roce_msi;
2306 
2307 	if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2308 	    vport->back->num_msi_left == 0)
2309 		return -EINVAL;
2310 
2311 	roce->rinfo.base_vector = vport->back->roce_base_vector;
2312 
2313 	roce->rinfo.netdev = nic->kinfo.netdev;
2314 	roce->rinfo.roce_io_base = vport->back->hw.io_base;
2315 
2316 	roce->pdev = nic->pdev;
2317 	roce->ae_algo = nic->ae_algo;
2318 	roce->numa_node_mask = nic->numa_node_mask;
2319 
2320 	return 0;
2321 }
2322 
2323 static int hclge_init_msi(struct hclge_dev *hdev)
2324 {
2325 	struct pci_dev *pdev = hdev->pdev;
2326 	int vectors;
2327 	int i;
2328 
2329 	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2330 					hdev->num_msi,
2331 					PCI_IRQ_MSI | PCI_IRQ_MSIX);
2332 	if (vectors < 0) {
2333 		dev_err(&pdev->dev,
2334 			"failed(%d) to allocate MSI/MSI-X vectors\n",
2335 			vectors);
2336 		return vectors;
2337 	}
2338 	if (vectors < hdev->num_msi)
2339 		dev_warn(&hdev->pdev->dev,
2340 			 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2341 			 hdev->num_msi, vectors);
2342 
2343 	hdev->num_msi = vectors;
2344 	hdev->num_msi_left = vectors;
2345 
2346 	hdev->base_msi_vector = pdev->irq;
2347 	hdev->roce_base_vector = hdev->base_msi_vector +
2348 				hdev->roce_base_msix_offset;
2349 
2350 	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2351 					   sizeof(u16), GFP_KERNEL);
2352 	if (!hdev->vector_status) {
2353 		pci_free_irq_vectors(pdev);
2354 		return -ENOMEM;
2355 	}
2356 
2357 	for (i = 0; i < hdev->num_msi; i++)
2358 		hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2359 
2360 	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 					sizeof(int), GFP_KERNEL);
2362 	if (!hdev->vector_irq) {
2363 		pci_free_irq_vectors(pdev);
2364 		return -ENOMEM;
2365 	}
2366 
2367 	return 0;
2368 }
2369 
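/* Only 10M and 100M support half duplex; force full duplex for all other
 * speeds.
 */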
2370 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2371 {
2372 	if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2373 		duplex = HCLGE_MAC_FULL;
2374 
2375 	return duplex;
2376 }
2377 
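/* Program the MAC speed and duplex into hardware. Each supported speed is
 * mapped to the encoding expected by the CONFIG_SPEED_DUP command.
 */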
2378 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2379 				      u8 duplex)
2380 {
2381 	struct hclge_config_mac_speed_dup_cmd *req;
2382 	struct hclge_desc desc;
2383 	int ret;
2384 
2385 	req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2386 
2387 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2388 
2389 	if (duplex)
2390 		hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2391 
2392 	switch (speed) {
2393 	case HCLGE_MAC_SPEED_10M:
2394 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2395 				HCLGE_CFG_SPEED_S, 6);
2396 		break;
2397 	case HCLGE_MAC_SPEED_100M:
2398 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2399 				HCLGE_CFG_SPEED_S, 7);
2400 		break;
2401 	case HCLGE_MAC_SPEED_1G:
2402 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2403 				HCLGE_CFG_SPEED_S, 0);
2404 		break;
2405 	case HCLGE_MAC_SPEED_10G:
2406 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2407 				HCLGE_CFG_SPEED_S, 1);
2408 		break;
2409 	case HCLGE_MAC_SPEED_25G:
2410 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2411 				HCLGE_CFG_SPEED_S, 2);
2412 		break;
2413 	case HCLGE_MAC_SPEED_40G:
2414 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2415 				HCLGE_CFG_SPEED_S, 3);
2416 		break;
2417 	case HCLGE_MAC_SPEED_50G:
2418 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2419 				HCLGE_CFG_SPEED_S, 4);
2420 		break;
2421 	case HCLGE_MAC_SPEED_100G:
2422 		hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2423 				HCLGE_CFG_SPEED_S, 5);
2424 		break;
2425 	default:
2426 		dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2427 		return -EINVAL;
2428 	}
2429 
2430 	hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2431 		      1);
2432 
2433 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2434 	if (ret) {
2435 		dev_err(&hdev->pdev->dev,
2436 			"mac speed/duplex config cmd failed %d.\n", ret);
2437 		return ret;
2438 	}
2439 
2440 	return 0;
2441 }
2442 
2443 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2444 {
2445 	struct hclge_mac *mac = &hdev->hw.mac;
2446 	int ret;
2447 
2448 	duplex = hclge_check_speed_dup(duplex, speed);
2449 	if (!mac->support_autoneg && mac->speed == speed &&
2450 	    mac->duplex == duplex)
2451 		return 0;
2452 
2453 	ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2454 	if (ret)
2455 		return ret;
2456 
2457 	hdev->hw.mac.speed = speed;
2458 	hdev->hw.mac.duplex = duplex;
2459 
2460 	return 0;
2461 }
2462 
2463 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2464 				     u8 duplex)
2465 {
2466 	struct hclge_vport *vport = hclge_get_vport(handle);
2467 	struct hclge_dev *hdev = vport->back;
2468 
2469 	return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2470 }
2471 
2472 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2473 {
2474 	struct hclge_config_auto_neg_cmd *req;
2475 	struct hclge_desc desc;
2476 	u32 flag = 0;
2477 	int ret;
2478 
2479 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2480 
2481 	req = (struct hclge_config_auto_neg_cmd *)desc.data;
2482 	if (enable)
2483 		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2484 	req->cfg_an_cmd_flag = cpu_to_le32(flag);
2485 
2486 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2487 	if (ret)
2488 		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2489 			ret);
2490 
2491 	return ret;
2492 }
2493 
2494 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2495 {
2496 	struct hclge_vport *vport = hclge_get_vport(handle);
2497 	struct hclge_dev *hdev = vport->back;
2498 
2499 	if (!hdev->hw.mac.support_autoneg) {
2500 		if (enable) {
2501 			dev_err(&hdev->pdev->dev,
2502 				"autoneg is not supported by current port\n");
2503 			return -EOPNOTSUPP;
2504 		} else {
2505 			return 0;
2506 		}
2507 	}
2508 
2509 	return hclge_set_autoneg_en(hdev, enable);
2510 }
2511 
2512 static int hclge_get_autoneg(struct hnae3_handle *handle)
2513 {
2514 	struct hclge_vport *vport = hclge_get_vport(handle);
2515 	struct hclge_dev *hdev = vport->back;
2516 	struct phy_device *phydev = hdev->hw.mac.phydev;
2517 
2518 	if (phydev)
2519 		return phydev->autoneg;
2520 
2521 	return hdev->hw.mac.autoneg;
2522 }
2523 
2524 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2525 {
2526 	struct hclge_vport *vport = hclge_get_vport(handle);
2527 	struct hclge_dev *hdev = vport->back;
2528 	int ret;
2529 
2530 	dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2531 
2532 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2533 	if (ret)
2534 		return ret;
2535 	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2536 }
2537 
2538 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2539 {
2540 	struct hclge_vport *vport = hclge_get_vport(handle);
2541 	struct hclge_dev *hdev = vport->back;
2542 
2543 	if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2544 		return hclge_set_autoneg_en(hdev, !halt);
2545 
2546 	return 0;
2547 }
2548 
2549 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2550 {
2551 	struct hclge_config_fec_cmd *req;
2552 	struct hclge_desc desc;
2553 	int ret;
2554 
2555 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2556 
2557 	req = (struct hclge_config_fec_cmd *)desc.data;
2558 	if (fec_mode & BIT(HNAE3_FEC_AUTO))
2559 		hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2560 	if (fec_mode & BIT(HNAE3_FEC_RS))
2561 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2562 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2563 	if (fec_mode & BIT(HNAE3_FEC_BASER))
2564 		hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2565 				HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2566 
2567 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2568 	if (ret)
2569 		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2570 
2571 	return ret;
2572 }
2573 
2574 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2575 {
2576 	struct hclge_vport *vport = hclge_get_vport(handle);
2577 	struct hclge_dev *hdev = vport->back;
2578 	struct hclge_mac *mac = &hdev->hw.mac;
2579 	int ret;
2580 
2581 	if (fec_mode && !(mac->fec_ability & fec_mode)) {
2582 		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2583 		return -EINVAL;
2584 	}
2585 
2586 	ret = hclge_set_fec_hw(hdev, fec_mode);
2587 	if (ret)
2588 		return ret;
2589 
2590 	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2591 	return 0;
2592 }
2593 
2594 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2595 			  u8 *fec_mode)
2596 {
2597 	struct hclge_vport *vport = hclge_get_vport(handle);
2598 	struct hclge_dev *hdev = vport->back;
2599 	struct hclge_mac *mac = &hdev->hw.mac;
2600 
2601 	if (fec_ability)
2602 		*fec_ability = mac->fec_ability;
2603 	if (fec_mode)
2604 		*fec_mode = mac->fec_mode;
2605 }
2606 
2607 static int hclge_mac_init(struct hclge_dev *hdev)
2608 {
2609 	struct hclge_mac *mac = &hdev->hw.mac;
2610 	int ret;
2611 
2612 	hdev->support_sfp_query = true;
2613 	hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2614 	ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2615 					 hdev->hw.mac.duplex);
2616 	if (ret)
2617 		return ret;
2618 
2619 	if (hdev->hw.mac.support_autoneg) {
2620 		ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2621 		if (ret)
2622 			return ret;
2623 	}
2624 
2625 	mac->link = 0;
2626 
2627 	if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2628 		ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2629 		if (ret)
2630 			return ret;
2631 	}
2632 
2633 	ret = hclge_set_mac_mtu(hdev, hdev->mps);
2634 	if (ret) {
2635 		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2636 		return ret;
2637 	}
2638 
2639 	ret = hclge_set_default_loopback(hdev);
2640 	if (ret)
2641 		return ret;
2642 
2643 	ret = hclge_buffer_alloc(hdev);
2644 	if (ret)
2645 		dev_err(&hdev->pdev->dev,
2646 			"allocate buffer fail, ret=%d\n", ret);
2647 
2648 	return ret;
2649 }
2650 
2651 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2652 {
2653 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2654 	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2655 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2656 				    hclge_wq, &hdev->service_task, 0);
2657 }
2658 
2659 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2660 {
2661 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2662 	    !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2663 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2664 				    hclge_wq, &hdev->service_task, 0);
2665 }
2666 
2667 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2668 {
2669 	if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2670 	    !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2671 		mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2672 				    hclge_wq, &hdev->service_task,
2673 				    delay_time);
2674 }
2675 
2676 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2677 {
2678 	struct hclge_link_status_cmd *req;
2679 	struct hclge_desc desc;
2680 	int link_status;
2681 	int ret;
2682 
2683 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2684 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2685 	if (ret) {
2686 		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2687 			ret);
2688 		return ret;
2689 	}
2690 
2691 	req = (struct hclge_link_status_cmd *)desc.data;
2692 	link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2693 
2694 	return !!link_status;
2695 }
2696 
2697 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2698 {
2699 	unsigned int mac_state;
2700 	int link_stat;
2701 
2702 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2703 		return 0;
2704 
2705 	mac_state = hclge_get_mac_link_status(hdev);
2706 
2707 	if (hdev->hw.mac.phydev) {
2708 		if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2709 			link_stat = mac_state &
2710 				hdev->hw.mac.phydev->link;
2711 		else
2712 			link_stat = 0;
2713 
2714 	} else {
2715 		link_stat = mac_state;
2716 	}
2717 
2718 	return !!link_stat;
2719 }
2720 
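/* Propagate a MAC/PHY link state change to the nic client (and the roce
 * client if registered) on every vport.
 */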
2721 static void hclge_update_link_status(struct hclge_dev *hdev)
2722 {
2723 	struct hnae3_client *rclient = hdev->roce_client;
2724 	struct hnae3_client *client = hdev->nic_client;
2725 	struct hnae3_handle *rhandle;
2726 	struct hnae3_handle *handle;
2727 	int state;
2728 	int i;
2729 
2730 	if (!client)
2731 		return;
2732 
2733 	if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2734 		return;
2735 
2736 	state = hclge_get_mac_phy_link(hdev);
2737 	if (state != hdev->hw.mac.link) {
2738 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2739 			handle = &hdev->vport[i].nic;
2740 			client->ops->link_status_change(handle, state);
2741 			hclge_config_mac_tnl_int(hdev, state);
2742 			rhandle = &hdev->vport[i].roce;
2743 			if (rclient && rclient->ops->link_status_change)
2744 				rclient->ops->link_status_change(rhandle,
2745 								 state);
2746 		}
2747 		hdev->hw.mac.link = state;
2748 	}
2749 
2750 	clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2751 }
2752 
2753 static void hclge_update_port_capability(struct hclge_mac *mac)
2754 {
2755 	/* update fec ability by speed */
2756 	hclge_convert_setting_fec(mac);
2757 
	/* firmware can not identify the backplane type, so the media type
	 * read from the configuration is used to help deal with it
	 */
2761 	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2762 	    mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2763 		mac->module_type = HNAE3_MODULE_TYPE_KR;
2764 	else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2765 		mac->module_type = HNAE3_MODULE_TYPE_TP;
2766 
2767 	if (mac->support_autoneg) {
2768 		linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2769 		linkmode_copy(mac->advertising, mac->supported);
2770 	} else {
2771 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2772 				   mac->supported);
2773 		linkmode_zero(mac->advertising);
2774 	}
2775 }
2776 
2777 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2778 {
2779 	struct hclge_sfp_info_cmd *resp;
2780 	struct hclge_desc desc;
2781 	int ret;
2782 
2783 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2784 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2785 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2786 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
2789 		return ret;
2790 	} else if (ret) {
2791 		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2792 		return ret;
2793 	}
2794 
2795 	*speed = le32_to_cpu(resp->speed);
2796 
2797 	return 0;
2798 }
2799 
2800 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2801 {
2802 	struct hclge_sfp_info_cmd *resp;
2803 	struct hclge_desc desc;
2804 	int ret;
2805 
2806 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2807 	resp = (struct hclge_sfp_info_cmd *)desc.data;
2808 
2809 	resp->query_type = QUERY_ACTIVE_SPEED;
2810 
2811 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2812 	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
2815 		return ret;
2816 	} else if (ret) {
2817 		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2818 		return ret;
2819 	}
2820 
	/* In some cases, the MAC speed got from IMP may be 0; it should not
	 * be set to mac->speed.
	 */
2824 	if (!le32_to_cpu(resp->speed))
2825 		return 0;
2826 
2827 	mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it is an old firmware
	 * version, so do not update these params
	 */
2831 	if (resp->speed_ability) {
2832 		mac->module_type = le32_to_cpu(resp->module_type);
2833 		mac->speed_ability = le32_to_cpu(resp->speed_ability);
2834 		mac->autoneg = resp->autoneg;
2835 		mac->support_autoneg = resp->autoneg_ability;
2836 		mac->speed_type = QUERY_ACTIVE_SPEED;
2837 		if (!resp->active_fec)
2838 			mac->fec_mode = 0;
2839 		else
2840 			mac->fec_mode = BIT(resp->active_fec);
2841 	} else {
2842 		mac->speed_type = QUERY_SFP_SPEED;
2843 	}
2844 
2845 	return 0;
2846 }
2847 
2848 static int hclge_update_port_info(struct hclge_dev *hdev)
2849 {
2850 	struct hclge_mac *mac = &hdev->hw.mac;
2851 	int speed = HCLGE_MAC_SPEED_UNKNOWN;
2852 	int ret;
2853 
2854 	/* get the port info from SFP cmd if not copper port */
2855 	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2856 		return 0;
2857 
	/* if IMP does not support getting SFP/qSFP info, return directly */
2859 	if (!hdev->support_sfp_query)
2860 		return 0;
2861 
2862 	if (hdev->pdev->revision >= 0x21)
2863 		ret = hclge_get_sfp_info(hdev, mac);
2864 	else
2865 		ret = hclge_get_sfp_speed(hdev, &speed);
2866 
2867 	if (ret == -EOPNOTSUPP) {
2868 		hdev->support_sfp_query = false;
2869 		return ret;
2870 	} else if (ret) {
2871 		return ret;
2872 	}
2873 
2874 	if (hdev->pdev->revision >= 0x21) {
2875 		if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2876 			hclge_update_port_capability(mac);
2877 			return 0;
2878 		}
2879 		return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2880 					       HCLGE_MAC_FULL);
2881 	} else {
2882 		if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2883 			return 0; /* do nothing if no SFP */
2884 
2885 		/* must config full duplex for SFP */
2886 		return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2887 	}
2888 }
2889 
2890 static int hclge_get_status(struct hnae3_handle *handle)
2891 {
2892 	struct hclge_vport *vport = hclge_get_vport(handle);
2893 	struct hclge_dev *hdev = vport->back;
2894 
2895 	hclge_update_link_status(hdev);
2896 
2897 	return hdev->hw.mac.link;
2898 }
2899 
2900 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2901 {
2902 	if (!pci_num_vf(hdev->pdev)) {
2903 		dev_err(&hdev->pdev->dev,
2904 			"SRIOV is disabled, can not get vport(%d) info.\n", vf);
2905 		return NULL;
2906 	}
2907 
2908 	if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2909 		dev_err(&hdev->pdev->dev,
2910 			"vf id(%d) is out of range(0 <= vfid < %d)\n",
2911 			vf, pci_num_vf(hdev->pdev));
2912 		return NULL;
2913 	}
2914 
	/* VFs start from 1 in vport */
2916 	vf += HCLGE_VF_VPORT_START_NUM;
2917 	return &hdev->vport[vf];
2918 }
2919 
2920 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2921 			       struct ifla_vf_info *ivf)
2922 {
2923 	struct hclge_vport *vport = hclge_get_vport(handle);
2924 	struct hclge_dev *hdev = vport->back;
2925 
2926 	vport = hclge_get_vf_vport(hdev, vf);
2927 	if (!vport)
2928 		return -EINVAL;
2929 
2930 	ivf->vf = vf;
2931 	ivf->linkstate = vport->vf_info.link_state;
2932 	ivf->spoofchk = vport->vf_info.spoofchk;
2933 	ivf->trusted = vport->vf_info.trusted;
2934 	ivf->min_tx_rate = 0;
2935 	ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2936 	ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
2937 	ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
2938 	ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
2939 	ether_addr_copy(ivf->mac, vport->vf_info.mac);
2940 
2941 	return 0;
2942 }
2943 
2944 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2945 				   int link_state)
2946 {
2947 	struct hclge_vport *vport = hclge_get_vport(handle);
2948 	struct hclge_dev *hdev = vport->back;
2949 
2950 	vport = hclge_get_vf_vport(hdev, vf);
2951 	if (!vport)
2952 		return -EINVAL;
2953 
2954 	vport->vf_info.link_state = link_state;
2955 
2956 	return 0;
2957 }
2958 
2959 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2960 {
2961 	u32 cmdq_src_reg, msix_src_reg;
2962 
2963 	/* fetch the events from their corresponding regs */
2964 	cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2965 	msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2966 
	/* Assumption: if by any chance reset and mailbox events are reported
	 * together, we will only process the reset event in this go and will
	 * defer the processing of the mailbox events. Since we will not have
	 * cleared the RX CMDQ event this time, we will receive another
	 * interrupt from the hardware just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
2975 	if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
2976 		dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2977 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2978 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2979 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2980 		hdev->rst_stats.imp_rst_cnt++;
2981 		return HCLGE_VECTOR0_EVENT_RST;
2982 	}
2983 
2984 	if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
2985 		dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2986 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2987 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2988 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2989 		hdev->rst_stats.global_rst_cnt++;
2990 		return HCLGE_VECTOR0_EVENT_RST;
2991 	}
2992 
2993 	/* check for vector0 msix event source */
2994 	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2995 		*clearval = msix_src_reg;
2996 		return HCLGE_VECTOR0_EVENT_ERR;
2997 	}
2998 
2999 	/* check for vector0 mailbox(=CMDQ RX) event source */
3000 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3001 		cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3002 		*clearval = cmdq_src_reg;
3003 		return HCLGE_VECTOR0_EVENT_MBX;
3004 	}
3005 
3006 	/* print other vector0 event source */
3007 	dev_info(&hdev->pdev->dev,
3008 		 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3009 		 cmdq_src_reg, msix_src_reg);
3010 	*clearval = msix_src_reg;
3011 
3012 	return HCLGE_VECTOR0_EVENT_OTHER;
3013 }
3014 
3015 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3016 				    u32 regclr)
3017 {
3018 	switch (event_type) {
3019 	case HCLGE_VECTOR0_EVENT_RST:
3020 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3021 		break;
3022 	case HCLGE_VECTOR0_EVENT_MBX:
3023 		hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3024 		break;
3025 	default:
3026 		break;
3027 	}
3028 }
3029 
3030 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3031 {
3032 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3033 				BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3034 				BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3035 				BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3036 	hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3037 }
3038 
3039 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3040 {
3041 	writel(enable ? 1 : 0, vector->addr);
3042 }
3043 
3044 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3045 {
3046 	struct hclge_dev *hdev = data;
3047 	u32 clearval = 0;
3048 	u32 event_cause;
3049 
3050 	hclge_enable_vector(&hdev->misc_vector, false);
3051 	event_cause = hclge_check_event_cause(hdev, &clearval);
3052 
	/* vector 0 interrupt is shared with reset and mailbox source events. */
3054 	switch (event_cause) {
3055 	case HCLGE_VECTOR0_EVENT_ERR:
		/* we do not know what type of reset is required now. This could
		 * only be decided after we fetch the type of errors which
		 * caused this event. Therefore, we will do the below for now:
		 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
		 *    have deferred the choice of reset type to be used.
		 * 2. Schedule the reset service task.
		 * 3. When the service task receives HNAE3_UNKNOWN_RESET type
		 *    it will fetch the correct type of reset. This is done
		 *    by first decoding the types of errors.
		 */
3066 		set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3067 		/* fall through */
3068 	case HCLGE_VECTOR0_EVENT_RST:
3069 		hclge_reset_task_schedule(hdev);
3070 		break;
3071 	case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then either:
		 * 1. we are not handling any mbx task and we are not
		 *    scheduled as well,
		 *                        OR
		 * 2. we could be handling an mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
3081 		hclge_mbx_task_schedule(hdev);
3082 		break;
3083 	default:
3084 		dev_warn(&hdev->pdev->dev,
3085 			 "received unknown or unhandled event of vector0\n");
3086 		break;
3087 	}
3088 
3089 	hclge_clear_event_cause(hdev, event_cause, clearval);
3090 
	/* Enable the interrupt if it is not caused by reset. And when
	 * clearval equals 0, it means the interrupt status may have been
	 * cleared by hardware before the driver reads the status register.
	 * In this case, the vector0 interrupt should also be enabled.
	 */
3096 	if (!clearval ||
3097 	    event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3098 		hclge_enable_vector(&hdev->misc_vector, true);
3099 	}
3100 
3101 	return IRQ_HANDLED;
3102 }
3103 
3104 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3105 {
3106 	if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3107 		dev_warn(&hdev->pdev->dev,
3108 			 "vector(vector_id %d) has been freed.\n", vector_id);
3109 		return;
3110 	}
3111 
3112 	hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3113 	hdev->num_msi_left += 1;
3114 	hdev->num_msi_used -= 1;
3115 }
3116 
3117 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3118 {
3119 	struct hclge_misc_vector *vector = &hdev->misc_vector;
3120 
3121 	vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3122 
3123 	vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3124 	hdev->vector_status[0] = 0;
3125 
3126 	hdev->num_msi_left -= 1;
3127 	hdev->num_msi_used += 1;
3128 }
3129 
3130 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3131 				      const cpumask_t *mask)
3132 {
3133 	struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3134 					      affinity_notify);
3135 
3136 	cpumask_copy(&hdev->affinity_mask, mask);
3137 }
3138 
3139 static void hclge_irq_affinity_release(struct kref *ref)
3140 {
3141 }
3142 
3143 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3144 {
3145 	irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3146 			      &hdev->affinity_mask);
3147 
3148 	hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3149 	hdev->affinity_notify.release = hclge_irq_affinity_release;
3150 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3151 				  &hdev->affinity_notify);
3152 }
3153 
3154 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3155 {
3156 	irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3157 	irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3158 }
3159 
3160 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3161 {
3162 	int ret;
3163 
3164 	hclge_get_misc_vector(hdev);
3165 
	/* this irq is explicitly freed in hclge_misc_irq_uninit() */
3167 	snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3168 		 HCLGE_NAME, pci_name(hdev->pdev));
3169 	ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3170 			  0, hdev->misc_vector.name, hdev);
3171 	if (ret) {
3172 		hclge_free_vector(hdev, 0);
3173 		dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3174 			hdev->misc_vector.vector_irq);
3175 	}
3176 
3177 	return ret;
3178 }
3179 
3180 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3181 {
3182 	free_irq(hdev->misc_vector.vector_irq, hdev);
3183 	hclge_free_vector(hdev, 0);
3184 }
3185 
3186 int hclge_notify_client(struct hclge_dev *hdev,
3187 			enum hnae3_reset_notify_type type)
3188 {
3189 	struct hnae3_client *client = hdev->nic_client;
3190 	u16 i;
3191 
3192 	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3193 		return 0;
3194 
3195 	if (!client->ops->reset_notify)
3196 		return -EOPNOTSUPP;
3197 
3198 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3199 		struct hnae3_handle *handle = &hdev->vport[i].nic;
3200 		int ret;
3201 
3202 		ret = client->ops->reset_notify(handle, type);
3203 		if (ret) {
3204 			dev_err(&hdev->pdev->dev,
3205 				"notify nic client failed %d(%d)\n", type, ret);
3206 			return ret;
3207 		}
3208 	}
3209 
3210 	return 0;
3211 }
3212 
3213 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3214 				    enum hnae3_reset_notify_type type)
3215 {
3216 	struct hnae3_client *client = hdev->roce_client;
3217 	int ret = 0;
3218 	u16 i;
3219 
3220 	if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3221 		return 0;
3222 
3223 	if (!client->ops->reset_notify)
3224 		return -EOPNOTSUPP;
3225 
3226 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3227 		struct hnae3_handle *handle = &hdev->vport[i].roce;
3228 
3229 		ret = client->ops->reset_notify(handle, type);
3230 		if (ret) {
			dev_err(&hdev->pdev->dev,
				"notify roce client failed %d(%d)\n",
3233 				type, ret);
3234 			return ret;
3235 		}
3236 	}
3237 
3238 	return ret;
3239 }
3240 
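/* Poll the reset status register until hardware clears the reset bit,
 * sleeping 100 ms between reads for up to 350 attempts (about 35 seconds
 * in total).
 */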
3241 static int hclge_reset_wait(struct hclge_dev *hdev)
3242 {
#define HCLGE_RESET_WAIT_MS	100
3244 #define HCLGE_RESET_WAIT_CNT	350
3245 
3246 	u32 val, reg, reg_bit;
3247 	u32 cnt = 0;
3248 
3249 	switch (hdev->reset_type) {
3250 	case HNAE3_IMP_RESET:
3251 		reg = HCLGE_GLOBAL_RESET_REG;
3252 		reg_bit = HCLGE_IMP_RESET_BIT;
3253 		break;
3254 	case HNAE3_GLOBAL_RESET:
3255 		reg = HCLGE_GLOBAL_RESET_REG;
3256 		reg_bit = HCLGE_GLOBAL_RESET_BIT;
3257 		break;
3258 	case HNAE3_FUNC_RESET:
3259 		reg = HCLGE_FUN_RST_ING;
3260 		reg_bit = HCLGE_FUN_RST_ING_B;
3261 		break;
3262 	default:
3263 		dev_err(&hdev->pdev->dev,
3264 			"Wait for unsupported reset type: %d\n",
3265 			hdev->reset_type);
3266 		return -EINVAL;
3267 	}
3268 
3269 	val = hclge_read_dev(&hdev->hw, reg);
3270 	while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
		msleep(HCLGE_RESET_WAIT_MS);
3272 		val = hclge_read_dev(&hdev->hw, reg);
3273 		cnt++;
3274 	}
3275 
3276 	if (cnt >= HCLGE_RESET_WAIT_CNT) {
3277 		dev_warn(&hdev->pdev->dev,
3278 			 "Wait for reset timeout: %d\n", hdev->reset_type);
3279 		return -EBUSY;
3280 	}
3281 
3282 	return 0;
3283 }
3284 
3285 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3286 {
3287 	struct hclge_vf_rst_cmd *req;
3288 	struct hclge_desc desc;
3289 
3290 	req = (struct hclge_vf_rst_cmd *)desc.data;
3291 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3292 	req->dest_vfid = func_id;
3293 
3294 	if (reset)
3295 		req->vf_rst = 0x1;
3296 
3297 	return hclge_cmd_send(&hdev->hw, &desc, 1);
3298 }
3299 
3300 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3301 {
3302 	int i;
3303 
3304 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3305 		struct hclge_vport *vport = &hdev->vport[i];
3306 		int ret;
3307 
3308 		/* Send cmd to set/clear VF's FUNC_RST_ING */
3309 		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3310 		if (ret) {
3311 			dev_err(&hdev->pdev->dev,
3312 				"set vf(%u) rst failed %d!\n",
3313 				vport->vport_id, ret);
3314 			return ret;
3315 		}
3316 
3317 		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3318 			continue;
3319 
3320 		/* Inform VF to process the reset.
3321 		 * hclge_inform_reset_assert_to_vf may fail if VF
3322 		 * driver is not loaded.
3323 		 */
3324 		ret = hclge_inform_reset_assert_to_vf(vport);
3325 		if (ret)
3326 			dev_warn(&hdev->pdev->dev,
3327 				 "inform reset to vf(%u) failed %d!\n",
3328 				 vport->vport_id, ret);
3329 	}
3330 
3331 	return 0;
3332 }
3333 
3334 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3335 {
3336 	if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3337 	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3338 	    test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3339 		return;
3340 
3341 	hclge_mbx_handler(hdev);
3342 
3343 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3344 }
3345 
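/* Wait for all VFs to report reset readiness, servicing pending mailbox
 * messages in between so that VFs can bring their netdevs down before the
 * PF reset proceeds.
 */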
3346 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3347 {
3348 	struct hclge_pf_rst_sync_cmd *req;
3349 	struct hclge_desc desc;
3350 	int cnt = 0;
3351 	int ret;
3352 
3353 	req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3354 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3355 
3356 	do {
		/* VFs need to bring down their netdev via mbx during PF or FLR reset */
3358 		hclge_mailbox_service_task(hdev);
3359 
3360 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
3364 		if (ret == -EOPNOTSUPP) {
3365 			msleep(HCLGE_RESET_SYNC_TIME);
3366 			return;
3367 		} else if (ret) {
3368 			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3369 				 ret);
3370 			return;
3371 		} else if (req->all_vf_ready) {
3372 			return;
3373 		}
3374 		msleep(HCLGE_PF_RESET_SYNC_TIME);
3375 		hclge_cmd_reuse_desc(&desc, true);
3376 	} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3377 
3378 	dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3379 }
3380 
3381 void hclge_report_hw_error(struct hclge_dev *hdev,
3382 			   enum hnae3_hw_error_type type)
3383 {
3384 	struct hnae3_client *client = hdev->nic_client;
3385 	u16 i;
3386 
3387 	if (!client || !client->ops->process_hw_error ||
3388 	    !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3389 		return;
3390 
3391 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3392 		client->ops->process_hw_error(&hdev->vport[i].nic, type);
3393 }
3394 
3395 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3396 {
3397 	u32 reg_val;
3398 
3399 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3400 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3401 		hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3402 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3403 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3404 	}
3405 
3406 	if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3407 		hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3408 		reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3409 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3410 	}
3411 }
3412 
3413 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3414 {
3415 	struct hclge_desc desc;
3416 	struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3417 	int ret;
3418 
3419 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3420 	hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3421 	req->fun_reset_vfid = func_id;
3422 
3423 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3424 	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);
3427 
3428 	return ret;
3429 }
3430 
3431 static void hclge_do_reset(struct hclge_dev *hdev)
3432 {
3433 	struct hnae3_handle *handle = &hdev->vport[0].nic;
3434 	struct pci_dev *pdev = hdev->pdev;
3435 	u32 val;
3436 
3437 	if (hclge_get_hw_reset_stat(handle)) {
		dev_info(&pdev->dev, "hardware reset not finished\n");
3439 		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3440 			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3441 			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3442 		return;
3443 	}
3444 
3445 	switch (hdev->reset_type) {
3446 	case HNAE3_GLOBAL_RESET:
3447 		dev_info(&pdev->dev, "global reset requested\n");
3448 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3449 		hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3450 		hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3451 		break;
3452 	case HNAE3_FUNC_RESET:
3453 		dev_info(&pdev->dev, "PF reset requested\n");
3454 		/* schedule again to check later */
3455 		set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3456 		hclge_reset_task_schedule(hdev);
3457 		break;
3458 	default:
3459 		dev_warn(&pdev->dev,
3460 			 "unsupported reset type: %d\n", hdev->reset_type);
3461 		break;
3462 	}
3463 }
3464 
3465 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3466 						   unsigned long *addr)
3467 {
3468 	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3469 	struct hclge_dev *hdev = ae_dev->priv;
3470 
3471 	/* first, resolve any unknown reset type to the known type(s) */
3472 	if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3473 		u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
3474 					HCLGE_MISC_VECTOR_INT_STS);
		/* we will intentionally ignore any errors from this function
		 * as we will end up in *some* reset request in any case
		 */
3478 		if (hclge_handle_hw_msix_error(hdev, addr))
3479 			dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
3480 				 msix_sts_reg);
3481 
3482 		clear_bit(HNAE3_UNKNOWN_RESET, addr);
		/* We deferred the clearing of the error event which caused
		 * the interrupt since it was not possible to do that in
		 * interrupt context (and this is the reason we introduced the
		 * new UNKNOWN reset type). Now that the errors have been
		 * handled and cleared in hardware, we can safely enable
		 * interrupts. This is an exception to the norm.
		 */
3490 		hclge_enable_vector(&hdev->misc_vector, true);
3491 	}
3492 
3493 	/* return the highest priority reset level amongst all */
3494 	if (test_bit(HNAE3_IMP_RESET, addr)) {
3495 		rst_level = HNAE3_IMP_RESET;
3496 		clear_bit(HNAE3_IMP_RESET, addr);
3497 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3498 		clear_bit(HNAE3_FUNC_RESET, addr);
3499 	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3500 		rst_level = HNAE3_GLOBAL_RESET;
3501 		clear_bit(HNAE3_GLOBAL_RESET, addr);
3502 		clear_bit(HNAE3_FUNC_RESET, addr);
3503 	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3504 		rst_level = HNAE3_FUNC_RESET;
3505 		clear_bit(HNAE3_FUNC_RESET, addr);
3506 	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
3507 		rst_level = HNAE3_FLR_RESET;
3508 		clear_bit(HNAE3_FLR_RESET, addr);
3509 	}
3510 
3511 	if (hdev->reset_type != HNAE3_NONE_RESET &&
3512 	    rst_level < hdev->reset_type)
3513 		return HNAE3_NONE_RESET;
3514 
3515 	return rst_level;
3516 }
3517 
3518 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3519 {
3520 	u32 clearval = 0;
3521 
3522 	switch (hdev->reset_type) {
3523 	case HNAE3_IMP_RESET:
3524 		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3525 		break;
3526 	case HNAE3_GLOBAL_RESET:
3527 		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3528 		break;
3529 	default:
3530 		break;
3531 	}
3532 
3533 	if (!clearval)
3534 		return;
3535 
	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after the hardware reset is done
	 */
3539 	if (hdev->pdev->revision == 0x20)
3540 		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3541 				clearval);
3542 
3543 	hclge_enable_vector(&hdev->misc_vector, true);
3544 }
3545 
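/* Set or clear the HCLGE_NIC_SW_RST_RDY bit in the NIC CSQ depth register,
 * which is used as a handshake to tell hardware whether the driver has
 * finished its reset preparations.
 */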
3546 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3547 {
3548 	u32 reg_val;
3549 
3550 	reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3551 	if (enable)
3552 		reg_val |= HCLGE_NIC_SW_RST_RDY;
3553 	else
3554 		reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3555 
3556 	hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3557 }
3558 
3559 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3560 {
3561 	int ret;
3562 
3563 	ret = hclge_set_all_vf_rst(hdev, true);
3564 	if (ret)
3565 		return ret;
3566 
3567 	hclge_func_reset_sync_vf(hdev);
3568 
3569 	return 0;
3570 }
3571 
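/* Do the reset-type specific preparation before waiting for the hardware
 * reset: notify/sync the VFs and assert the reset for a PF reset, notify the
 * VFs for FLR, or clear the IMP error source for an IMP reset, then signal
 * readiness through the reset handshake.
 */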
3572 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3573 {
3574 	u32 reg_val;
3575 	int ret = 0;
3576 
3577 	switch (hdev->reset_type) {
3578 	case HNAE3_FUNC_RESET:
3579 		ret = hclge_func_reset_notify_vf(hdev);
3580 		if (ret)
3581 			return ret;
3582 
3583 		ret = hclge_func_reset_cmd(hdev, 0);
3584 		if (ret) {
3585 			dev_err(&hdev->pdev->dev,
3586 				"asserting function reset fail %d!\n", ret);
3587 			return ret;
3588 		}
3589 
3590 		/* After performing the PF reset, it is not necessary to do the
3591 		 * mailbox handling or send any command to firmware, because
3592 		 * any mailbox handling or command to firmware is only valid
3593 		 * after hclge_cmd_init is called.
3594 		 */
3595 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3596 		hdev->rst_stats.pf_rst_cnt++;
3597 		break;
3598 	case HNAE3_FLR_RESET:
3599 		ret = hclge_func_reset_notify_vf(hdev);
3600 		if (ret)
3601 			return ret;
3602 		break;
3603 	case HNAE3_IMP_RESET:
3604 		hclge_handle_imp_error(hdev);
3605 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3606 		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3607 				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3608 		break;
3609 	default:
3610 		break;
3611 	}
3612 
3613 	/* inform hardware that preparatory work is done */
3614 	msleep(HCLGE_RESET_SYNC_TIME);
3615 	hclge_reset_handshake(hdev, true);
3616 	dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3617 
3618 	return ret;
3619 }
3620 
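/* Decide whether a failed reset should be retried. Returns true when the
 * reset task should be re-scheduled (a reset is still pending or the failure
 * count is below the limit), false when a new reset interrupt has arrived or
 * the retry limit has been reached.
 */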
3621 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3622 {
3623 #define MAX_RESET_FAIL_CNT 5
3624 
3625 	if (hdev->reset_pending) {
3626 		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3627 			 hdev->reset_pending);
3628 		return true;
3629 	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3630 		   HCLGE_RESET_INT_M) {
3631 		dev_info(&hdev->pdev->dev,
3632 			 "reset failed because of a new reset interrupt\n");
3633 		hclge_clear_reset_cause(hdev);
3634 		return false;
3635 	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3636 		hdev->rst_stats.reset_fail_cnt++;
3637 		set_bit(hdev->reset_type, &hdev->reset_pending);
3638 		dev_info(&hdev->pdev->dev,
3639 			 "re-schedule reset task(%u)\n",
3640 			 hdev->rst_stats.reset_fail_cnt);
3641 		return true;
3642 	}
3643 
3644 	hclge_clear_reset_cause(hdev);
3645 
3646 	/* recover the handshake status when reset fail */
3647 	hclge_reset_handshake(hdev, true);
3648 
3649 	dev_err(&hdev->pdev->dev, "Reset fail!\n");
3650 
3651 	hclge_dbg_dump_rst_info(hdev);
3652 
3653 	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3654 
3655 	return false;
3656 }
3657 
3658 static int hclge_set_rst_done(struct hclge_dev *hdev)
3659 {
3660 	struct hclge_pf_rst_done_cmd *req;
3661 	struct hclge_desc desc;
3662 	int ret;
3663 
3664 	req = (struct hclge_pf_rst_done_cmd *)desc.data;
3665 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3666 	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3667 
3668 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3669 	/* To be compatible with the old firmware, which does not support
3670 	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3671 	 * return success
3672 	 */
3673 	if (ret == -EOPNOTSUPP) {
3674 		dev_warn(&hdev->pdev->dev,
3675 			 "current firmware does not support command(0x%x)!\n",
3676 			 HCLGE_OPC_PF_RST_DONE);
3677 		return 0;
3678 	} else if (ret) {
3679 		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3680 			ret);
3681 	}
3682 
3683 	return ret;
3684 }
3685 
3686 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3687 {
3688 	int ret = 0;
3689 
3690 	switch (hdev->reset_type) {
3691 	case HNAE3_FUNC_RESET:
3692 		/* fall through */
3693 	case HNAE3_FLR_RESET:
3694 		ret = hclge_set_all_vf_rst(hdev, false);
3695 		break;
3696 	case HNAE3_GLOBAL_RESET:
3697 		/* fall through */
3698 	case HNAE3_IMP_RESET:
3699 		ret = hclge_set_rst_done(hdev);
3700 		break;
3701 	default:
3702 		break;
3703 	}
3704 
3705 	/* clear up the handshake status after re-initialize done */
3706 	hclge_reset_handshake(hdev, false);
3707 
3708 	return ret;
3709 }
3710 
3711 static int hclge_reset_stack(struct hclge_dev *hdev)
3712 {
3713 	int ret;
3714 
3715 	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3716 	if (ret)
3717 		return ret;
3718 
3719 	ret = hclge_reset_ae_dev(hdev->ae_dev);
3720 	if (ret)
3721 		return ret;
3722 
3723 	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3724 }
3725 
3726 static int hclge_reset_prepare(struct hclge_dev *hdev)
3727 {
3728 	int ret;
3729 
3730 	hdev->rst_stats.reset_cnt++;
3731 	/* perform reset of the stack & ae device for a client */
3732 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3733 	if (ret)
3734 		return ret;
3735 
3736 	rtnl_lock();
3737 	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3738 	rtnl_unlock();
3739 	if (ret)
3740 		return ret;
3741 
3742 	return hclge_reset_prepare_wait(hdev);
3743 }
3744 
3745 static int hclge_reset_rebuild(struct hclge_dev *hdev)
3746 {
3747 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3748 	enum hnae3_reset_type reset_level;
3749 	int ret;
3750 
3751 	hdev->rst_stats.hw_reset_done_cnt++;
3752 
3753 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3754 	if (ret)
3755 		return ret;
3756 
3757 	rtnl_lock();
3758 	ret = hclge_reset_stack(hdev);
3759 	rtnl_unlock();
3760 	if (ret)
3761 		return ret;
3762 
3763 	hclge_clear_reset_cause(hdev);
3764 
3765 	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3766 	/* ignore the RoCE notify error only when the reset has already
3767 	 * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
3768 	 */
3769 	if (ret &&
3770 	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3771 		return ret;
3772 
3773 	ret = hclge_reset_prepare_up(hdev);
3774 	if (ret)
3775 		return ret;
3776 
3777 	rtnl_lock();
3778 	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3779 	rtnl_unlock();
3780 	if (ret)
3781 		return ret;
3782 
3783 	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3784 	if (ret)
3785 		return ret;
3786 
3787 	hdev->last_reset_time = jiffies;
3788 	hdev->rst_stats.reset_fail_cnt = 0;
3789 	hdev->rst_stats.reset_done_cnt++;
3790 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
3791 
3792 	/* if default_reset_request has a higher level reset request,
3793 	 * it should be handled as soon as possible, since some errors
3794 	 * need this kind of reset to be fixed.
3795 	 */
3796 	reset_level = hclge_get_reset_level(ae_dev,
3797 					    &hdev->default_reset_request);
3798 	if (reset_level != HNAE3_NONE_RESET)
3799 		set_bit(reset_level, &hdev->reset_request);
3800 
3801 	return 0;
3802 }
3803 
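/* Top-level reset flow: prepare the stack and hardware, wait for the hardware
 * reset to complete, then rebuild the stack. On failure, the error handler
 * decides whether to re-schedule the reset task.
 */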
3804 static void hclge_reset(struct hclge_dev *hdev)
3805 {
3806 	if (hclge_reset_prepare(hdev))
3807 		goto err_reset;
3808 
3809 	if (hclge_reset_wait(hdev))
3810 		goto err_reset;
3811 
3812 	if (hclge_reset_rebuild(hdev))
3813 		goto err_reset;
3814 
3815 	return;
3816 
3817 err_reset:
3818 	if (hclge_reset_err_handle(hdev))
3819 		hclge_reset_task_schedule(hdev);
3820 }
3821 
3822 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3823 {
3824 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3825 	struct hclge_dev *hdev = ae_dev->priv;
3826 
3827 	/* We might end up getting called broadly because of the 2 cases below:
3828 	 * 1. A recoverable error was conveyed through APEI and the only way
3829 	 *    to bring back normalcy is to reset.
3830 	 * 2. A new reset request from the stack due to timeout
3831 	 *
3832 	 * For the first case, the error event might not have an ae handle
3833 	 * available. Check if this is a new reset request and we are not here
3834 	 * just because the last reset attempt did not succeed and the watchdog
3835 	 * hit us again. We will know this if the last reset request did not
3836 	 * occur very recently (watchdog timer = 5 * HZ, so check after a
3837 	 * sufficiently long time, say 4 * 5 * HZ). In case of a new request,
3838 	 * reset the "reset level" to PF reset. And if it is a repeat of the
3839 	 * most recent request, throttle it: we will not allow it again
3840 	 * before 3 * HZ has elapsed.
3841 	 */
3842 	if (!handle)
3843 		handle = &hdev->vport[0].nic;
3844 
3845 	if (time_before(jiffies, (hdev->last_reset_time +
3846 				  HCLGE_RESET_INTERVAL))) {
3847 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3848 		return;
3849 	} else if (hdev->default_reset_request) {
3850 		hdev->reset_level =
3851 			hclge_get_reset_level(ae_dev,
3852 					      &hdev->default_reset_request);
3853 	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3854 		hdev->reset_level = HNAE3_FUNC_RESET;
3855 	}
3856 
3857 	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3858 		 hdev->reset_level);
3859 
3860 	/* request reset & schedule reset task */
3861 	set_bit(hdev->reset_level, &hdev->reset_request);
3862 	hclge_reset_task_schedule(hdev);
3863 
3864 	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3865 		hdev->reset_level++;
3866 }
3867 
3868 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3869 					enum hnae3_reset_type rst_type)
3870 {
3871 	struct hclge_dev *hdev = ae_dev->priv;
3872 
3873 	set_bit(rst_type, &hdev->default_reset_request);
3874 }
3875 
3876 static void hclge_reset_timer(struct timer_list *t)
3877 {
3878 	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3879 
3880 	/* if default_reset_request has no value, it means that this reset
3881 	 * request has already been handled, so just return here
3882 	 */
3883 	if (!hdev->default_reset_request)
3884 		return;
3885 
3886 	dev_info(&hdev->pdev->dev,
3887 		 "triggering reset in reset timer\n");
3888 	hclge_reset_event(hdev->pdev, NULL);
3889 }
3890 
3891 static void hclge_reset_subtask(struct hclge_dev *hdev)
3892 {
3893 	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3894 
3895 	/* check if there is any ongoing reset in the hardware. This status can
3896 	 * be checked from reset_pending. If there is, then we need to wait for
3897 	 * hardware to complete the reset.
3898 	 *    a. If we are able to figure out in a reasonable time that the
3899 	 *       hardware has been fully reset, then we can proceed with the
3900 	 *       driver and client reset.
3901 	 *    b. else, we can come back later to check this status, so
3902 	 *       re-schedule now.
3903 	 */
3904 	hdev->last_reset_time = jiffies;
3905 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3906 	if (hdev->reset_type != HNAE3_NONE_RESET)
3907 		hclge_reset(hdev);
3908 
3909 	/* check if we got any *new* reset requests to be honored */
3910 	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3911 	if (hdev->reset_type != HNAE3_NONE_RESET)
3912 		hclge_do_reset(hdev);
3913 
3914 	hdev->reset_type = HNAE3_NONE_RESET;
3915 }
3916 
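/* Reset work entry: serialized against other reset paths by reset_sem and
 * guarded by the RST_HANDLING state bit.
 */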
3917 static void hclge_reset_service_task(struct hclge_dev *hdev)
3918 {
3919 	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3920 		return;
3921 
3922 	down(&hdev->reset_sem);
3923 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3924 
3925 	hclge_reset_subtask(hdev);
3926 
3927 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3928 	up(&hdev->reset_sem);
3929 }
3930 
3931 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3932 {
3933 	int i;
3934 
3935 	/* start from vport 1 for PF is always alive */
3936 	for (i = 1; i < hdev->num_alloc_vport; i++) {
3937 		struct hclge_vport *vport = &hdev->vport[i];
3938 
3939 		if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3940 			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3941 
3942 		/* If vf is not alive, set to default value */
3943 		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3944 			vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3945 	}
3946 }
3947 
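/* Periodic work: always refresh the link status, MAC table and promisc mode,
 * then run the roughly once-per-second housekeeping (vport alive check,
 * statistics, port info, VLAN and aRFS sync) and re-schedule itself.
 */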
3948 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3949 {
3950 	unsigned long delta = round_jiffies_relative(HZ);
3951 
3952 	/* Always handle the link updating to make sure link state is
3953 	 * updated when it is triggered by mbx.
3954 	 */
3955 	hclge_update_link_status(hdev);
3956 	hclge_sync_mac_table(hdev);
3957 	hclge_sync_promisc_mode(hdev);
3958 
3959 	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
3960 		delta = jiffies - hdev->last_serv_processed;
3961 
3962 		if (delta < round_jiffies_relative(HZ)) {
3963 			delta = round_jiffies_relative(HZ) - delta;
3964 			goto out;
3965 		}
3966 	}
3967 
3968 	hdev->serv_processed_cnt++;
3969 	hclge_update_vport_alive(hdev);
3970 
3971 	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
3972 		hdev->last_serv_processed = jiffies;
3973 		goto out;
3974 	}
3975 
3976 	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
3977 		hclge_update_stats_for_all(hdev);
3978 
3979 	hclge_update_port_info(hdev);
3980 	hclge_sync_vlan_filter(hdev);
3981 
3982 	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
3983 		hclge_rfs_filter_expire(hdev);
3984 
3985 	hdev->last_serv_processed = jiffies;
3986 
3987 out:
3988 	hclge_task_schedule(hdev, delta);
3989 }
3990 
3991 static void hclge_service_task(struct work_struct *work)
3992 {
3993 	struct hclge_dev *hdev =
3994 		container_of(work, struct hclge_dev, service_task.work);
3995 
3996 	hclge_reset_service_task(hdev);
3997 	hclge_mailbox_service_task(hdev);
3998 	hclge_periodic_service_task(hdev);
3999 
4000 	/* Handle reset and mbx again in case periodical task delays the
4001 	 * handling by calling hclge_task_schedule() in
4002 	 * hclge_periodic_service_task().
4003 	 */
4004 	hclge_reset_service_task(hdev);
4005 	hclge_mailbox_service_task(hdev);
4006 }
4007 
4008 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4009 {
4010 	/* VF handle has no client */
4011 	if (!handle->client)
4012 		return container_of(handle, struct hclge_vport, nic);
4013 	else if (handle->client->type == HNAE3_CLIENT_ROCE)
4014 		return container_of(handle, struct hclge_vport, roce);
4015 	else
4016 		return container_of(handle, struct hclge_vport, nic);
4017 }
4018 
4019 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4020 			    struct hnae3_vector_info *vector_info)
4021 {
4022 	struct hclge_vport *vport = hclge_get_vport(handle);
4023 	struct hnae3_vector_info *vector = vector_info;
4024 	struct hclge_dev *hdev = vport->back;
4025 	int alloc = 0;
4026 	int i, j;
4027 
4028 	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4029 	vector_num = min(hdev->num_msi_left, vector_num);
4030 
4031 	for (j = 0; j < vector_num; j++) {
4032 		for (i = 1; i < hdev->num_msi; i++) {
4033 			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4034 				vector->vector = pci_irq_vector(hdev->pdev, i);
4035 				vector->io_addr = hdev->hw.io_base +
4036 					HCLGE_VECTOR_REG_BASE +
4037 					(i - 1) * HCLGE_VECTOR_REG_OFFSET +
4038 					vport->vport_id *
4039 					HCLGE_VECTOR_VF_OFFSET;
4040 				hdev->vector_status[i] = vport->vport_id;
4041 				hdev->vector_irq[i] = vector->vector;
4042 
4043 				vector++;
4044 				alloc++;
4045 
4046 				break;
4047 			}
4048 		}
4049 	}
4050 	hdev->num_msi_left -= alloc;
4051 	hdev->num_msi_used += alloc;
4052 
4053 	return alloc;
4054 }
4055 
4056 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4057 {
4058 	int i;
4059 
4060 	for (i = 0; i < hdev->num_msi; i++)
4061 		if (vector == hdev->vector_irq[i])
4062 			return i;
4063 
4064 	return -EINVAL;
4065 }
4066 
4067 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4068 {
4069 	struct hclge_vport *vport = hclge_get_vport(handle);
4070 	struct hclge_dev *hdev = vport->back;
4071 	int vector_id;
4072 
4073 	vector_id = hclge_get_vector_index(hdev, vector);
4074 	if (vector_id < 0) {
4075 		dev_err(&hdev->pdev->dev,
4076 			"Get vector index fail. vector = %d\n", vector);
4077 		return vector_id;
4078 	}
4079 
4080 	hclge_free_vector(hdev, vector_id);
4081 
4082 	return 0;
4083 }
4084 
4085 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4086 {
4087 	return HCLGE_RSS_KEY_SIZE;
4088 }
4089 
4090 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4091 {
4092 	return HCLGE_RSS_IND_TBL_SIZE;
4093 }
4094 
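/* Program the RSS hash algorithm and hash key. The key is written in chunks
 * of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per chunk, with the
 * chunk index carried in the hash_config field.
 */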
4095 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4096 				  const u8 hfunc, const u8 *key)
4097 {
4098 	struct hclge_rss_config_cmd *req;
4099 	unsigned int key_offset = 0;
4100 	struct hclge_desc desc;
4101 	int key_counts;
4102 	int key_size;
4103 	int ret;
4104 
4105 	key_counts = HCLGE_RSS_KEY_SIZE;
4106 	req = (struct hclge_rss_config_cmd *)desc.data;
4107 
4108 	while (key_counts) {
4109 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4110 					   false);
4111 
4112 		req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4113 		req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4114 
4115 		key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4116 		memcpy(req->hash_key,
4117 		       key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4118 
4119 		key_counts -= key_size;
4120 		key_offset++;
4121 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4122 		if (ret) {
4123 			dev_err(&hdev->pdev->dev,
4124 				"Configure RSS config fail, status = %d\n",
4125 				ret);
4126 			return ret;
4127 		}
4128 	}
4129 	return 0;
4130 }
4131 
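/* Write the RSS indirection table to hardware, HCLGE_RSS_CFG_TBL_SIZE entries
 * per command descriptor.
 */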
4132 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4133 {
4134 	struct hclge_rss_indirection_table_cmd *req;
4135 	struct hclge_desc desc;
4136 	int i, j;
4137 	int ret;
4138 
4139 	req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4140 
4141 	for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4142 		hclge_cmd_setup_basic_desc
4143 			(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4144 
4145 		req->start_table_index =
4146 			cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4147 		req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4148 
4149 		for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4150 			req->rss_result[j] =
4151 				indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4152 
4153 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4154 		if (ret) {
4155 			dev_err(&hdev->pdev->dev,
4156 				"Configure rss indir table fail, status = %d\n",
4157 				ret);
4158 			return ret;
4159 		}
4160 	}
4161 	return 0;
4162 }
4163 
4164 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4165 				 u16 *tc_size, u16 *tc_offset)
4166 {
4167 	struct hclge_rss_tc_mode_cmd *req;
4168 	struct hclge_desc desc;
4169 	int ret;
4170 	int i;
4171 
4172 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4173 	req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4174 
4175 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4176 		u16 mode = 0;
4177 
4178 		hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4179 		hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4180 				HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4181 		hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4182 				HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4183 
4184 		req->rss_tc_mode[i] = cpu_to_le16(mode);
4185 	}
4186 
4187 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4188 	if (ret)
4189 		dev_err(&hdev->pdev->dev,
4190 			"Configure rss tc mode fail, status = %d\n", ret);
4191 
4192 	return ret;
4193 }
4194 
4195 static void hclge_get_rss_type(struct hclge_vport *vport)
4196 {
4197 	if (vport->rss_tuple_sets.ipv4_tcp_en ||
4198 	    vport->rss_tuple_sets.ipv4_udp_en ||
4199 	    vport->rss_tuple_sets.ipv4_sctp_en ||
4200 	    vport->rss_tuple_sets.ipv6_tcp_en ||
4201 	    vport->rss_tuple_sets.ipv6_udp_en ||
4202 	    vport->rss_tuple_sets.ipv6_sctp_en)
4203 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4204 	else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4205 		 vport->rss_tuple_sets.ipv6_fragment_en)
4206 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4207 	else
4208 		vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4209 }
4210 
4211 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4212 {
4213 	struct hclge_rss_input_tuple_cmd *req;
4214 	struct hclge_desc desc;
4215 	int ret;
4216 
4217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4218 
4219 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4220 
4221 	/* Get the tuple cfg from pf */
4222 	req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4223 	req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4224 	req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4225 	req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4226 	req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4227 	req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4228 	req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4229 	req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4230 	hclge_get_rss_type(&hdev->vport[0]);
4231 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4232 	if (ret)
4233 		dev_err(&hdev->pdev->dev,
4234 			"Configure rss input fail, status = %d\n", ret);
4235 	return ret;
4236 }
4237 
4238 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4239 			 u8 *key, u8 *hfunc)
4240 {
4241 	struct hclge_vport *vport = hclge_get_vport(handle);
4242 	int i;
4243 
4244 	/* Get hash algorithm */
4245 	if (hfunc) {
4246 		switch (vport->rss_algo) {
4247 		case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4248 			*hfunc = ETH_RSS_HASH_TOP;
4249 			break;
4250 		case HCLGE_RSS_HASH_ALGO_SIMPLE:
4251 			*hfunc = ETH_RSS_HASH_XOR;
4252 			break;
4253 		default:
4254 			*hfunc = ETH_RSS_HASH_UNKNOWN;
4255 			break;
4256 		}
4257 	}
4258 
4259 	/* Get the RSS Key required by the user */
4260 	if (key)
4261 		memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4262 
4263 	/* Get indirect table */
4264 	if (indir)
4265 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4266 			indir[i] =  vport->rss_indirection_tbl[i];
4267 
4268 	return 0;
4269 }
4270 
4271 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4272 			 const  u8 *key, const  u8 hfunc)
4273 {
4274 	struct hclge_vport *vport = hclge_get_vport(handle);
4275 	struct hclge_dev *hdev = vport->back;
4276 	u8 hash_algo;
4277 	int ret, i;
4278 
4279 	/* Set the RSS Hash Key if specified by the user */
4280 	if (key) {
4281 		switch (hfunc) {
4282 		case ETH_RSS_HASH_TOP:
4283 			hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4284 			break;
4285 		case ETH_RSS_HASH_XOR:
4286 			hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4287 			break;
4288 		case ETH_RSS_HASH_NO_CHANGE:
4289 			hash_algo = vport->rss_algo;
4290 			break;
4291 		default:
4292 			return -EINVAL;
4293 		}
4294 
4295 		ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4296 		if (ret)
4297 			return ret;
4298 
4299 		/* Update the shadow RSS key with the user specified key */
4300 		memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4301 		vport->rss_algo = hash_algo;
4302 	}
4303 
4304 	/* Update the shadow RSS table with user specified qids */
4305 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4306 		vport->rss_indirection_tbl[i] = indir[i];
4307 
4308 	/* Update the hardware */
4309 	return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4310 }
4311 
4312 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4313 {
4314 	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4315 
4316 	if (nfc->data & RXH_L4_B_2_3)
4317 		hash_sets |= HCLGE_D_PORT_BIT;
4318 	else
4319 		hash_sets &= ~HCLGE_D_PORT_BIT;
4320 
4321 	if (nfc->data & RXH_IP_SRC)
4322 		hash_sets |= HCLGE_S_IP_BIT;
4323 	else
4324 		hash_sets &= ~HCLGE_S_IP_BIT;
4325 
4326 	if (nfc->data & RXH_IP_DST)
4327 		hash_sets |= HCLGE_D_IP_BIT;
4328 	else
4329 		hash_sets &= ~HCLGE_D_IP_BIT;
4330 
4331 	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4332 		hash_sets |= HCLGE_V_TAG_BIT;
4333 
4334 	return hash_sets;
4335 }
4336 
4337 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4338 			       struct ethtool_rxnfc *nfc)
4339 {
4340 	struct hclge_vport *vport = hclge_get_vport(handle);
4341 	struct hclge_dev *hdev = vport->back;
4342 	struct hclge_rss_input_tuple_cmd *req;
4343 	struct hclge_desc desc;
4344 	u8 tuple_sets;
4345 	int ret;
4346 
4347 	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4348 			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
4349 		return -EINVAL;
4350 
4351 	req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4352 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4353 
4354 	req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4355 	req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4356 	req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4357 	req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4358 	req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4359 	req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4360 	req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4361 	req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4362 
4363 	tuple_sets = hclge_get_rss_hash_bits(nfc);
4364 	switch (nfc->flow_type) {
4365 	case TCP_V4_FLOW:
4366 		req->ipv4_tcp_en = tuple_sets;
4367 		break;
4368 	case TCP_V6_FLOW:
4369 		req->ipv6_tcp_en = tuple_sets;
4370 		break;
4371 	case UDP_V4_FLOW:
4372 		req->ipv4_udp_en = tuple_sets;
4373 		break;
4374 	case UDP_V6_FLOW:
4375 		req->ipv6_udp_en = tuple_sets;
4376 		break;
4377 	case SCTP_V4_FLOW:
4378 		req->ipv4_sctp_en = tuple_sets;
4379 		break;
4380 	case SCTP_V6_FLOW:
4381 		if ((nfc->data & RXH_L4_B_0_1) ||
4382 		    (nfc->data & RXH_L4_B_2_3))
4383 			return -EINVAL;
4384 
4385 		req->ipv6_sctp_en = tuple_sets;
4386 		break;
4387 	case IPV4_FLOW:
4388 		req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4389 		break;
4390 	case IPV6_FLOW:
4391 		req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4392 		break;
4393 	default:
4394 		return -EINVAL;
4395 	}
4396 
4397 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4398 	if (ret) {
4399 		dev_err(&hdev->pdev->dev,
4400 			"Set rss tuple fail, status = %d\n", ret);
4401 		return ret;
4402 	}
4403 
4404 	vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4405 	vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4406 	vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4407 	vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4408 	vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4409 	vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4410 	vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4411 	vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4412 	hclge_get_rss_type(vport);
4413 	return 0;
4414 }
4415 
4416 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4417 			       struct ethtool_rxnfc *nfc)
4418 {
4419 	struct hclge_vport *vport = hclge_get_vport(handle);
4420 	u8 tuple_sets;
4421 
4422 	nfc->data = 0;
4423 
4424 	switch (nfc->flow_type) {
4425 	case TCP_V4_FLOW:
4426 		tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4427 		break;
4428 	case UDP_V4_FLOW:
4429 		tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4430 		break;
4431 	case TCP_V6_FLOW:
4432 		tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4433 		break;
4434 	case UDP_V6_FLOW:
4435 		tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4436 		break;
4437 	case SCTP_V4_FLOW:
4438 		tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4439 		break;
4440 	case SCTP_V6_FLOW:
4441 		tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4442 		break;
4443 	case IPV4_FLOW:
4444 	case IPV6_FLOW:
4445 		tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4446 		break;
4447 	default:
4448 		return -EINVAL;
4449 	}
4450 
4451 	if (!tuple_sets)
4452 		return 0;
4453 
4454 	if (tuple_sets & HCLGE_D_PORT_BIT)
4455 		nfc->data |= RXH_L4_B_2_3;
4456 	if (tuple_sets & HCLGE_S_PORT_BIT)
4457 		nfc->data |= RXH_L4_B_0_1;
4458 	if (tuple_sets & HCLGE_D_IP_BIT)
4459 		nfc->data |= RXH_IP_DST;
4460 	if (tuple_sets & HCLGE_S_IP_BIT)
4461 		nfc->data |= RXH_IP_SRC;
4462 
4463 	return 0;
4464 }
4465 
4466 static int hclge_get_tc_size(struct hnae3_handle *handle)
4467 {
4468 	struct hclge_vport *vport = hclge_get_vport(handle);
4469 	struct hclge_dev *hdev = vport->back;
4470 
4471 	return hdev->rss_size_max;
4472 }
4473 
4474 int hclge_rss_init_hw(struct hclge_dev *hdev)
4475 {
4476 	struct hclge_vport *vport = hdev->vport;
4477 	u8 *rss_indir = vport[0].rss_indirection_tbl;
4478 	u16 rss_size = vport[0].alloc_rss_size;
4479 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4480 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4481 	u8 *key = vport[0].rss_hash_key;
4482 	u8 hfunc = vport[0].rss_algo;
4483 	u16 tc_valid[HCLGE_MAX_TC_NUM];
4484 	u16 roundup_size;
4485 	unsigned int i;
4486 	int ret;
4487 
4488 	ret = hclge_set_rss_indir_table(hdev, rss_indir);
4489 	if (ret)
4490 		return ret;
4491 
4492 	ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4493 	if (ret)
4494 		return ret;
4495 
4496 	ret = hclge_set_rss_input_tuple(hdev);
4497 	if (ret)
4498 		return ret;
4499 
4500 	/* Each TC has the same queue size, and the tc_size set to hardware is
4501 	 * the log2 of the roundup power of two of rss_size; the actual queue
4502 	 * size is limited by the indirection table.
4503 	 */
4504 	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4505 		dev_err(&hdev->pdev->dev,
4506 			"Configure rss tc size failed, invalid TC_SIZE = %u\n",
4507 			rss_size);
4508 		return -EINVAL;
4509 	}
4510 
4511 	roundup_size = roundup_pow_of_two(rss_size);
4512 	roundup_size = ilog2(roundup_size);
4513 
4514 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4515 		tc_valid[i] = 0;
4516 
4517 		if (!(hdev->hw_tc_map & BIT(i)))
4518 			continue;
4519 
4520 		tc_valid[i] = 1;
4521 		tc_size[i] = roundup_size;
4522 		tc_offset[i] = rss_size * i;
4523 	}
4524 
4525 	return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4526 }
4527 
4528 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4529 {
4530 	struct hclge_vport *vport = hdev->vport;
4531 	int i, j;
4532 
4533 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4534 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4535 			vport[j].rss_indirection_tbl[i] =
4536 				i % vport[j].alloc_rss_size;
4537 	}
4538 }
4539 
4540 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4541 {
4542 	int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4543 	struct hclge_vport *vport = hdev->vport;
4544 
4545 	if (hdev->pdev->revision >= 0x21)
4546 		rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4547 
4548 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4549 		vport[i].rss_tuple_sets.ipv4_tcp_en =
4550 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4551 		vport[i].rss_tuple_sets.ipv4_udp_en =
4552 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4553 		vport[i].rss_tuple_sets.ipv4_sctp_en =
4554 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4555 		vport[i].rss_tuple_sets.ipv4_fragment_en =
4556 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4557 		vport[i].rss_tuple_sets.ipv6_tcp_en =
4558 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4559 		vport[i].rss_tuple_sets.ipv6_udp_en =
4560 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4561 		vport[i].rss_tuple_sets.ipv6_sctp_en =
4562 			HCLGE_RSS_INPUT_TUPLE_SCTP;
4563 		vport[i].rss_tuple_sets.ipv6_fragment_en =
4564 			HCLGE_RSS_INPUT_TUPLE_OTHER;
4565 
4566 		vport[i].rss_algo = rss_algo;
4567 
4568 		memcpy(vport[i].rss_hash_key, hclge_hash_key,
4569 		       HCLGE_RSS_KEY_SIZE);
4570 	}
4571 
4572 	hclge_rss_indir_init_cfg(hdev);
4573 }
4574 
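/* Map (en == true) or unmap (en == false) the TQP rings in @ring_chain
 * to/from the given vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD rings are
 * packed into each command descriptor; longer chains are sent in several
 * commands.
 */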
4575 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4576 				int vector_id, bool en,
4577 				struct hnae3_ring_chain_node *ring_chain)
4578 {
4579 	struct hclge_dev *hdev = vport->back;
4580 	struct hnae3_ring_chain_node *node;
4581 	struct hclge_desc desc;
4582 	struct hclge_ctrl_vector_chain_cmd *req =
4583 		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
4584 	enum hclge_cmd_status status;
4585 	enum hclge_opcode_type op;
4586 	u16 tqp_type_and_id;
4587 	int i;
4588 
4589 	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4590 	hclge_cmd_setup_basic_desc(&desc, op, false);
4591 	req->int_vector_id = vector_id;
4592 
4593 	i = 0;
4594 	for (node = ring_chain; node; node = node->next) {
4595 		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4596 		hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4597 				HCLGE_INT_TYPE_S,
4598 				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4599 		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4600 				HCLGE_TQP_ID_S, node->tqp_index);
4601 		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4602 				HCLGE_INT_GL_IDX_S,
4603 				hnae3_get_field(node->int_gl_idx,
4604 						HNAE3_RING_GL_IDX_M,
4605 						HNAE3_RING_GL_IDX_S));
4606 		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4607 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4608 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4609 			req->vfid = vport->vport_id;
4610 
4611 			status = hclge_cmd_send(&hdev->hw, &desc, 1);
4612 			if (status) {
4613 				dev_err(&hdev->pdev->dev,
4614 					"Map TQP fail, status is %d.\n",
4615 					status);
4616 				return -EIO;
4617 			}
4618 			i = 0;
4619 
4620 			hclge_cmd_setup_basic_desc(&desc,
4621 						   op,
4622 						   false);
4623 			req->int_vector_id = vector_id;
4624 		}
4625 	}
4626 
4627 	if (i > 0) {
4628 		req->int_cause_num = i;
4629 		req->vfid = vport->vport_id;
4630 		status = hclge_cmd_send(&hdev->hw, &desc, 1);
4631 		if (status) {
4632 			dev_err(&hdev->pdev->dev,
4633 				"Map TQP fail, status is %d.\n", status);
4634 			return -EIO;
4635 		}
4636 	}
4637 
4638 	return 0;
4639 }
4640 
4641 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4642 				    struct hnae3_ring_chain_node *ring_chain)
4643 {
4644 	struct hclge_vport *vport = hclge_get_vport(handle);
4645 	struct hclge_dev *hdev = vport->back;
4646 	int vector_id;
4647 
4648 	vector_id = hclge_get_vector_index(hdev, vector);
4649 	if (vector_id < 0) {
4650 		dev_err(&hdev->pdev->dev,
4651 			"failed to get vector index. vector=%d\n", vector);
4652 		return vector_id;
4653 	}
4654 
4655 	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4656 }
4657 
4658 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4659 				       struct hnae3_ring_chain_node *ring_chain)
4660 {
4661 	struct hclge_vport *vport = hclge_get_vport(handle);
4662 	struct hclge_dev *hdev = vport->back;
4663 	int vector_id, ret;
4664 
4665 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4666 		return 0;
4667 
4668 	vector_id = hclge_get_vector_index(hdev, vector);
4669 	if (vector_id < 0) {
4670 		dev_err(&handle->pdev->dev,
4671 			"Get vector index fail. ret =%d\n", vector_id);
4672 		return vector_id;
4673 	}
4674 
4675 	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4676 	if (ret)
4677 		dev_err(&handle->pdev->dev,
4678 			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4679 			vector_id, ret);
4680 
4681 	return ret;
4682 }
4683 
4684 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4685 				      struct hclge_promisc_param *param)
4686 {
4687 	struct hclge_promisc_cfg_cmd *req;
4688 	struct hclge_desc desc;
4689 	int ret;
4690 
4691 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4692 
4693 	req = (struct hclge_promisc_cfg_cmd *)desc.data;
4694 	req->vf_id = param->vf_id;
4695 
4696 	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4697 	 * pdev revision(0x20); newer revisions support them. Setting these
4698 	 * two fields will not return an error when the driver sends the
4699 	 * command to the firmware on revision(0x20).
4700 	 */
4701 	req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4702 		HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4703 
4704 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4705 	if (ret)
4706 		dev_err(&hdev->pdev->dev,
4707 			"failed to set vport %d promisc mode, ret = %d.\n",
4708 			param->vf_id, ret);
4709 
4710 	return ret;
4711 }
4712 
4713 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4714 				     bool en_uc, bool en_mc, bool en_bc,
4715 				     int vport_id)
4716 {
4717 	if (!param)
4718 		return;
4719 
4720 	memset(param, 0, sizeof(struct hclge_promisc_param));
4721 	if (en_uc)
4722 		param->enable = HCLGE_PROMISC_EN_UC;
4723 	if (en_mc)
4724 		param->enable |= HCLGE_PROMISC_EN_MC;
4725 	if (en_bc)
4726 		param->enable |= HCLGE_PROMISC_EN_BC;
4727 	param->vf_id = vport_id;
4728 }
4729 
4730 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4731 				 bool en_mc_pmc, bool en_bc_pmc)
4732 {
4733 	struct hclge_dev *hdev = vport->back;
4734 	struct hclge_promisc_param param;
4735 
4736 	hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4737 				 vport->vport_id);
4738 	return hclge_cmd_set_promisc_mode(hdev, &param);
4739 }
4740 
4741 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4742 				  bool en_mc_pmc)
4743 {
4744 	struct hclge_vport *vport = hclge_get_vport(handle);
4745 	bool en_bc_pmc = true;
4746 
4747 	/* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4748 	 * is always bypassed. So broadcast promisc should be disabled until
4749 	 * the user enables promisc mode
4750 	 */
4751 	if (handle->pdev->revision == 0x20)
4752 		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4753 
4754 	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4755 					    en_bc_pmc);
4756 }
4757 
4758 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4759 {
4760 	struct hclge_vport *vport = hclge_get_vport(handle);
4761 	struct hclge_dev *hdev = vport->back;
4762 
4763 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
4764 }
4765 
4766 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4767 {
4768 	struct hclge_get_fd_mode_cmd *req;
4769 	struct hclge_desc desc;
4770 	int ret;
4771 
4772 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4773 
4774 	req = (struct hclge_get_fd_mode_cmd *)desc.data;
4775 
4776 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4777 	if (ret) {
4778 		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4779 		return ret;
4780 	}
4781 
4782 	*fd_mode = req->mode;
4783 
4784 	return ret;
4785 }
4786 
4787 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4788 				   u32 *stage1_entry_num,
4789 				   u32 *stage2_entry_num,
4790 				   u16 *stage1_counter_num,
4791 				   u16 *stage2_counter_num)
4792 {
4793 	struct hclge_get_fd_allocation_cmd *req;
4794 	struct hclge_desc desc;
4795 	int ret;
4796 
4797 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4798 
4799 	req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4800 
4801 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4802 	if (ret) {
4803 		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4804 			ret);
4805 		return ret;
4806 	}
4807 
4808 	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4809 	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4810 	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4811 	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4812 
4813 	return ret;
4814 }
4815 
4816 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
4817 				   enum HCLGE_FD_STAGE stage_num)
4818 {
4819 	struct hclge_set_fd_key_config_cmd *req;
4820 	struct hclge_fd_key_cfg *stage;
4821 	struct hclge_desc desc;
4822 	int ret;
4823 
4824 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4825 
4826 	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4827 	stage = &hdev->fd_cfg.key_cfg[stage_num];
4828 	req->stage = stage_num;
4829 	req->key_select = stage->key_sel;
4830 	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4831 	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4832 	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4833 	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4834 	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4835 	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4836 
4837 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4838 	if (ret)
4839 		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4840 
4841 	return ret;
4842 }
4843 
4844 static int hclge_init_fd_config(struct hclge_dev *hdev)
4845 {
4846 #define LOW_2_WORDS		0x03
4847 	struct hclge_fd_key_cfg *key_cfg;
4848 	int ret;
4849 
4850 	if (!hnae3_dev_fd_supported(hdev))
4851 		return 0;
4852 
4853 	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4854 	if (ret)
4855 		return ret;
4856 
4857 	switch (hdev->fd_cfg.fd_mode) {
4858 	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4859 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4860 		break;
4861 	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4862 		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4863 		break;
4864 	default:
4865 		dev_err(&hdev->pdev->dev,
4866 			"Unsupported flow director mode %u\n",
4867 			hdev->fd_cfg.fd_mode);
4868 		return -EOPNOTSUPP;
4869 	}
4870 
4871 	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4872 	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4873 	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4874 	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4875 	key_cfg->outer_sipv6_word_en = 0;
4876 	key_cfg->outer_dipv6_word_en = 0;
4877 
4878 	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4879 				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4880 				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4881 				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4882 
4883 	/* If the max 400-bit key is used, MAC tuples can also be supported */
4884 	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
4885 		key_cfg->tuple_active |=
4886 				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4887 
4888 	/* roce_type is used to filter roce frames
4889 	 * dst_vport is used to specify the rule
4890 	 */
4891 	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4892 
4893 	ret = hclge_get_fd_allocation(hdev,
4894 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4895 				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4896 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4897 				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4898 	if (ret)
4899 		return ret;
4900 
4901 	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4902 }
4903 
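/* Write one flow director TCAM entry (the x or y half of the key, selected by
 * @sel_x) at index @loc. The key is spread across three chained command
 * descriptors, and entry_vld marks the entry valid when adding.
 */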
4904 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4905 				int loc, u8 *key, bool is_add)
4906 {
4907 	struct hclge_fd_tcam_config_1_cmd *req1;
4908 	struct hclge_fd_tcam_config_2_cmd *req2;
4909 	struct hclge_fd_tcam_config_3_cmd *req3;
4910 	struct hclge_desc desc[3];
4911 	int ret;
4912 
4913 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4914 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4915 	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4916 	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4917 	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4918 
4919 	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4920 	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4921 	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4922 
4923 	req1->stage = stage;
4924 	req1->xy_sel = sel_x ? 1 : 0;
4925 	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4926 	req1->index = cpu_to_le32(loc);
4927 	req1->entry_vld = sel_x ? is_add : 0;
4928 
4929 	if (key) {
4930 		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4931 		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4932 		       sizeof(req2->tcam_data));
4933 		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4934 		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4935 	}
4936 
4937 	ret = hclge_cmd_send(&hdev->hw, desc, 3);
4938 	if (ret)
4939 		dev_err(&hdev->pdev->dev,
4940 			"config tcam key fail, ret=%d\n",
4941 			ret);
4942 
4943 	return ret;
4944 }
4945 
4946 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4947 			      struct hclge_fd_ad_data *action)
4948 {
4949 	struct hclge_fd_ad_config_cmd *req;
4950 	struct hclge_desc desc;
4951 	u64 ad_data = 0;
4952 	int ret;
4953 
4954 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4955 
4956 	req = (struct hclge_fd_ad_config_cmd *)desc.data;
4957 	req->index = cpu_to_le32(loc);
4958 	req->stage = stage;
4959 
4960 	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4961 		      action->write_rule_id_to_bd);
4962 	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4963 			action->rule_id);
4964 	ad_data <<= 32;
4965 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4966 	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4967 		      action->forward_to_direct_queue);
4968 	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4969 			action->queue_id);
4970 	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4971 	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4972 			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4973 	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4974 	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4975 			action->counter_id);
4976 
4977 	req->ad_data = cpu_to_le64(ad_data);
4978 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4979 	if (ret)
4980 		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4981 
4982 	return ret;
4983 }
4984 
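/* Convert one tuple of the rule into TCAM key data: calc_x()/calc_y() turn
 * each (value, mask) pair into the x/y key bytes expected by the TCAM.
 * Returns true when the tuple occupies space in the key (even if it is unused
 * and left as zero), false for an unrecognized tuple bit.
 */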
4985 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4986 				   struct hclge_fd_rule *rule)
4987 {
4988 	u16 tmp_x_s, tmp_y_s;
4989 	u32 tmp_x_l, tmp_y_l;
4990 	int i;
4991 
4992 	if (rule->unused_tuple & tuple_bit)
4993 		return true;
4994 
4995 	switch (tuple_bit) {
4996 	case BIT(INNER_DST_MAC):
4997 		for (i = 0; i < ETH_ALEN; i++) {
4998 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4999 			       rule->tuples_mask.dst_mac[i]);
5000 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5001 			       rule->tuples_mask.dst_mac[i]);
5002 		}
5003 
5004 		return true;
5005 	case BIT(INNER_SRC_MAC):
5006 		for (i = 0; i < ETH_ALEN; i++) {
5007 			calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5008 			       rule->tuples_mask.src_mac[i]);
5009 			calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5010 			       rule->tuples_mask.src_mac[i]);
5011 		}
5012 
5013 		return true;
5014 	case BIT(INNER_VLAN_TAG_FST):
5015 		calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5016 		       rule->tuples_mask.vlan_tag1);
5017 		calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5018 		       rule->tuples_mask.vlan_tag1);
5019 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5020 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5021 
5022 		return true;
5023 	case BIT(INNER_ETH_TYPE):
5024 		calc_x(tmp_x_s, rule->tuples.ether_proto,
5025 		       rule->tuples_mask.ether_proto);
5026 		calc_y(tmp_y_s, rule->tuples.ether_proto,
5027 		       rule->tuples_mask.ether_proto);
5028 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5029 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5030 
5031 		return true;
5032 	case BIT(INNER_IP_TOS):
5033 		calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5034 		calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5035 
5036 		return true;
5037 	case BIT(INNER_IP_PROTO):
5038 		calc_x(*key_x, rule->tuples.ip_proto,
5039 		       rule->tuples_mask.ip_proto);
5040 		calc_y(*key_y, rule->tuples.ip_proto,
5041 		       rule->tuples_mask.ip_proto);
5042 
5043 		return true;
5044 	case BIT(INNER_SRC_IP):
5045 		calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5046 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5047 		calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5048 		       rule->tuples_mask.src_ip[IPV4_INDEX]);
5049 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5050 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5051 
5052 		return true;
5053 	case BIT(INNER_DST_IP):
5054 		calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5055 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5056 		calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5057 		       rule->tuples_mask.dst_ip[IPV4_INDEX]);
5058 		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5059 		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5060 
5061 		return true;
5062 	case BIT(INNER_SRC_PORT):
5063 		calc_x(tmp_x_s, rule->tuples.src_port,
5064 		       rule->tuples_mask.src_port);
5065 		calc_y(tmp_y_s, rule->tuples.src_port,
5066 		       rule->tuples_mask.src_port);
5067 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5068 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5069 
5070 		return true;
5071 	case BIT(INNER_DST_PORT):
5072 		calc_x(tmp_x_s, rule->tuples.dst_port,
5073 		       rule->tuples_mask.dst_port);
5074 		calc_y(tmp_y_s, rule->tuples.dst_port,
5075 		       rule->tuples_mask.dst_port);
5076 		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5077 		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5078 
5079 		return true;
5080 	default:
5081 		return false;
5082 	}
5083 }
5084 
5085 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5086 				 u8 vf_id, u8 network_port_id)
5087 {
5088 	u32 port_number = 0;
5089 
5090 	if (port_type == HOST_PORT) {
5091 		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5092 				pf_id);
5093 		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5094 				vf_id);
5095 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5096 	} else {
5097 		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5098 				HCLGE_NETWORK_PORT_ID_S, network_port_id);
5099 		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5100 	}
5101 
5102 	return port_number;
5103 }
5104 
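/* Build the meta data portion of the key: pack the active meta data fields
 * (packet type for ROCE_TYPE, destination vport number for DST_VPORT) into a
 * u32, convert it to x/y key data and left-align it to the MSB of the meta
 * data region.
 */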
5105 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5106 				       __le32 *key_x, __le32 *key_y,
5107 				       struct hclge_fd_rule *rule)
5108 {
5109 	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5110 	u8 cur_pos = 0, tuple_size, shift_bits;
5111 	unsigned int i;
5112 
5113 	for (i = 0; i < MAX_META_DATA; i++) {
5114 		tuple_size = meta_data_key_info[i].key_length;
5115 		tuple_bit = key_cfg->meta_data_active & BIT(i);
5116 
5117 		switch (tuple_bit) {
5118 		case BIT(ROCE_TYPE):
5119 			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5120 			cur_pos += tuple_size;
5121 			break;
5122 		case BIT(DST_VPORT):
5123 			port_number = hclge_get_port_number(HOST_PORT, 0,
5124 							    rule->vf_id, 0);
5125 			hnae3_set_field(meta_data,
5126 					GENMASK(cur_pos + tuple_size, cur_pos),
5127 					cur_pos, port_number);
5128 			cur_pos += tuple_size;
5129 			break;
5130 		default:
5131 			break;
5132 		}
5133 	}
5134 
5135 	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5136 	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5137 	shift_bits = sizeof(meta_data) * 8 - cur_pos;
5138 
5139 	*key_x = cpu_to_le32(tmp_x << shift_bits);
5140 	*key_y = cpu_to_le32(tmp_y << shift_bits);
5141 }
5142 
5143 /* A complete key is the combination of a meta data key and a tuple key.
5144  * The meta data key is stored at the MSB region, the tuple key at the
5145  * LSB region; unused bits are filled with 0.
5146  */
5147 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5148 			    struct hclge_fd_rule *rule)
5149 {
5150 	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5151 	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5152 	u8 *cur_key_x, *cur_key_y;
5153 	u8 meta_data_region;
5154 	u8 tuple_size;
5155 	int ret;
5156 	u32 i;
5157 
5158 	memset(key_x, 0, sizeof(key_x));
5159 	memset(key_y, 0, sizeof(key_y));
5160 	cur_key_x = key_x;
5161 	cur_key_y = key_y;
5162 
5163 	for (i = 0; i < MAX_TUPLE; i++) {
5164 		bool tuple_valid;
5165 		u32 check_tuple;
5166 
5167 		tuple_size = tuple_key_info[i].key_length / 8;
5168 		check_tuple = key_cfg->tuple_active & BIT(i);
5169 
5170 		tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5171 						     cur_key_y, rule);
5172 		if (tuple_valid) {
5173 			cur_key_x += tuple_size;
5174 			cur_key_y += tuple_size;
5175 		}
5176 	}
5177 
5178 	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5179 			MAX_META_DATA_LENGTH / 8;
5180 
5181 	hclge_fd_convert_meta_data(key_cfg,
5182 				   (__le32 *)(key_x + meta_data_region),
5183 				   (__le32 *)(key_y + meta_data_region),
5184 				   rule);
5185 
5186 	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5187 				   true);
5188 	if (ret) {
5189 		dev_err(&hdev->pdev->dev,
5190 			"fd key_y config fail, loc=%u, ret=%d\n",
5191 			rule->location, ret);
5192 		return ret;
5193 	}
5194 
5195 	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5196 				   true);
5197 	if (ret)
5198 		dev_err(&hdev->pdev->dev,
5199 			"fd key_x config fail, loc=%u, ret=%d\n",
5200 			rule->location, ret);
5201 	return ret;
5202 }
5203 
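/* Fill the action data for a flow director rule: either drop the packet or
 * forward it to the rule's destination queue, and write the rule id into the
 * RX buffer descriptor so that matched packets can be identified.
 */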
5204 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5205 			       struct hclge_fd_rule *rule)
5206 {
5207 	struct hclge_fd_ad_data ad_data;
5208 
5209 	ad_data.ad_id = rule->location;
5210 
5211 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5212 		ad_data.drop_packet = true;
5213 		ad_data.forward_to_direct_queue = false;
5214 		ad_data.queue_id = 0;
5215 	} else {
5216 		ad_data.drop_packet = false;
5217 		ad_data.forward_to_direct_queue = true;
5218 		ad_data.queue_id = rule->queue_id;
5219 	}
5220 
5221 	ad_data.use_counter = false;
5222 	ad_data.counter_id = 0;
5223 
5224 	ad_data.use_next_stage = false;
5225 	ad_data.next_input_key = 0;
5226 
5227 	ad_data.write_rule_id_to_bd = true;
5228 	ad_data.rule_id = rule->location;
5229 
5230 	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5231 }
5232 
5233 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5234 				       u32 *unused_tuple)
5235 {
5236 	if (!spec || !unused_tuple)
5237 		return -EINVAL;
5238 
5239 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5240 
5241 	if (!spec->ip4src)
5242 		*unused_tuple |= BIT(INNER_SRC_IP);
5243 
5244 	if (!spec->ip4dst)
5245 		*unused_tuple |= BIT(INNER_DST_IP);
5246 
5247 	if (!spec->psrc)
5248 		*unused_tuple |= BIT(INNER_SRC_PORT);
5249 
5250 	if (!spec->pdst)
5251 		*unused_tuple |= BIT(INNER_DST_PORT);
5252 
5253 	if (!spec->tos)
5254 		*unused_tuple |= BIT(INNER_IP_TOS);
5255 
5256 	return 0;
5257 }
5258 
5259 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5260 				    u32 *unused_tuple)
5261 {
5262 	if (!spec || !unused_tuple)
5263 		return -EINVAL;
5264 
5265 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5266 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5267 
5268 	if (!spec->ip4src)
5269 		*unused_tuple |= BIT(INNER_SRC_IP);
5270 
5271 	if (!spec->ip4dst)
5272 		*unused_tuple |= BIT(INNER_DST_IP);
5273 
5274 	if (!spec->tos)
5275 		*unused_tuple |= BIT(INNER_IP_TOS);
5276 
5277 	if (!spec->proto)
5278 		*unused_tuple |= BIT(INNER_IP_PROTO);
5279 
5280 	if (spec->l4_4_bytes)
5281 		return -EOPNOTSUPP;
5282 
5283 	if (spec->ip_ver != ETH_RX_NFC_IP4)
5284 		return -EOPNOTSUPP;
5285 
5286 	return 0;
5287 }
5288 
5289 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5290 				       u32 *unused_tuple)
5291 {
5292 	if (!spec || !unused_tuple)
5293 		return -EINVAL;
5294 
5295 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5296 		BIT(INNER_IP_TOS);
5297 
5298 	/* check whether src/dst ip address is used */
5299 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5300 	    !spec->ip6src[2] && !spec->ip6src[3])
5301 		*unused_tuple |= BIT(INNER_SRC_IP);
5302 
5303 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5304 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5305 		*unused_tuple |= BIT(INNER_DST_IP);
5306 
5307 	if (!spec->psrc)
5308 		*unused_tuple |= BIT(INNER_SRC_PORT);
5309 
5310 	if (!spec->pdst)
5311 		*unused_tuple |= BIT(INNER_DST_PORT);
5312 
5313 	if (spec->tclass)
5314 		return -EOPNOTSUPP;
5315 
5316 	return 0;
5317 }
5318 
5319 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5320 				    u32 *unused_tuple)
5321 {
5322 	if (!spec || !unused_tuple)
5323 		return -EINVAL;
5324 
5325 	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5326 		BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5327 
5328 	/* check whether src/dst ip address is used */
5329 	if (!spec->ip6src[0] && !spec->ip6src[1] &&
5330 	    !spec->ip6src[2] && !spec->ip6src[3])
5331 		*unused_tuple |= BIT(INNER_SRC_IP);
5332 
5333 	if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
5334 	    !spec->ip6dst[2] && !spec->ip6dst[3])
5335 		*unused_tuple |= BIT(INNER_DST_IP);
5336 
5337 	if (!spec->l4_proto)
5338 		*unused_tuple |= BIT(INNER_IP_PROTO);
5339 
5340 	if (spec->tclass)
5341 		return -EOPNOTSUPP;
5342 
5343 	if (spec->l4_4_bytes)
5344 		return -EOPNOTSUPP;
5345 
5346 	return 0;
5347 }
5348 
5349 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5350 {
5351 	if (!spec || !unused_tuple)
5352 		return -EINVAL;
5353 
5354 	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5355 		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5356 		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5357 
5358 	if (is_zero_ether_addr(spec->h_source))
5359 		*unused_tuple |= BIT(INNER_SRC_MAC);
5360 
5361 	if (is_zero_ether_addr(spec->h_dest))
5362 		*unused_tuple |= BIT(INNER_DST_MAC);
5363 
5364 	if (!spec->h_proto)
5365 		*unused_tuple |= BIT(INNER_ETH_TYPE);
5366 
5367 	return 0;
5368 }
5369 
5370 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5371 				    struct ethtool_rx_flow_spec *fs,
5372 				    u32 *unused_tuple)
5373 {
5374 	if (fs->flow_type & FLOW_EXT) {
5375 		if (fs->h_ext.vlan_etype) {
5376 			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5377 			return -EOPNOTSUPP;
5378 		}
5379 
5380 		if (!fs->h_ext.vlan_tci)
5381 			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5382 
5383 		if (fs->m_ext.vlan_tci &&
5384 		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5385 			dev_err(&hdev->pdev->dev,
5386 				"failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
5387 				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5388 			return -EINVAL;
5389 		}
5390 	} else {
5391 		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5392 	}
5393 
5394 	if (fs->flow_type & FLOW_MAC_EXT) {
5395 		if (hdev->fd_cfg.fd_mode !=
5396 		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5397 			dev_err(&hdev->pdev->dev,
5398 				"FLOW_MAC_EXT is not supported in current fd mode!\n");
5399 			return -EOPNOTSUPP;
5400 		}
5401 
5402 		if (is_zero_ether_addr(fs->h_ext.h_dest))
5403 			*unused_tuple |= BIT(INNER_DST_MAC);
5404 		else
5405 			*unused_tuple &= ~BIT(INNER_DST_MAC);
5406 	}
5407 
5408 	return 0;
5409 }
5410 
5411 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5412 			       struct ethtool_rx_flow_spec *fs,
5413 			       u32 *unused_tuple)
5414 {
5415 	u32 flow_type;
5416 	int ret;
5417 
5418 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5419 		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
5421 			fs->location,
5422 			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5423 		return -EINVAL;
5424 	}
5425 
5426 	if ((fs->flow_type & FLOW_EXT) &&
5427 	    (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5428 		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5429 		return -EOPNOTSUPP;
5430 	}
5431 
5432 	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5433 	switch (flow_type) {
5434 	case SCTP_V4_FLOW:
5435 	case TCP_V4_FLOW:
5436 	case UDP_V4_FLOW:
5437 		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5438 						  unused_tuple);
5439 		break;
5440 	case IP_USER_FLOW:
5441 		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5442 					       unused_tuple);
5443 		break;
5444 	case SCTP_V6_FLOW:
5445 	case TCP_V6_FLOW:
5446 	case UDP_V6_FLOW:
5447 		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
5448 						  unused_tuple);
5449 		break;
5450 	case IPV6_USER_FLOW:
5451 		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
5452 					       unused_tuple);
5453 		break;
5454 	case ETHER_FLOW:
5455 		if (hdev->fd_cfg.fd_mode !=
5456 			HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5457 			dev_err(&hdev->pdev->dev,
5458 				"ETHER_FLOW is not supported in current fd mode!\n");
5459 			return -EOPNOTSUPP;
5460 		}
5461 
5462 		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
5463 						 unused_tuple);
5464 		break;
5465 	default:
5466 		dev_err(&hdev->pdev->dev,
5467 			"unsupported protocol type, protocol type = %#x\n",
5468 			flow_type);
5469 		return -EOPNOTSUPP;
5470 	}
5471 
5472 	if (ret) {
5473 		dev_err(&hdev->pdev->dev,
5474 			"failed to check flow union tuple, ret = %d\n",
5475 			ret);
5476 		return ret;
5477 	}
5478 
5479 	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
5480 }
5481 
5482 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5483 {
5484 	struct hclge_fd_rule *rule = NULL;
5485 	struct hlist_node *node2;
5486 
5487 	spin_lock_bh(&hdev->fd_rule_lock);
5488 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5489 		if (rule->location >= location)
5490 			break;
5491 	}
5492 
5493 	spin_unlock_bh(&hdev->fd_rule_lock);
5494 
	return rule && rule->location == location;
5496 }
5497 
/* make sure this function is called with fd_rule_lock held */
5499 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5500 				     struct hclge_fd_rule *new_rule,
5501 				     u16 location,
5502 				     bool is_add)
5503 {
5504 	struct hclge_fd_rule *rule = NULL, *parent = NULL;
5505 	struct hlist_node *node2;
5506 
5507 	if (is_add && !new_rule)
5508 		return -EINVAL;
5509 
5510 	hlist_for_each_entry_safe(rule, node2,
5511 				  &hdev->fd_rule_list, rule_node) {
5512 		if (rule->location >= location)
5513 			break;
5514 		parent = rule;
5515 	}
5516 
5517 	if (rule && rule->location == location) {
5518 		hlist_del(&rule->rule_node);
5519 		kfree(rule);
5520 		hdev->hclge_fd_rule_num--;
5521 
5522 		if (!is_add) {
5523 			if (!hdev->hclge_fd_rule_num)
5524 				hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5525 			clear_bit(location, hdev->fd_bmap);
5526 
5527 			return 0;
5528 		}
5529 	} else if (!is_add) {
5530 		dev_err(&hdev->pdev->dev,
			"failed to delete, rule %u does not exist\n",
5532 			location);
5533 		return -EINVAL;
5534 	}
5535 
5536 	INIT_HLIST_NODE(&new_rule->rule_node);
5537 
5538 	if (parent)
5539 		hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5540 	else
5541 		hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5542 
5543 	set_bit(location, hdev->fd_bmap);
5544 	hdev->hclge_fd_rule_num++;
5545 	hdev->fd_active_type = new_rule->rule_type;
5546 
5547 	return 0;
5548 }
5549 
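/* convert the ethtool flow spec (network byte order) into the driver's
 * host-order rule tuples; the second switch fills in the implicit L4
 * protocol for the SCTP/TCP/UDP flow types
 */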
5550 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5551 			      struct ethtool_rx_flow_spec *fs,
5552 			      struct hclge_fd_rule *rule)
5553 {
5554 	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5555 
5556 	switch (flow_type) {
5557 	case SCTP_V4_FLOW:
5558 	case TCP_V4_FLOW:
5559 	case UDP_V4_FLOW:
5560 		rule->tuples.src_ip[IPV4_INDEX] =
5561 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5562 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5563 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5564 
5565 		rule->tuples.dst_ip[IPV4_INDEX] =
5566 				be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5567 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5568 				be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5569 
5570 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5571 		rule->tuples_mask.src_port =
5572 				be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5573 
5574 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5575 		rule->tuples_mask.dst_port =
5576 				be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5577 
5578 		rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5579 		rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5580 
5581 		rule->tuples.ether_proto = ETH_P_IP;
5582 		rule->tuples_mask.ether_proto = 0xFFFF;
5583 
5584 		break;
5585 	case IP_USER_FLOW:
5586 		rule->tuples.src_ip[IPV4_INDEX] =
5587 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5588 		rule->tuples_mask.src_ip[IPV4_INDEX] =
5589 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5590 
5591 		rule->tuples.dst_ip[IPV4_INDEX] =
5592 				be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5593 		rule->tuples_mask.dst_ip[IPV4_INDEX] =
5594 				be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5595 
5596 		rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5597 		rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5598 
5599 		rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5600 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5601 
5602 		rule->tuples.ether_proto = ETH_P_IP;
5603 		rule->tuples_mask.ether_proto = 0xFFFF;
5604 
5605 		break;
5606 	case SCTP_V6_FLOW:
5607 	case TCP_V6_FLOW:
5608 	case UDP_V6_FLOW:
5609 		be32_to_cpu_array(rule->tuples.src_ip,
5610 				  fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5611 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5612 				  fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5613 
5614 		be32_to_cpu_array(rule->tuples.dst_ip,
5615 				  fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5616 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5617 				  fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5618 
5619 		rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5620 		rule->tuples_mask.src_port =
5621 				be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5622 
5623 		rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5624 		rule->tuples_mask.dst_port =
5625 				be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5626 
5627 		rule->tuples.ether_proto = ETH_P_IPV6;
5628 		rule->tuples_mask.ether_proto = 0xFFFF;
5629 
5630 		break;
5631 	case IPV6_USER_FLOW:
5632 		be32_to_cpu_array(rule->tuples.src_ip,
5633 				  fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5634 		be32_to_cpu_array(rule->tuples_mask.src_ip,
5635 				  fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5636 
5637 		be32_to_cpu_array(rule->tuples.dst_ip,
5638 				  fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5639 		be32_to_cpu_array(rule->tuples_mask.dst_ip,
5640 				  fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5641 
5642 		rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5643 		rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5644 
5645 		rule->tuples.ether_proto = ETH_P_IPV6;
5646 		rule->tuples_mask.ether_proto = 0xFFFF;
5647 
5648 		break;
5649 	case ETHER_FLOW:
5650 		ether_addr_copy(rule->tuples.src_mac,
5651 				fs->h_u.ether_spec.h_source);
5652 		ether_addr_copy(rule->tuples_mask.src_mac,
5653 				fs->m_u.ether_spec.h_source);
5654 
5655 		ether_addr_copy(rule->tuples.dst_mac,
5656 				fs->h_u.ether_spec.h_dest);
5657 		ether_addr_copy(rule->tuples_mask.dst_mac,
5658 				fs->m_u.ether_spec.h_dest);
5659 
5660 		rule->tuples.ether_proto =
5661 				be16_to_cpu(fs->h_u.ether_spec.h_proto);
5662 		rule->tuples_mask.ether_proto =
5663 				be16_to_cpu(fs->m_u.ether_spec.h_proto);
5664 
5665 		break;
5666 	default:
5667 		return -EOPNOTSUPP;
5668 	}
5669 
5670 	switch (flow_type) {
5671 	case SCTP_V4_FLOW:
5672 	case SCTP_V6_FLOW:
5673 		rule->tuples.ip_proto = IPPROTO_SCTP;
5674 		rule->tuples_mask.ip_proto = 0xFF;
5675 		break;
5676 	case TCP_V4_FLOW:
5677 	case TCP_V6_FLOW:
5678 		rule->tuples.ip_proto = IPPROTO_TCP;
5679 		rule->tuples_mask.ip_proto = 0xFF;
5680 		break;
5681 	case UDP_V4_FLOW:
5682 	case UDP_V6_FLOW:
5683 		rule->tuples.ip_proto = IPPROTO_UDP;
5684 		rule->tuples_mask.ip_proto = 0xFF;
5685 		break;
5686 	default:
5687 		break;
5688 	}
5689 
5690 	if (fs->flow_type & FLOW_EXT) {
5691 		rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5692 		rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5693 	}
5694 
5695 	if (fs->flow_type & FLOW_MAC_EXT) {
5696 		ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5697 		ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5698 	}
5699 
5700 	return 0;
5701 }
5702 
/* make sure this function is called with fd_rule_lock held */
5704 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5705 				struct hclge_fd_rule *rule)
5706 {
5707 	int ret;
5708 
5709 	if (!rule) {
5710 		dev_err(&hdev->pdev->dev,
5711 			"The flow director rule is NULL\n");
5712 		return -EINVAL;
5713 	}
5714 
	/* adding to the rule list never fails here, so there is no need to
	 * check the return value
	 */
5716 	hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5717 
5718 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5719 	if (ret)
5720 		goto clear_rule;
5721 
5722 	ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5723 	if (ret)
5724 		goto clear_rule;
5725 
5726 	return 0;
5727 
5728 clear_rule:
5729 	hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5730 	return ret;
5731 }
5732 
5733 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5734 			      struct ethtool_rxnfc *cmd)
5735 {
5736 	struct hclge_vport *vport = hclge_get_vport(handle);
5737 	struct hclge_dev *hdev = vport->back;
5738 	u16 dst_vport_id = 0, q_index = 0;
5739 	struct ethtool_rx_flow_spec *fs;
5740 	struct hclge_fd_rule *rule;
5741 	u32 unused = 0;
5742 	u8 action;
5743 	int ret;
5744 
5745 	if (!hnae3_dev_fd_supported(hdev)) {
5746 		dev_err(&hdev->pdev->dev,
			"flow director is not supported\n");
5748 		return -EOPNOTSUPP;
5749 	}
5750 
5751 	if (!hdev->fd_en) {
5752 		dev_err(&hdev->pdev->dev,
5753 			"please enable flow director first\n");
5754 		return -EOPNOTSUPP;
5755 	}
5756 
5757 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5758 
5759 	ret = hclge_fd_check_spec(hdev, fs, &unused);
5760 	if (ret)
5761 		return ret;
5762 
5763 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5764 		action = HCLGE_FD_ACTION_DROP_PACKET;
5765 	} else {
5766 		u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5767 		u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5768 		u16 tqps;
5769 
5770 		if (vf > hdev->num_req_vfs) {
5771 			dev_err(&hdev->pdev->dev,
5772 				"Error: vf id (%u) > max vf num (%u)\n",
5773 				vf, hdev->num_req_vfs);
5774 			return -EINVAL;
5775 		}
5776 
5777 		dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5778 		tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5779 
5780 		if (ring >= tqps) {
5781 			dev_err(&hdev->pdev->dev,
5782 				"Error: queue id (%u) > max tqp num (%u)\n",
5783 				ring, tqps - 1);
5784 			return -EINVAL;
5785 		}
5786 
5787 		action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5788 		q_index = ring;
5789 	}
5790 
5791 	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5792 	if (!rule)
5793 		return -ENOMEM;
5794 
5795 	ret = hclge_fd_get_tuple(hdev, fs, rule);
5796 	if (ret) {
5797 		kfree(rule);
5798 		return ret;
5799 	}
5800 
5801 	rule->flow_type = fs->flow_type;
5802 	rule->location = fs->location;
5803 	rule->unused_tuple = unused;
5804 	rule->vf_id = dst_vport_id;
5805 	rule->queue_id = q_index;
5806 	rule->action = action;
5807 	rule->rule_type = HCLGE_FD_EP_ACTIVE;
5808 
	/* to avoid rule conflicts, clear all arfs rules when the user
	 * configures a rule via ethtool
	 */
5812 	hclge_clear_arfs_rules(handle);
5813 
5814 	spin_lock_bh(&hdev->fd_rule_lock);
5815 	ret = hclge_fd_config_rule(hdev, rule);
5816 
5817 	spin_unlock_bh(&hdev->fd_rule_lock);
5818 
5819 	return ret;
5820 }
5821 
5822 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5823 			      struct ethtool_rxnfc *cmd)
5824 {
5825 	struct hclge_vport *vport = hclge_get_vport(handle);
5826 	struct hclge_dev *hdev = vport->back;
5827 	struct ethtool_rx_flow_spec *fs;
5828 	int ret;
5829 
5830 	if (!hnae3_dev_fd_supported(hdev))
5831 		return -EOPNOTSUPP;
5832 
5833 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5834 
5835 	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5836 		return -EINVAL;
5837 
5838 	if (!hclge_fd_rule_exist(hdev, fs->location)) {
5839 		dev_err(&hdev->pdev->dev,
			"Delete failed, rule %u does not exist\n", fs->location);
5841 		return -ENOENT;
5842 	}
5843 
5844 	ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5845 				   NULL, false);
5846 	if (ret)
5847 		return ret;
5848 
5849 	spin_lock_bh(&hdev->fd_rule_lock);
5850 	ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5851 
5852 	spin_unlock_bh(&hdev->fd_rule_lock);
5853 
5854 	return ret;
5855 }
5856 
5857 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5858 				     bool clear_list)
5859 {
5860 	struct hclge_vport *vport = hclge_get_vport(handle);
5861 	struct hclge_dev *hdev = vport->back;
5862 	struct hclge_fd_rule *rule;
5863 	struct hlist_node *node;
5864 	u16 location;
5865 
5866 	if (!hnae3_dev_fd_supported(hdev))
5867 		return;
5868 
5869 	spin_lock_bh(&hdev->fd_rule_lock);
5870 	for_each_set_bit(location, hdev->fd_bmap,
5871 			 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5872 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5873 				     NULL, false);
5874 
5875 	if (clear_list) {
5876 		hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5877 					  rule_node) {
5878 			hlist_del(&rule->rule_node);
5879 			kfree(rule);
5880 		}
5881 		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5882 		hdev->hclge_fd_rule_num = 0;
5883 		bitmap_zero(hdev->fd_bmap,
5884 			    hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5885 	}
5886 
5887 	spin_unlock_bh(&hdev->fd_rule_lock);
5888 }
5889 
5890 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5891 {
5892 	struct hclge_vport *vport = hclge_get_vport(handle);
5893 	struct hclge_dev *hdev = vport->back;
5894 	struct hclge_fd_rule *rule;
5895 	struct hlist_node *node;
5896 	int ret;
5897 
5898 	/* Return ok here, because reset error handling will check this
5899 	 * return value. If error is returned here, the reset process will
5900 	 * fail.
5901 	 */
5902 	if (!hnae3_dev_fd_supported(hdev))
5903 		return 0;
5904 
	/* if fd is disabled, the rules should not be restored during reset */
5906 	if (!hdev->fd_en)
5907 		return 0;
5908 
5909 	spin_lock_bh(&hdev->fd_rule_lock);
5910 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5911 		ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5912 		if (!ret)
5913 			ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5914 
5915 		if (ret) {
5916 			dev_warn(&hdev->pdev->dev,
5917 				 "Restore rule %u failed, remove it\n",
5918 				 rule->location);
5919 			clear_bit(rule->location, hdev->fd_bmap);
5920 			hlist_del(&rule->rule_node);
5921 			kfree(rule);
5922 			hdev->hclge_fd_rule_num--;
5923 		}
5924 	}
5925 
5926 	if (hdev->hclge_fd_rule_num)
5927 		hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5928 
5929 	spin_unlock_bh(&hdev->fd_rule_lock);
5930 
5931 	return 0;
5932 }
5933 
5934 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5935 				 struct ethtool_rxnfc *cmd)
5936 {
5937 	struct hclge_vport *vport = hclge_get_vport(handle);
5938 	struct hclge_dev *hdev = vport->back;
5939 
5940 	if (!hnae3_dev_fd_supported(hdev))
5941 		return -EOPNOTSUPP;
5942 
5943 	cmd->rule_cnt = hdev->hclge_fd_rule_num;
5944 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5945 
5946 	return 0;
5947 }
5948 
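/* The hclge_fd_get_*_info() helpers below convert a stored rule back into
 * the ethtool spec/mask layout; tuple fields marked unused in the rule are
 * reported with an all-zero mask.
 */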
5949 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
5950 				     struct ethtool_tcpip4_spec *spec,
5951 				     struct ethtool_tcpip4_spec *spec_mask)
5952 {
5953 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5954 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5955 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5956 
5957 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5958 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5959 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5960 
5961 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
5962 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5963 			0 : cpu_to_be16(rule->tuples_mask.src_port);
5964 
5965 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
5966 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
5967 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
5968 
5969 	spec->tos = rule->tuples.ip_tos;
5970 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5971 			0 : rule->tuples_mask.ip_tos;
5972 }
5973 
5974 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
5975 				  struct ethtool_usrip4_spec *spec,
5976 				  struct ethtool_usrip4_spec *spec_mask)
5977 {
5978 	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5979 	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
5980 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5981 
5982 	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5983 	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
5984 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5985 
5986 	spec->tos = rule->tuples.ip_tos;
5987 	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
5988 			0 : rule->tuples_mask.ip_tos;
5989 
5990 	spec->proto = rule->tuples.ip_proto;
5991 	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5992 			0 : rule->tuples_mask.ip_proto;
5993 
5994 	spec->ip_ver = ETH_RX_NFC_IP4;
5995 }
5996 
5997 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
5998 				     struct ethtool_tcpip6_spec *spec,
5999 				     struct ethtool_tcpip6_spec *spec_mask)
6000 {
6001 	cpu_to_be32_array(spec->ip6src,
6002 			  rule->tuples.src_ip, IPV6_SIZE);
6003 	cpu_to_be32_array(spec->ip6dst,
6004 			  rule->tuples.dst_ip, IPV6_SIZE);
6005 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6006 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6007 	else
6008 		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6009 				  IPV6_SIZE);
6010 
6011 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6012 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6013 	else
6014 		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6015 				  IPV6_SIZE);
6016 
6017 	spec->psrc = cpu_to_be16(rule->tuples.src_port);
6018 	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6019 			0 : cpu_to_be16(rule->tuples_mask.src_port);
6020 
6021 	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6022 	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6023 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
6024 }
6025 
6026 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6027 				  struct ethtool_usrip6_spec *spec,
6028 				  struct ethtool_usrip6_spec *spec_mask)
6029 {
6030 	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6031 	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6032 	if (rule->unused_tuple & BIT(INNER_SRC_IP))
6033 		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6034 	else
6035 		cpu_to_be32_array(spec_mask->ip6src,
6036 				  rule->tuples_mask.src_ip, IPV6_SIZE);
6037 
6038 	if (rule->unused_tuple & BIT(INNER_DST_IP))
6039 		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6040 	else
6041 		cpu_to_be32_array(spec_mask->ip6dst,
6042 				  rule->tuples_mask.dst_ip, IPV6_SIZE);
6043 
6044 	spec->l4_proto = rule->tuples.ip_proto;
6045 	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6046 			0 : rule->tuples_mask.ip_proto;
6047 }
6048 
6049 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6050 				    struct ethhdr *spec,
6051 				    struct ethhdr *spec_mask)
6052 {
6053 	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6054 	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6055 
6056 	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6057 		eth_zero_addr(spec_mask->h_source);
6058 	else
6059 		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6060 
6061 	if (rule->unused_tuple & BIT(INNER_DST_MAC))
6062 		eth_zero_addr(spec_mask->h_dest);
6063 	else
6064 		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6065 
6066 	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6067 	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6068 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6069 }
6070 
6071 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6072 				  struct hclge_fd_rule *rule)
6073 {
6074 	if (fs->flow_type & FLOW_EXT) {
6075 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6076 		fs->m_ext.vlan_tci =
6077 				rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6078 				cpu_to_be16(VLAN_VID_MASK) :
6079 				cpu_to_be16(rule->tuples_mask.vlan_tag1);
6080 	}
6081 
6082 	if (fs->flow_type & FLOW_MAC_EXT) {
6083 		ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6084 		if (rule->unused_tuple & BIT(INNER_DST_MAC))
6085 			eth_zero_addr(fs->m_u.ether_spec.h_dest);
6086 		else
6087 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
6088 					rule->tuples_mask.dst_mac);
6089 	}
6090 }
6091 
6092 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6093 				  struct ethtool_rxnfc *cmd)
6094 {
6095 	struct hclge_vport *vport = hclge_get_vport(handle);
6096 	struct hclge_fd_rule *rule = NULL;
6097 	struct hclge_dev *hdev = vport->back;
6098 	struct ethtool_rx_flow_spec *fs;
6099 	struct hlist_node *node2;
6100 
6101 	if (!hnae3_dev_fd_supported(hdev))
6102 		return -EOPNOTSUPP;
6103 
6104 	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6105 
6106 	spin_lock_bh(&hdev->fd_rule_lock);
6107 
6108 	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6109 		if (rule->location >= fs->location)
6110 			break;
6111 	}
6112 
6113 	if (!rule || fs->location != rule->location) {
6114 		spin_unlock_bh(&hdev->fd_rule_lock);
6115 
6116 		return -ENOENT;
6117 	}
6118 
6119 	fs->flow_type = rule->flow_type;
6120 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6121 	case SCTP_V4_FLOW:
6122 	case TCP_V4_FLOW:
6123 	case UDP_V4_FLOW:
6124 		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6125 					 &fs->m_u.tcp_ip4_spec);
6126 		break;
6127 	case IP_USER_FLOW:
6128 		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6129 				      &fs->m_u.usr_ip4_spec);
6130 		break;
6131 	case SCTP_V6_FLOW:
6132 	case TCP_V6_FLOW:
6133 	case UDP_V6_FLOW:
6134 		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6135 					 &fs->m_u.tcp_ip6_spec);
6136 		break;
6137 	case IPV6_USER_FLOW:
6138 		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6139 				      &fs->m_u.usr_ip6_spec);
6140 		break;
	/* The flow type of the fd rule has been checked before it was added
	 * to the rule list. Since the other flow types have been handled
	 * above, the default case must be ETHER_FLOW.
	 */
6145 	default:
6146 		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6147 					&fs->m_u.ether_spec);
6148 		break;
6149 	}
6150 
6151 	hclge_fd_get_ext_info(fs, rule);
6152 
6153 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6154 		fs->ring_cookie = RX_CLS_FLOW_DISC;
6155 	} else {
6156 		u64 vf_id;
6157 
6158 		fs->ring_cookie = rule->queue_id;
6159 		vf_id = rule->vf_id;
6160 		vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6161 		fs->ring_cookie |= vf_id;
6162 	}
6163 
6164 	spin_unlock_bh(&hdev->fd_rule_lock);
6165 
6166 	return 0;
6167 }
6168 
6169 static int hclge_get_all_rules(struct hnae3_handle *handle,
6170 			       struct ethtool_rxnfc *cmd, u32 *rule_locs)
6171 {
6172 	struct hclge_vport *vport = hclge_get_vport(handle);
6173 	struct hclge_dev *hdev = vport->back;
6174 	struct hclge_fd_rule *rule;
6175 	struct hlist_node *node2;
6176 	int cnt = 0;
6177 
6178 	if (!hnae3_dev_fd_supported(hdev))
6179 		return -EOPNOTSUPP;
6180 
6181 	cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6182 
6183 	spin_lock_bh(&hdev->fd_rule_lock);
6184 	hlist_for_each_entry_safe(rule, node2,
6185 				  &hdev->fd_rule_list, rule_node) {
6186 		if (cnt == cmd->rule_cnt) {
6187 			spin_unlock_bh(&hdev->fd_rule_lock);
6188 			return -EMSGSIZE;
6189 		}
6190 
6191 		rule_locs[cnt] = rule->location;
6192 		cnt++;
6193 	}
6194 
6195 	spin_unlock_bh(&hdev->fd_rule_lock);
6196 
6197 	cmd->rule_cnt = cnt;
6198 
6199 	return 0;
6200 }
6201 
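/* build the rule tuples for an aRFS filter from the dissected flow keys */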
6202 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6203 				     struct hclge_fd_rule_tuples *tuples)
6204 {
6205 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6206 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6207 
6208 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6209 	tuples->ip_proto = fkeys->basic.ip_proto;
6210 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6211 
6212 	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6213 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6214 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6215 	} else {
6216 		int i;
6217 
6218 		for (i = 0; i < IPV6_SIZE; i++) {
6219 			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6220 			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6221 		}
6222 	}
6223 }
6224 
/* traverse all rules, check whether an existing rule has the same tuples */
6226 static struct hclge_fd_rule *
6227 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6228 			  const struct hclge_fd_rule_tuples *tuples)
6229 {
6230 	struct hclge_fd_rule *rule = NULL;
6231 	struct hlist_node *node;
6232 
6233 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6234 		if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6235 			return rule;
6236 	}
6237 
6238 	return NULL;
6239 }
6240 
6241 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6242 				     struct hclge_fd_rule *rule)
6243 {
6244 	rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6245 			     BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6246 			     BIT(INNER_SRC_PORT);
6247 	rule->action = 0;
6248 	rule->vf_id = 0;
6249 	rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6250 	if (tuples->ether_proto == ETH_P_IP) {
6251 		if (tuples->ip_proto == IPPROTO_TCP)
6252 			rule->flow_type = TCP_V4_FLOW;
6253 		else
6254 			rule->flow_type = UDP_V4_FLOW;
6255 	} else {
6256 		if (tuples->ip_proto == IPPROTO_TCP)
6257 			rule->flow_type = TCP_V6_FLOW;
6258 		else
6259 			rule->flow_type = UDP_V6_FLOW;
6260 	}
6261 	memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6262 	memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6263 }
6264 
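/* aRFS entry point: reuse an existing rule that matches the flow tuples
 * (updating its queue id if needed), or allocate a new rule in the first
 * free location; returns the rule location on success.
 */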
6265 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6266 				      u16 flow_id, struct flow_keys *fkeys)
6267 {
6268 	struct hclge_vport *vport = hclge_get_vport(handle);
6269 	struct hclge_fd_rule_tuples new_tuples;
6270 	struct hclge_dev *hdev = vport->back;
6271 	struct hclge_fd_rule *rule;
6272 	u16 tmp_queue_id;
6273 	u16 bit_id;
6274 	int ret;
6275 
6276 	if (!hnae3_dev_fd_supported(hdev))
6277 		return -EOPNOTSUPP;
6278 
6279 	memset(&new_tuples, 0, sizeof(new_tuples));
6280 	hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6281 
6282 	spin_lock_bh(&hdev->fd_rule_lock);
6283 
	/* arfs should not work when an fd rule has already been added by
	 * the user
	 */
6287 	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6288 		spin_unlock_bh(&hdev->fd_rule_lock);
6289 		return -EOPNOTSUPP;
6290 	}
6291 
	/* check whether a flow director filter exists for this flow;
	 * if not, create a new filter for it;
	 * if a filter exists with a different queue id, modify the filter;
	 * if a filter exists with the same queue id, do nothing
	 */
6297 	rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6298 	if (!rule) {
6299 		bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6300 		if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6301 			spin_unlock_bh(&hdev->fd_rule_lock);
6302 			return -ENOSPC;
6303 		}
6304 
6305 		rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6306 		if (!rule) {
6307 			spin_unlock_bh(&hdev->fd_rule_lock);
6308 			return -ENOMEM;
6309 		}
6310 
6311 		set_bit(bit_id, hdev->fd_bmap);
6312 		rule->location = bit_id;
6313 		rule->flow_id = flow_id;
6314 		rule->queue_id = queue_id;
6315 		hclge_fd_build_arfs_rule(&new_tuples, rule);
6316 		ret = hclge_fd_config_rule(hdev, rule);
6317 
6318 		spin_unlock_bh(&hdev->fd_rule_lock);
6319 
6320 		if (ret)
6321 			return ret;
6322 
6323 		return rule->location;
6324 	}
6325 
6326 	spin_unlock_bh(&hdev->fd_rule_lock);
6327 
6328 	if (rule->queue_id == queue_id)
6329 		return rule->location;
6330 
6331 	tmp_queue_id = rule->queue_id;
6332 	rule->queue_id = queue_id;
6333 	ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6334 	if (ret) {
6335 		rule->queue_id = tmp_queue_id;
6336 		return ret;
6337 	}
6338 
6339 	return rule->location;
6340 }
6341 
6342 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6343 {
6344 #ifdef CONFIG_RFS_ACCEL
6345 	struct hnae3_handle *handle = &hdev->vport[0].nic;
6346 	struct hclge_fd_rule *rule;
6347 	struct hlist_node *node;
6348 	HLIST_HEAD(del_list);
6349 
6350 	spin_lock_bh(&hdev->fd_rule_lock);
6351 	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6352 		spin_unlock_bh(&hdev->fd_rule_lock);
6353 		return;
6354 	}
6355 	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6356 		if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6357 					rule->flow_id, rule->location)) {
6358 			hlist_del_init(&rule->rule_node);
6359 			hlist_add_head(&rule->rule_node, &del_list);
6360 			hdev->hclge_fd_rule_num--;
6361 			clear_bit(rule->location, hdev->fd_bmap);
6362 		}
6363 	}
6364 	spin_unlock_bh(&hdev->fd_rule_lock);
6365 
6366 	hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6367 		hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6368 				     rule->location, NULL, false);
6369 		kfree(rule);
6370 	}
6371 #endif
6372 }
6373 
6374 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6375 {
6376 #ifdef CONFIG_RFS_ACCEL
6377 	struct hclge_vport *vport = hclge_get_vport(handle);
6378 	struct hclge_dev *hdev = vport->back;
6379 
6380 	if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6381 		hclge_del_all_fd_entries(handle, true);
6382 #endif
6383 }
6384 
6385 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6386 {
6387 	struct hclge_vport *vport = hclge_get_vport(handle);
6388 	struct hclge_dev *hdev = vport->back;
6389 
6390 	return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6391 	       hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6392 }
6393 
6394 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
6395 {
6396 	struct hclge_vport *vport = hclge_get_vport(handle);
6397 	struct hclge_dev *hdev = vport->back;
6398 
6399 	return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
6400 }
6401 
6402 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6403 {
6404 	struct hclge_vport *vport = hclge_get_vport(handle);
6405 	struct hclge_dev *hdev = vport->back;
6406 
6407 	return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6408 }
6409 
6410 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6411 {
6412 	struct hclge_vport *vport = hclge_get_vport(handle);
6413 	struct hclge_dev *hdev = vport->back;
6414 
6415 	return hdev->rst_stats.hw_reset_done_cnt;
6416 }
6417 
6418 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6419 {
6420 	struct hclge_vport *vport = hclge_get_vport(handle);
6421 	struct hclge_dev *hdev = vport->back;
6422 	bool clear;
6423 
6424 	hdev->fd_en = enable;
6425 	clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6426 	if (!enable)
6427 		hclge_del_all_fd_entries(handle, clear);
6428 	else
6429 		hclge_restore_fd_entries(handle);
6430 }
6431 
6432 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6433 {
6434 	struct hclge_desc desc;
6435 	struct hclge_config_mac_mode_cmd *req =
6436 		(struct hclge_config_mac_mode_cmd *)desc.data;
6437 	u32 loop_en = 0;
6438 	int ret;
6439 
6440 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6441 
6442 	if (enable) {
6443 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6444 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6445 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6446 		hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6447 		hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6448 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6449 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6450 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6451 		hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6452 		hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6453 	}
6454 
6455 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6456 
6457 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6458 	if (ret)
6459 		dev_err(&hdev->pdev->dev,
6460 			"mac enable fail, ret =%d.\n", ret);
6461 }
6462 
6463 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6464 				     u8 switch_param, u8 param_mask)
6465 {
6466 	struct hclge_mac_vlan_switch_cmd *req;
6467 	struct hclge_desc desc;
6468 	u32 func_id;
6469 	int ret;
6470 
6471 	func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6472 	req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6473 
6474 	/* read current config parameter */
6475 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6476 				   true);
6477 	req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6478 	req->func_id = cpu_to_le32(func_id);
6479 
6480 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6481 	if (ret) {
6482 		dev_err(&hdev->pdev->dev,
6483 			"read mac vlan switch parameter fail, ret = %d\n", ret);
6484 		return ret;
6485 	}
6486 
6487 	/* modify and write new config parameter */
6488 	hclge_cmd_reuse_desc(&desc, false);
6489 	req->switch_param = (req->switch_param & param_mask) | switch_param;
6490 	req->param_mask = param_mask;
6491 
6492 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6493 	if (ret)
6494 		dev_err(&hdev->pdev->dev,
6495 			"set mac vlan switch parameter fail, ret = %d\n", ret);
6496 	return ret;
6497 }
6498 
6499 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6500 				       int link_ret)
6501 {
6502 #define HCLGE_PHY_LINK_STATUS_NUM  200
6503 
6504 	struct phy_device *phydev = hdev->hw.mac.phydev;
6505 	int i = 0;
6506 	int ret;
6507 
6508 	do {
6509 		ret = phy_read_status(phydev);
6510 		if (ret) {
6511 			dev_err(&hdev->pdev->dev,
6512 				"phy update link status fail, ret = %d\n", ret);
6513 			return;
6514 		}
6515 
6516 		if (phydev->link == link_ret)
6517 			break;
6518 
6519 		msleep(HCLGE_LINK_STATUS_MS);
6520 	} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6521 }
6522 
6523 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6524 {
6525 #define HCLGE_MAC_LINK_STATUS_NUM  100
6526 
6527 	int i = 0;
6528 	int ret;
6529 
6530 	do {
6531 		ret = hclge_get_mac_link_status(hdev);
6532 		if (ret < 0)
6533 			return ret;
6534 		else if (ret == link_ret)
6535 			return 0;
6536 
6537 		msleep(HCLGE_LINK_STATUS_MS);
6538 	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6539 	return -EBUSY;
6540 }
6541 
6542 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6543 					  bool is_phy)
6544 {
6545 #define HCLGE_LINK_STATUS_DOWN 0
6546 #define HCLGE_LINK_STATUS_UP   1
6547 
6548 	int link_ret;
6549 
6550 	link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6551 
6552 	if (is_phy)
6553 		hclge_phy_link_status_wait(hdev, link_ret);
6554 
6555 	return hclge_mac_link_status_wait(hdev, link_ret);
6556 }
6557 
6558 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6559 {
6560 	struct hclge_config_mac_mode_cmd *req;
6561 	struct hclge_desc desc;
6562 	u32 loop_en;
6563 	int ret;
6564 
6565 	req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
	/* 1 Read out the MAC mode config first */
6567 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6568 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6569 	if (ret) {
6570 		dev_err(&hdev->pdev->dev,
6571 			"mac loopback get fail, ret =%d.\n", ret);
6572 		return ret;
6573 	}
6574 
6575 	/* 2 Then setup the loopback flag */
6576 	loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6577 	hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6578 
6579 	req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6580 
	/* 3 Config mac work mode with the loopback flag
	 * and its original configuration parameters
	 */
6584 	hclge_cmd_reuse_desc(&desc, false);
6585 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6586 	if (ret)
6587 		dev_err(&hdev->pdev->dev,
6588 			"mac loopback set fail, ret =%d.\n", ret);
6589 	return ret;
6590 }
6591 
6592 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6593 				     enum hnae3_loop loop_mode)
6594 {
6595 #define HCLGE_SERDES_RETRY_MS	10
6596 #define HCLGE_SERDES_RETRY_NUM	100
6597 
6598 	struct hclge_serdes_lb_cmd *req;
6599 	struct hclge_desc desc;
6600 	int ret, i = 0;
6601 	u8 loop_mode_b;
6602 
6603 	req = (struct hclge_serdes_lb_cmd *)desc.data;
6604 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6605 
6606 	switch (loop_mode) {
6607 	case HNAE3_LOOP_SERIAL_SERDES:
6608 		loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6609 		break;
6610 	case HNAE3_LOOP_PARALLEL_SERDES:
6611 		loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6612 		break;
6613 	default:
6614 		dev_err(&hdev->pdev->dev,
6615 			"unsupported serdes loopback mode %d\n", loop_mode);
6616 		return -ENOTSUPP;
6617 	}
6618 
6619 	if (en) {
6620 		req->enable = loop_mode_b;
6621 		req->mask = loop_mode_b;
6622 	} else {
6623 		req->mask = loop_mode_b;
6624 	}
6625 
6626 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6627 	if (ret) {
6628 		dev_err(&hdev->pdev->dev,
6629 			"serdes loopback set fail, ret = %d\n", ret);
6630 		return ret;
6631 	}
6632 
6633 	do {
6634 		msleep(HCLGE_SERDES_RETRY_MS);
6635 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6636 					   true);
6637 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6638 		if (ret) {
6639 			dev_err(&hdev->pdev->dev,
				"serdes loopback get fail, ret = %d\n", ret);
6641 			return ret;
6642 		}
6643 	} while (++i < HCLGE_SERDES_RETRY_NUM &&
6644 		 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6645 
6646 	if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6647 		dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6648 		return -EBUSY;
6649 	} else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6650 		dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6651 		return -EIO;
6652 	}
6653 	return ret;
6654 }
6655 
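/* configure serdes loopback first, then bring the MAC up or down to match
 * and wait for the MAC link status to follow
 */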
6656 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6657 				     enum hnae3_loop loop_mode)
6658 {
6659 	int ret;
6660 
6661 	ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6662 	if (ret)
6663 		return ret;
6664 
6665 	hclge_cfg_mac_mode(hdev, en);
6666 
6667 	ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6668 	if (ret)
6669 		dev_err(&hdev->pdev->dev,
6670 			"serdes loopback config mac mode timeout\n");
6671 
6672 	return ret;
6673 }
6674 
6675 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6676 				     struct phy_device *phydev)
6677 {
6678 	int ret;
6679 
6680 	if (!phydev->suspended) {
6681 		ret = phy_suspend(phydev);
6682 		if (ret)
6683 			return ret;
6684 	}
6685 
6686 	ret = phy_resume(phydev);
6687 	if (ret)
6688 		return ret;
6689 
6690 	return phy_loopback(phydev, true);
6691 }
6692 
6693 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6694 				      struct phy_device *phydev)
6695 {
6696 	int ret;
6697 
6698 	ret = phy_loopback(phydev, false);
6699 	if (ret)
6700 		return ret;
6701 
6702 	return phy_suspend(phydev);
6703 }
6704 
6705 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6706 {
6707 	struct phy_device *phydev = hdev->hw.mac.phydev;
6708 	int ret;
6709 
6710 	if (!phydev)
6711 		return -ENOTSUPP;
6712 
6713 	if (en)
6714 		ret = hclge_enable_phy_loopback(hdev, phydev);
6715 	else
6716 		ret = hclge_disable_phy_loopback(hdev, phydev);
6717 	if (ret) {
6718 		dev_err(&hdev->pdev->dev,
6719 			"set phy loopback fail, ret = %d\n", ret);
6720 		return ret;
6721 	}
6722 
6723 	hclge_cfg_mac_mode(hdev, en);
6724 
6725 	ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6726 	if (ret)
6727 		dev_err(&hdev->pdev->dev,
6728 			"phy loopback config mac mode timeout\n");
6729 
6730 	return ret;
6731 }
6732 
6733 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6734 			    int stream_id, bool enable)
6735 {
6736 	struct hclge_desc desc;
6737 	struct hclge_cfg_com_tqp_queue_cmd *req =
6738 		(struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6739 	int ret;
6740 
6741 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6742 	req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6743 	req->stream_id = cpu_to_le16(stream_id);
6744 	if (enable)
6745 		req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6746 
6747 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6748 	if (ret)
6749 		dev_err(&hdev->pdev->dev,
6750 			"Tqp enable fail, status =%d.\n", ret);
6751 	return ret;
6752 }
6753 
6754 static int hclge_set_loopback(struct hnae3_handle *handle,
6755 			      enum hnae3_loop loop_mode, bool en)
6756 {
6757 	struct hclge_vport *vport = hclge_get_vport(handle);
6758 	struct hnae3_knic_private_info *kinfo;
6759 	struct hclge_dev *hdev = vport->back;
6760 	int i, ret;
6761 
6762 	/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6763 	 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6764 	 * the same, the packets are looped back in the SSU. If SSU loopback
6765 	 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6766 	 */
6767 	if (hdev->pdev->revision >= 0x21) {
6768 		u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6769 
6770 		ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6771 						HCLGE_SWITCH_ALW_LPBK_MASK);
6772 		if (ret)
6773 			return ret;
6774 	}
6775 
6776 	switch (loop_mode) {
6777 	case HNAE3_LOOP_APP:
6778 		ret = hclge_set_app_loopback(hdev, en);
6779 		break;
6780 	case HNAE3_LOOP_SERIAL_SERDES:
6781 	case HNAE3_LOOP_PARALLEL_SERDES:
6782 		ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6783 		break;
6784 	case HNAE3_LOOP_PHY:
6785 		ret = hclge_set_phy_loopback(hdev, en);
6786 		break;
6787 	default:
6788 		ret = -ENOTSUPP;
6789 		dev_err(&hdev->pdev->dev,
6790 			"loop_mode %d is not supported\n", loop_mode);
6791 		break;
6792 	}
6793 
6794 	if (ret)
6795 		return ret;
6796 
6797 	kinfo = &vport->nic.kinfo;
6798 	for (i = 0; i < kinfo->num_tqps; i++) {
6799 		ret = hclge_tqp_enable(hdev, i, 0, en);
6800 		if (ret)
6801 			return ret;
6802 	}
6803 
6804 	return 0;
6805 }
6806 
6807 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6808 {
6809 	int ret;
6810 
6811 	ret = hclge_set_app_loopback(hdev, false);
6812 	if (ret)
6813 		return ret;
6814 
6815 	ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6816 	if (ret)
6817 		return ret;
6818 
6819 	return hclge_cfg_serdes_loopback(hdev, false,
6820 					 HNAE3_LOOP_PARALLEL_SERDES);
6821 }
6822 
6823 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6824 {
6825 	struct hclge_vport *vport = hclge_get_vport(handle);
6826 	struct hnae3_knic_private_info *kinfo;
6827 	struct hnae3_queue *queue;
6828 	struct hclge_tqp *tqp;
6829 	int i;
6830 
6831 	kinfo = &vport->nic.kinfo;
6832 	for (i = 0; i < kinfo->num_tqps; i++) {
6833 		queue = handle->kinfo.tqp[i];
6834 		tqp = container_of(queue, struct hclge_tqp, q);
6835 		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6836 	}
6837 }
6838 
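/* wait a bounded time for the service task to finish an in-flight link
 * update; stop early once the task has made progress or the updating flag
 * is cleared
 */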
6839 static void hclge_flush_link_update(struct hclge_dev *hdev)
6840 {
6841 #define HCLGE_FLUSH_LINK_TIMEOUT	100000
6842 
6843 	unsigned long last = hdev->serv_processed_cnt;
6844 	int i = 0;
6845 
6846 	while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6847 	       i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6848 	       last == hdev->serv_processed_cnt)
6849 		usleep_range(1, 1);
6850 }
6851 
6852 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6853 {
6854 	struct hclge_vport *vport = hclge_get_vport(handle);
6855 	struct hclge_dev *hdev = vport->back;
6856 
6857 	if (enable) {
6858 		hclge_task_schedule(hdev, 0);
6859 	} else {
6860 		/* Set the DOWN flag here to disable link updating */
6861 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
6862 
6863 		/* flush memory to make sure DOWN is seen by service task */
6864 		smp_mb__before_atomic();
6865 		hclge_flush_link_update(hdev);
6866 	}
6867 }
6868 
6869 static int hclge_ae_start(struct hnae3_handle *handle)
6870 {
6871 	struct hclge_vport *vport = hclge_get_vport(handle);
6872 	struct hclge_dev *hdev = vport->back;
6873 
6874 	/* mac enable */
6875 	hclge_cfg_mac_mode(hdev, true);
6876 	clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6877 	hdev->hw.mac.link = 0;
6878 
6879 	/* reset tqp stats */
6880 	hclge_reset_tqp_stats(handle);
6881 
6882 	hclge_mac_start_phy(hdev);
6883 
6884 	return 0;
6885 }
6886 
6887 static void hclge_ae_stop(struct hnae3_handle *handle)
6888 {
6889 	struct hclge_vport *vport = hclge_get_vport(handle);
6890 	struct hclge_dev *hdev = vport->back;
6891 	int i;
6892 
6893 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
6894 
6895 	hclge_clear_arfs_rules(handle);
6896 
	/* If it is not a PF reset, the firmware will disable the MAC,
	 * so it only needs to stop the phy here.
	 */
6900 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6901 	    hdev->reset_type != HNAE3_FUNC_RESET) {
6902 		hclge_mac_stop_phy(hdev);
6903 		hclge_update_link_status(hdev);
6904 		return;
6905 	}
6906 
6907 	for (i = 0; i < handle->kinfo.num_tqps; i++)
6908 		hclge_reset_tqp(handle, i);
6909 
6910 	hclge_config_mac_tnl_int(hdev, false);
6911 
6912 	/* Mac disable */
6913 	hclge_cfg_mac_mode(hdev, false);
6914 
6915 	hclge_mac_stop_phy(hdev);
6916 
6917 	/* reset tqp stats */
6918 	hclge_reset_tqp_stats(handle);
6919 	hclge_update_link_status(hdev);
6920 }
6921 
6922 int hclge_vport_start(struct hclge_vport *vport)
6923 {
6924 	struct hclge_dev *hdev = vport->back;
6925 
6926 	set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6927 	vport->last_active_jiffies = jiffies;
6928 
6929 	if (test_bit(vport->vport_id, hdev->vport_config_block)) {
6930 		if (vport->vport_id) {
6931 			hclge_restore_mac_table_common(vport);
6932 			hclge_restore_vport_vlan_table(vport);
6933 		} else {
6934 			hclge_restore_hw_table(hdev);
6935 		}
6936 	}
6937 
6938 	clear_bit(vport->vport_id, hdev->vport_config_block);
6939 
6940 	return 0;
6941 }
6942 
6943 void hclge_vport_stop(struct hclge_vport *vport)
6944 {
6945 	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6946 }
6947 
6948 static int hclge_client_start(struct hnae3_handle *handle)
6949 {
6950 	struct hclge_vport *vport = hclge_get_vport(handle);
6951 
6952 	return hclge_vport_start(vport);
6953 }
6954 
6955 static void hclge_client_stop(struct hnae3_handle *handle)
6956 {
6957 	struct hclge_vport *vport = hclge_get_vport(handle);
6958 
6959 	hclge_vport_stop(vport);
6960 }
6961 
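/* translate the MAC VLAN table command response into an errno according to
 * which table operation (add/remove/lookup) was issued
 */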
6962 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6963 					 u16 cmdq_resp, u8  resp_code,
6964 					 enum hclge_mac_vlan_tbl_opcode op)
6965 {
6966 	struct hclge_dev *hdev = vport->back;
6967 
6968 	if (cmdq_resp) {
6969 		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6971 			cmdq_resp);
6972 		return -EIO;
6973 	}
6974 
6975 	if (op == HCLGE_MAC_VLAN_ADD) {
6976 		if (!resp_code || resp_code == 1)
6977 			return 0;
6978 		else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
6979 			 resp_code == HCLGE_ADD_MC_OVERFLOW)
6980 			return -ENOSPC;
6981 
6982 		dev_err(&hdev->pdev->dev,
6983 			"add mac addr failed for undefined, code=%u.\n",
6984 			resp_code);
6985 		return -EIO;
6986 	} else if (op == HCLGE_MAC_VLAN_REMOVE) {
6987 		if (!resp_code) {
6988 			return 0;
6989 		} else if (resp_code == 1) {
6990 			dev_dbg(&hdev->pdev->dev,
6991 				"remove mac addr failed for miss.\n");
6992 			return -ENOENT;
6993 		}
6994 
6995 		dev_err(&hdev->pdev->dev,
6996 			"remove mac addr failed for undefined, code=%u.\n",
6997 			resp_code);
6998 		return -EIO;
6999 	} else if (op == HCLGE_MAC_VLAN_LKUP) {
7000 		if (!resp_code) {
7001 			return 0;
7002 		} else if (resp_code == 1) {
7003 			dev_dbg(&hdev->pdev->dev,
7004 				"lookup mac addr failed for miss.\n");
7005 			return -ENOENT;
7006 		}
7007 
7008 		dev_err(&hdev->pdev->dev,
7009 			"lookup mac addr failed for undefined, code=%u.\n",
7010 			resp_code);
7011 		return -EIO;
7012 	}
7013 
7014 	dev_err(&hdev->pdev->dev,
7015 		"unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7016 
7017 	return -EINVAL;
7018 }
7019 
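/* set or clear the bit for @vfid in the vfid bitmap that spans desc[1] and
 * desc[2]; the first 192 bits live in desc[1]
 */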
7020 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7021 {
7022 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7023 
7024 	unsigned int word_num;
7025 	unsigned int bit_num;
7026 
7027 	if (vfid > 255 || vfid < 0)
7028 		return -EIO;
7029 
7030 	if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7031 		word_num = vfid / 32;
7032 		bit_num  = vfid % 32;
7033 		if (clr)
7034 			desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7035 		else
7036 			desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7037 	} else {
7038 		word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7039 		bit_num  = vfid % 32;
7040 		if (clr)
7041 			desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7042 		else
7043 			desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7044 	}
7045 
7046 	return 0;
7047 }
7048 
7049 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7050 {
7051 #define HCLGE_DESC_NUMBER 3
7052 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7053 	int i, j;
7054 
7055 	for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7056 		for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7057 			if (desc[i].data[j])
7058 				return false;
7059 
7060 	return true;
7061 }
7062 
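/* pack the 6-byte MAC address into the hi32/lo16 fields of the table entry
 * and flag the entry as multicast when requested
 */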
7063 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7064 				   const u8 *addr, bool is_mc)
7065 {
7066 	const unsigned char *mac_addr = addr;
7067 	u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7068 		       (mac_addr[0]) | (mac_addr[1] << 8);
7069 	u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
7070 
7071 	hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7072 	if (is_mc) {
7073 		hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
7074 		hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7075 	}
7076 
7077 	new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7078 	new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7079 }
7080 
7081 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7082 				     struct hclge_mac_vlan_tbl_entry_cmd *req)
7083 {
7084 	struct hclge_dev *hdev = vport->back;
7085 	struct hclge_desc desc;
7086 	u8 resp_code;
7087 	u16 retval;
7088 	int ret;
7089 
7090 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7091 
7092 	memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7093 
7094 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7095 	if (ret) {
7096 		dev_err(&hdev->pdev->dev,
7097 			"del mac addr failed for cmd_send, ret =%d.\n",
7098 			ret);
7099 		return ret;
7100 	}
7101 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7102 	retval = le16_to_cpu(desc.retval);
7103 
7104 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7105 					     HCLGE_MAC_VLAN_REMOVE);
7106 }
7107 
7108 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7109 				     struct hclge_mac_vlan_tbl_entry_cmd *req,
7110 				     struct hclge_desc *desc,
7111 				     bool is_mc)
7112 {
7113 	struct hclge_dev *hdev = vport->back;
7114 	u8 resp_code;
7115 	u16 retval;
7116 	int ret;
7117 
7118 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7119 	if (is_mc) {
7120 		desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7121 		memcpy(desc[0].data,
7122 		       req,
7123 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7124 		hclge_cmd_setup_basic_desc(&desc[1],
7125 					   HCLGE_OPC_MAC_VLAN_ADD,
7126 					   true);
7127 		desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7128 		hclge_cmd_setup_basic_desc(&desc[2],
7129 					   HCLGE_OPC_MAC_VLAN_ADD,
7130 					   true);
7131 		ret = hclge_cmd_send(&hdev->hw, desc, 3);
7132 	} else {
7133 		memcpy(desc[0].data,
7134 		       req,
7135 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7136 		ret = hclge_cmd_send(&hdev->hw, desc, 1);
7137 	}
7138 	if (ret) {
7139 		dev_err(&hdev->pdev->dev,
7140 			"lookup mac addr failed for cmd_send, ret =%d.\n",
7141 			ret);
7142 		return ret;
7143 	}
7144 	resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7145 	retval = le16_to_cpu(desc[0].retval);
7146 
7147 	return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7148 					     HCLGE_MAC_VLAN_LKUP);
7149 }
7150 
7151 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7152 				  struct hclge_mac_vlan_tbl_entry_cmd *req,
7153 				  struct hclge_desc *mc_desc)
7154 {
7155 	struct hclge_dev *hdev = vport->back;
7156 	int cfg_status;
7157 	u8 resp_code;
7158 	u16 retval;
7159 	int ret;
7160 
7161 	if (!mc_desc) {
7162 		struct hclge_desc desc;
7163 
7164 		hclge_cmd_setup_basic_desc(&desc,
7165 					   HCLGE_OPC_MAC_VLAN_ADD,
7166 					   false);
7167 		memcpy(desc.data, req,
7168 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7169 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7170 		resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7171 		retval = le16_to_cpu(desc.retval);
7172 
7173 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7174 							   resp_code,
7175 							   HCLGE_MAC_VLAN_ADD);
7176 	} else {
7177 		hclge_cmd_reuse_desc(&mc_desc[0], false);
7178 		mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7179 		hclge_cmd_reuse_desc(&mc_desc[1], false);
7180 		mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7181 		hclge_cmd_reuse_desc(&mc_desc[2], false);
7182 		mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7183 		memcpy(mc_desc[0].data, req,
7184 		       sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7185 		ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7186 		resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7187 		retval = le16_to_cpu(mc_desc[0].retval);
7188 
7189 		cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7190 							   resp_code,
7191 							   HCLGE_MAC_VLAN_ADD);
7192 	}
7193 
7194 	if (ret) {
7195 		dev_err(&hdev->pdev->dev,
7196 			"add mac addr failed for cmd_send, ret =%d.\n",
7197 			ret);
7198 		return ret;
7199 	}
7200 
7201 	return cfg_status;
7202 }
7203 
7204 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7205 			       u16 *allocated_size)
7206 {
7207 	struct hclge_umv_spc_alc_cmd *req;
7208 	struct hclge_desc desc;
7209 	int ret;
7210 
7211 	req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7212 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7213 
7214 	req->space_size = cpu_to_le32(space_size);
7215 
7216 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7217 	if (ret) {
7218 		dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
7219 			ret);
7220 		return ret;
7221 	}
7222 
7223 	*allocated_size = le32_to_cpu(desc.data[1]);
7224 
7225 	return 0;
7226 }
7227 
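/* Allocate the unicast MAC vlan (UMV) table space from firmware and split it
 * among the vports: the space is divided into (num_alloc_vport + 1) equal
 * quotas, each vport keeps one as its private quota, and the remaining quota
 * plus the division remainder forms the pool shared by all vports.
 */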
7228 static int hclge_init_umv_space(struct hclge_dev *hdev)
7229 {
7230 	u16 allocated_size = 0;
7231 	int ret;
7232 
7233 	ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
7234 	if (ret)
7235 		return ret;
7236 
7237 	if (allocated_size < hdev->wanted_umv_size)
7238 		dev_warn(&hdev->pdev->dev,
7239 			 "failed to alloc umv space, want %u, get %u\n",
7240 			 hdev->wanted_umv_size, allocated_size);
7241 
7242 	hdev->max_umv_size = allocated_size;
7243 	hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
7244 	hdev->share_umv_size = hdev->priv_umv_size +
7245 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7246 
7247 	return 0;
7248 }
7249 
7250 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7251 {
7252 	struct hclge_vport *vport;
7253 	int i;
7254 
7255 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7256 		vport = &hdev->vport[i];
7257 		vport->used_umv_num = 0;
7258 	}
7259 
7260 	mutex_lock(&hdev->vport_lock);
7261 	hdev->share_umv_size = hdev->priv_umv_size +
7262 			hdev->max_umv_size % (hdev->num_alloc_vport + 1);
7263 	mutex_unlock(&hdev->vport_lock);
7264 }
7265 
7266 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
7267 {
7268 	struct hclge_dev *hdev = vport->back;
7269 	bool is_full;
7270 
7271 	if (need_lock)
7272 		mutex_lock(&hdev->vport_lock);
7273 
7274 	is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7275 		   hdev->share_umv_size == 0);
7276 
7277 	if (need_lock)
7278 		mutex_unlock(&hdev->vport_lock);
7279 
7280 	return is_full;
7281 }
7282 
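/* Account for one UMV entry being added (is_free == false) or removed
 * (is_free == true). A vport consumes its private quota first; once
 * used_umv_num reaches priv_umv_size, further entries are charged to the
 * shared pool. Callers update these counters under hdev->vport_lock.
 */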
7283 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7284 {
7285 	struct hclge_dev *hdev = vport->back;
7286 
7287 	if (is_free) {
7288 		if (vport->used_umv_num > hdev->priv_umv_size)
7289 			hdev->share_umv_size++;
7290 
7291 		if (vport->used_umv_num > 0)
7292 			vport->used_umv_num--;
7293 	} else {
7294 		if (vport->used_umv_num >= hdev->priv_umv_size &&
7295 		    hdev->share_umv_size > 0)
7296 			hdev->share_umv_size--;
7297 		vport->used_umv_num++;
7298 	}
7299 }
7300 
7301 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
7302 						  const u8 *mac_addr)
7303 {
7304 	struct hclge_mac_node *mac_node, *tmp;
7305 
7306 	list_for_each_entry_safe(mac_node, tmp, list, node)
7307 		if (ether_addr_equal(mac_addr, mac_node->mac_addr))
7308 			return mac_node;
7309 
7310 	return NULL;
7311 }
7312 
7313 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
7314 				  enum HCLGE_MAC_NODE_STATE state)
7315 {
7316 	switch (state) {
7317 	/* from set_rx_mode or tmp_add_list */
7318 	case HCLGE_MAC_TO_ADD:
7319 		if (mac_node->state == HCLGE_MAC_TO_DEL)
7320 			mac_node->state = HCLGE_MAC_ACTIVE;
7321 		break;
7322 	/* only from set_rx_mode */
7323 	case HCLGE_MAC_TO_DEL:
7324 		if (mac_node->state == HCLGE_MAC_TO_ADD) {
7325 			list_del(&mac_node->node);
7326 			kfree(mac_node);
7327 		} else {
7328 			mac_node->state = HCLGE_MAC_TO_DEL;
7329 		}
7330 		break;
7331 	/* only from tmp_add_list, the mac_node->state won't be
7332 	 * ACTIVE.
7333 	 */
7334 	case HCLGE_MAC_ACTIVE:
7335 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7336 			mac_node->state = HCLGE_MAC_ACTIVE;
7337 
7338 		break;
7339 	}
7340 }
7341 
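/* Record a requested MAC address change (TO_ADD or TO_DEL) in the vport's
 * unicast or multicast list and mark the vport with MAC_TBL_CHANGE. The
 * hardware MAC table itself is programmed later by the periodic sync path
 * (hclge_sync_mac_table -> hclge_sync_vport_mac_table).
 */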
7342 int hclge_update_mac_list(struct hclge_vport *vport,
7343 			  enum HCLGE_MAC_NODE_STATE state,
7344 			  enum HCLGE_MAC_ADDR_TYPE mac_type,
7345 			  const unsigned char *addr)
7346 {
7347 	struct hclge_dev *hdev = vport->back;
7348 	struct hclge_mac_node *mac_node;
7349 	struct list_head *list;
7350 
7351 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7352 		&vport->uc_mac_list : &vport->mc_mac_list;
7353 
7354 	spin_lock_bh(&vport->mac_list_lock);
7355 
7356 	/* if the mac addr is already in the mac list, no need to add a new
7357 	 * one into it; just check the mac addr state and convert it to a
7358 	 * new state, remove it, or do nothing.
7359 	 */
7360 	mac_node = hclge_find_mac_node(list, addr);
7361 	if (mac_node) {
7362 		hclge_update_mac_node(mac_node, state);
7363 		spin_unlock_bh(&vport->mac_list_lock);
7364 		set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7365 		return 0;
7366 	}
7367 
7368 	/* if this address was never added, there is no need to delete it */
7369 	if (state == HCLGE_MAC_TO_DEL) {
7370 		spin_unlock_bh(&vport->mac_list_lock);
7371 		dev_err(&hdev->pdev->dev,
7372 			"failed to delete address %pM from mac list\n",
7373 			addr);
7374 		return -ENOENT;
7375 	}
7376 
7377 	mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
7378 	if (!mac_node) {
7379 		spin_unlock_bh(&vport->mac_list_lock);
7380 		return -ENOMEM;
7381 	}
7382 
7383 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
7384 
7385 	mac_node->state = state;
7386 	ether_addr_copy(mac_node->mac_addr, addr);
7387 	list_add_tail(&mac_node->node, list);
7388 
7389 	spin_unlock_bh(&vport->mac_list_lock);
7390 
7391 	return 0;
7392 }
7393 
7394 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7395 			     const unsigned char *addr)
7396 {
7397 	struct hclge_vport *vport = hclge_get_vport(handle);
7398 
7399 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
7400 				     addr);
7401 }
7402 
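/* Program one unicast address into the hardware MAC_VLAN table:
 * - lookup returns -ENOENT: new address, add it if private or shared UMV
 *   space is left, otherwise return -ENOSPC;
 * - lookup returns 0: the address already exists, treat it as success;
 * - any other error is returned as-is.
 */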
7403 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7404 			     const unsigned char *addr)
7405 {
7406 	struct hclge_dev *hdev = vport->back;
7407 	struct hclge_mac_vlan_tbl_entry_cmd req;
7408 	struct hclge_desc desc;
7409 	u16 egress_port = 0;
7410 	int ret;
7411 
7412 	/* mac addr check */
7413 	if (is_zero_ether_addr(addr) ||
7414 	    is_broadcast_ether_addr(addr) ||
7415 	    is_multicast_ether_addr(addr)) {
7416 		dev_err(&hdev->pdev->dev,
7417 			"Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7418 			 addr, is_zero_ether_addr(addr),
7419 			 is_broadcast_ether_addr(addr),
7420 			 is_multicast_ether_addr(addr));
7421 		return -EINVAL;
7422 	}
7423 
7424 	memset(&req, 0, sizeof(req));
7425 
7426 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7427 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7428 
7429 	req.egress_port = cpu_to_le16(egress_port);
7430 
7431 	hclge_prepare_mac_addr(&req, addr, false);
7432 
7433 	/* Look up the mac address in the mac_vlan table, and add
7434 	 * it if the entry does not exist. Duplicate unicast entries
7435 	 * are not allowed in the mac vlan table.
7436 	 */
7437 	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7438 	if (ret == -ENOENT) {
7439 		mutex_lock(&hdev->vport_lock);
7440 		if (!hclge_is_umv_space_full(vport, false)) {
7441 			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7442 			if (!ret)
7443 				hclge_update_umv_space(vport, false);
7444 			mutex_unlock(&hdev->vport_lock);
7445 			return ret;
7446 		}
7447 		mutex_unlock(&hdev->vport_lock);
7448 
7449 		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
7450 			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7451 				hdev->priv_umv_size);
7452 
7453 		return -ENOSPC;
7454 	}
7455 
7456 	/* check if we just hit a duplicate entry */
7457 	if (!ret) {
7458 		dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7459 			 vport->vport_id, addr);
7460 		return 0;
7461 	}
7462 
7463 	dev_err(&hdev->pdev->dev,
7464 		"PF failed to add unicast entry(%pM) in the MAC table\n",
7465 		addr);
7466 
7467 	return ret;
7468 }
7469 
7470 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7471 			    const unsigned char *addr)
7472 {
7473 	struct hclge_vport *vport = hclge_get_vport(handle);
7474 
7475 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
7476 				     addr);
7477 }
7478 
7479 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7480 			    const unsigned char *addr)
7481 {
7482 	struct hclge_dev *hdev = vport->back;
7483 	struct hclge_mac_vlan_tbl_entry_cmd req;
7484 	int ret;
7485 
7486 	/* mac addr check */
7487 	if (is_zero_ether_addr(addr) ||
7488 	    is_broadcast_ether_addr(addr) ||
7489 	    is_multicast_ether_addr(addr)) {
7490 		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7491 			addr);
7492 		return -EINVAL;
7493 	}
7494 
7495 	memset(&req, 0, sizeof(req));
7496 	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7497 	hclge_prepare_mac_addr(&req, addr, false);
7498 	ret = hclge_remove_mac_vlan_tbl(vport, &req);
7499 	if (!ret) {
7500 		mutex_lock(&hdev->vport_lock);
7501 		hclge_update_umv_space(vport, true);
7502 		mutex_unlock(&hdev->vport_lock);
7503 	} else if (ret == -ENOENT) {
7504 		ret = 0;
7505 	}
7506 
7507 	return ret;
7508 }
7509 
7510 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7511 			     const unsigned char *addr)
7512 {
7513 	struct hclge_vport *vport = hclge_get_vport(handle);
7514 
7515 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
7516 				     addr);
7517 }
7518 
7519 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7520 			     const unsigned char *addr)
7521 {
7522 	struct hclge_dev *hdev = vport->back;
7523 	struct hclge_mac_vlan_tbl_entry_cmd req;
7524 	struct hclge_desc desc[3];
7525 	int status;
7526 
7527 	/* mac addr check */
7528 	if (!is_multicast_ether_addr(addr)) {
7529 		dev_err(&hdev->pdev->dev,
7530 			"Add mc mac err! invalid mac:%pM.\n",
7531 			 addr);
7532 		return -EINVAL;
7533 	}
7534 	memset(&req, 0, sizeof(req));
7535 	hclge_prepare_mac_addr(&req, addr, true);
7536 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7537 	if (status) {
7538 		/* This mac addr does not exist, add a new entry for it */
7539 		memset(desc[0].data, 0, sizeof(desc[0].data));
7540 		memset(desc[1].data, 0, sizeof(desc[1].data));
7541 		memset(desc[2].data, 0, sizeof(desc[2].data));
7542 	}
7543 	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7544 	if (status)
7545 		return status;
7546 	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7547 
7548 	/* if the table has already overflowed, do not print each time */
7549 	if (status == -ENOSPC &&
7550 	    !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
7551 		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7552 
7553 	return status;
7554 }
7555 
7556 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7557 			    const unsigned char *addr)
7558 {
7559 	struct hclge_vport *vport = hclge_get_vport(handle);
7560 
7561 	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
7562 				     addr);
7563 }
7564 
7565 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7566 			    const unsigned char *addr)
7567 {
7568 	struct hclge_dev *hdev = vport->back;
7569 	struct hclge_mac_vlan_tbl_entry_cmd req;
7570 	struct hclge_desc desc[3];
7571 	int status;
7572 
7573 	/* mac addr check */
7574 	if (!is_multicast_ether_addr(addr)) {
7575 		dev_dbg(&hdev->pdev->dev,
7576 			"Remove mc mac err! invalid mac:%pM.\n",
7577 			 addr);
7578 		return -EINVAL;
7579 	}
7580 
7581 	memset(&req, 0, sizeof(req));
7582 	hclge_prepare_mac_addr(&req, addr, true);
7583 	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7584 	if (!status) {
7585 		/* This mac addr exists, remove this handle's VFID from it */
7586 		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7587 		if (status)
7588 			return status;
7589 
7590 		if (hclge_is_all_function_id_zero(desc))
7591 			/* All the vfids are zero, so delete this entry */
7592 			status = hclge_remove_mac_vlan_tbl(vport, &req);
7593 		else
7594 			/* Not all the vfids are zero, just update the vfids */
7595 			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7596 
7597 	} else if (status == -ENOENT) {
7598 		status = 0;
7599 	}
7600 
7601 	return status;
7602 }
7603 
7604 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
7605 				      struct list_head *list,
7606 				      int (*sync)(struct hclge_vport *,
7607 						  const unsigned char *))
7608 {
7609 	struct hclge_mac_node *mac_node, *tmp;
7610 	int ret;
7611 
7612 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7613 		ret = sync(vport, mac_node->mac_addr);
7614 		if (!ret) {
7615 			mac_node->state = HCLGE_MAC_ACTIVE;
7616 		} else {
7617 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7618 				&vport->state);
7619 			break;
7620 		}
7621 	}
7622 }
7623 
7624 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
7625 					struct list_head *list,
7626 					int (*unsync)(struct hclge_vport *,
7627 						      const unsigned char *))
7628 {
7629 	struct hclge_mac_node *mac_node, *tmp;
7630 	int ret;
7631 
7632 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7633 		ret = unsync(vport, mac_node->mac_addr);
7634 		if (!ret || ret == -ENOENT) {
7635 			list_del(&mac_node->node);
7636 			kfree(mac_node);
7637 		} else {
7638 			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
7639 				&vport->state);
7640 			break;
7641 		}
7642 	}
7643 }
7644 
7645 static bool hclge_sync_from_add_list(struct list_head *add_list,
7646 				     struct list_head *mac_list)
7647 {
7648 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7649 	bool all_added = true;
7650 
7651 	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
7652 		if (mac_node->state == HCLGE_MAC_TO_ADD)
7653 			all_added = false;
7654 
7655 		/* if the mac address from tmp_add_list is not in the
7656 		 * uc/mc_mac_list, a TO_DEL request was received during the
7657 		 * time window of adding the mac address into the mac table.
7658 		 * If the mac_node state is ACTIVE, change it to TO_DEL so it
7659 		 * will be removed next time. Otherwise it must be TO_ADD,
7660 		 * meaning this address has not been added into the mac table
7661 		 * yet, so just remove the mac node.
7662 		 */
7663 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7664 		if (new_node) {
7665 			hclge_update_mac_node(new_node, mac_node->state);
7666 			list_del(&mac_node->node);
7667 			kfree(mac_node);
7668 		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
7669 			mac_node->state = HCLGE_MAC_TO_DEL;
7670 			list_del(&mac_node->node);
7671 			list_add_tail(&mac_node->node, mac_list);
7672 		} else {
7673 			list_del(&mac_node->node);
7674 			kfree(mac_node);
7675 		}
7676 	}
7677 
7678 	return all_added;
7679 }
7680 
7681 static void hclge_sync_from_del_list(struct list_head *del_list,
7682 				     struct list_head *mac_list)
7683 {
7684 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7685 
7686 	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
7687 		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
7688 		if (new_node) {
7689 			/* If the mac addr exists in the mac list, a new TO_ADD
7690 			 * request was received during the time window of
7691 			 * configuring the mac address. The mac node state is
7692 			 * TO_ADD, and the address is already in the hardware
7693 			 * (because the delete failed), so we just need to
7694 			 * change the mac node state to ACTIVE.
7695 			 */
7696 			new_node->state = HCLGE_MAC_ACTIVE;
7697 			list_del(&mac_node->node);
7698 			kfree(mac_node);
7699 		} else {
7700 			list_del(&mac_node->node);
7701 			list_add_tail(&mac_node->node, mac_list);
7702 		}
7703 	}
7704 }
7705 
7706 static void hclge_update_overflow_flags(struct hclge_vport *vport,
7707 					enum HCLGE_MAC_ADDR_TYPE mac_type,
7708 					bool is_all_added)
7709 {
7710 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7711 		if (is_all_added)
7712 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
7713 		else
7714 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
7715 	} else {
7716 		if (is_all_added)
7717 			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
7718 		else
7719 			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
7720 	}
7721 }
7722 
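/* Sync one vport's unicast or multicast list to hardware in two phases:
 * under mac_list_lock, move TO_DEL nodes to tmp_del_list and copy TO_ADD
 * nodes to tmp_add_list; then issue the hardware commands outside the lock
 * (deletes first to free table space); finally merge the results back into
 * the list and update the overflow promiscuous flags.
 */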
7723 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
7724 				       enum HCLGE_MAC_ADDR_TYPE mac_type)
7725 {
7726 	struct hclge_mac_node *mac_node, *tmp, *new_node;
7727 	struct list_head tmp_add_list, tmp_del_list;
7728 	struct list_head *list;
7729 	bool all_added;
7730 
7731 	INIT_LIST_HEAD(&tmp_add_list);
7732 	INIT_LIST_HEAD(&tmp_del_list);
7733 
7734 	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
7735 	 * we can add/delete these mac addrs outside the spin lock
7736 	 */
7737 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7738 		&vport->uc_mac_list : &vport->mc_mac_list;
7739 
7740 	spin_lock_bh(&vport->mac_list_lock);
7741 
7742 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7743 		switch (mac_node->state) {
7744 		case HCLGE_MAC_TO_DEL:
7745 			list_del(&mac_node->node);
7746 			list_add_tail(&mac_node->node, &tmp_del_list);
7747 			break;
7748 		case HCLGE_MAC_TO_ADD:
7749 			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
7750 			if (!new_node)
7751 				goto stop_traverse;
7752 			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
7753 			new_node->state = mac_node->state;
7754 			list_add_tail(&new_node->node, &tmp_add_list);
7755 			break;
7756 		default:
7757 			break;
7758 		}
7759 	}
7760 
7761 stop_traverse:
7762 	spin_unlock_bh(&vport->mac_list_lock);
7763 
7764 	/* delete first, in order to get max mac table space for adding */
7765 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7766 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7767 					    hclge_rm_uc_addr_common);
7768 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7769 					  hclge_add_uc_addr_common);
7770 	} else {
7771 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7772 					    hclge_rm_mc_addr_common);
7773 		hclge_sync_vport_mac_list(vport, &tmp_add_list,
7774 					  hclge_add_mc_addr_common);
7775 	}
7776 
7777 	/* if some mac addresses failed to be added/deleted, move them back
7778 	 * to the mac_list and retry next time.
7779 	 */
7780 	spin_lock_bh(&vport->mac_list_lock);
7781 
7782 	hclge_sync_from_del_list(&tmp_del_list, list);
7783 	all_added = hclge_sync_from_add_list(&tmp_add_list, list);
7784 
7785 	spin_unlock_bh(&vport->mac_list_lock);
7786 
7787 	hclge_update_overflow_flags(vport, mac_type, all_added);
7788 }
7789 
7790 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
7791 {
7792 	struct hclge_dev *hdev = vport->back;
7793 
7794 	if (test_bit(vport->vport_id, hdev->vport_config_block))
7795 		return false;
7796 
7797 	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
7798 		return true;
7799 
7800 	return false;
7801 }
7802 
7803 static void hclge_sync_mac_table(struct hclge_dev *hdev)
7804 {
7805 	int i;
7806 
7807 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7808 		struct hclge_vport *vport = &hdev->vport[i];
7809 
7810 		if (!hclge_need_sync_mac_table(vport))
7811 			continue;
7812 
7813 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
7814 		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
7815 	}
7816 }
7817 
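/* Remove all of a vport's MAC addresses from hardware. With is_del_list set
 * the list entries are dropped as well; otherwise the vport is marked in
 * vport_config_block so the sync task skips it, and ACTIVE entries are kept
 * in the list as TO_ADD so they can be restored once the VF reset finishes.
 */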
7818 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7819 				  enum HCLGE_MAC_ADDR_TYPE mac_type)
7820 {
7821 	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
7822 	struct hclge_mac_node *mac_cfg, *tmp;
7823 	struct hclge_dev *hdev = vport->back;
7824 	struct list_head tmp_del_list, *list;
7825 	int ret;
7826 
7827 	if (mac_type == HCLGE_MAC_ADDR_UC) {
7828 		list = &vport->uc_mac_list;
7829 		unsync = hclge_rm_uc_addr_common;
7830 	} else {
7831 		list = &vport->mc_mac_list;
7832 		unsync = hclge_rm_mc_addr_common;
7833 	}
7834 
7835 	INIT_LIST_HEAD(&tmp_del_list);
7836 
7837 	if (!is_del_list)
7838 		set_bit(vport->vport_id, hdev->vport_config_block);
7839 
7840 	spin_lock_bh(&vport->mac_list_lock);
7841 
7842 	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7843 		switch (mac_cfg->state) {
7844 		case HCLGE_MAC_TO_DEL:
7845 		case HCLGE_MAC_ACTIVE:
7846 			list_del(&mac_cfg->node);
7847 			list_add_tail(&mac_cfg->node, &tmp_del_list);
7848 			break;
7849 		case HCLGE_MAC_TO_ADD:
7850 			if (is_del_list) {
7851 				list_del(&mac_cfg->node);
7852 				kfree(mac_cfg);
7853 			}
7854 			break;
7855 		}
7856 	}
7857 
7858 	spin_unlock_bh(&vport->mac_list_lock);
7859 
7860 	list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
7861 		ret = unsync(vport, mac_cfg->mac_addr);
7862 		if (!ret || ret == -ENOENT) {
7863 			/* clear all mac addrs from hardware, but keep these
7864 			 * mac addrs in the mac list, and restore them after
7865 			 * the vf reset finishes.
7866 			 */
7867 			if (!is_del_list &&
7868 			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
7869 				mac_cfg->state = HCLGE_MAC_TO_ADD;
7870 			} else {
7871 				list_del(&mac_cfg->node);
7872 				kfree(mac_cfg);
7873 			}
7874 		} else if (is_del_list) {
7875 			mac_cfg->state = HCLGE_MAC_TO_DEL;
7876 		}
7877 	}
7878 
7879 	spin_lock_bh(&vport->mac_list_lock);
7880 
7881 	hclge_sync_from_del_list(&tmp_del_list, list);
7882 
7883 	spin_unlock_bh(&vport->mac_list_lock);
7884 }
7885 
7886 /* remove all mac addresses when uninitializing */
7887 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
7888 					enum HCLGE_MAC_ADDR_TYPE mac_type)
7889 {
7890 	struct hclge_mac_node *mac_node, *tmp;
7891 	struct hclge_dev *hdev = vport->back;
7892 	struct list_head tmp_del_list, *list;
7893 
7894 	INIT_LIST_HEAD(&tmp_del_list);
7895 
7896 	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7897 		&vport->uc_mac_list : &vport->mc_mac_list;
7898 
7899 	spin_lock_bh(&vport->mac_list_lock);
7900 
7901 	list_for_each_entry_safe(mac_node, tmp, list, node) {
7902 		switch (mac_node->state) {
7903 		case HCLGE_MAC_TO_DEL:
7904 		case HCLGE_MAC_ACTIVE:
7905 			list_del(&mac_node->node);
7906 			list_add_tail(&mac_node->node, &tmp_del_list);
7907 			break;
7908 		case HCLGE_MAC_TO_ADD:
7909 			list_del(&mac_node->node);
7910 			kfree(mac_node);
7911 			break;
7912 		}
7913 	}
7914 
7915 	spin_unlock_bh(&vport->mac_list_lock);
7916 
7917 	if (mac_type == HCLGE_MAC_ADDR_UC)
7918 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7919 					    hclge_rm_uc_addr_common);
7920 	else
7921 		hclge_unsync_vport_mac_list(vport, &tmp_del_list,
7922 					    hclge_rm_mc_addr_common);
7923 
7924 	if (!list_empty(&tmp_del_list))
7925 		dev_warn(&hdev->pdev->dev,
7926 			 "failed to completely uninit %s mac list for vport %u\n",
7927 			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
7928 			 vport->vport_id);
7929 
7930 	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
7931 		list_del(&mac_node->node);
7932 		kfree(mac_node);
7933 	}
7934 }
7935 
7936 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
7937 {
7938 	struct hclge_vport *vport;
7939 	int i;
7940 
7941 	for (i = 0; i < hdev->num_alloc_vport; i++) {
7942 		vport = &hdev->vport[i];
7943 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
7944 		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
7945 	}
7946 }
7947 
7948 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7949 					      u16 cmdq_resp, u8 resp_code)
7950 {
7951 #define HCLGE_ETHERTYPE_SUCCESS_ADD		0
7952 #define HCLGE_ETHERTYPE_ALREADY_ADD		1
7953 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
7954 #define HCLGE_ETHERTYPE_KEY_CONFLICT		3
7955 
7956 	int return_status;
7957 
7958 	if (cmdq_resp) {
7959 		dev_err(&hdev->pdev->dev,
7960 			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7961 			cmdq_resp);
7962 		return -EIO;
7963 	}
7964 
7965 	switch (resp_code) {
7966 	case HCLGE_ETHERTYPE_SUCCESS_ADD:
7967 	case HCLGE_ETHERTYPE_ALREADY_ADD:
7968 		return_status = 0;
7969 		break;
7970 	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7971 		dev_err(&hdev->pdev->dev,
7972 			"add mac ethertype failed for manager table overflow.\n");
7973 		return_status = -EIO;
7974 		break;
7975 	case HCLGE_ETHERTYPE_KEY_CONFLICT:
7976 		dev_err(&hdev->pdev->dev,
7977 			"add mac ethertype failed for key conflict.\n");
7978 		return_status = -EIO;
7979 		break;
7980 	default:
7981 		dev_err(&hdev->pdev->dev,
7982 			"add mac ethertype failed for undefined, code=%u.\n",
7983 			resp_code);
7984 		return_status = -EIO;
7985 	}
7986 
7987 	return return_status;
7988 }
7989 
7990 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7991 				     u8 *mac_addr)
7992 {
7993 	struct hclge_mac_vlan_tbl_entry_cmd req;
7994 	struct hclge_dev *hdev = vport->back;
7995 	struct hclge_desc desc;
7996 	u16 egress_port = 0;
7997 	int i;
7998 
7999 	if (is_zero_ether_addr(mac_addr))
8000 		return false;
8001 
8002 	memset(&req, 0, sizeof(req));
8003 	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8004 			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8005 	req.egress_port = cpu_to_le16(egress_port);
8006 	hclge_prepare_mac_addr(&req, mac_addr, false);
8007 
8008 	if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
8009 		return true;
8010 
8011 	vf_idx += HCLGE_VF_VPORT_START_NUM;
8012 	for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
8013 		if (i != vf_idx &&
8014 		    ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
8015 			return true;
8016 
8017 	return false;
8018 }
8019 
8020 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8021 			    u8 *mac_addr)
8022 {
8023 	struct hclge_vport *vport = hclge_get_vport(handle);
8024 	struct hclge_dev *hdev = vport->back;
8025 
8026 	vport = hclge_get_vf_vport(hdev, vf);
8027 	if (!vport)
8028 		return -EINVAL;
8029 
8030 	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8031 		dev_info(&hdev->pdev->dev,
8032 			 "Specified MAC(=%pM) is same as before, no change committed!\n",
8033 			 mac_addr);
8034 		return 0;
8035 	}
8036 
8037 	if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
8038 		dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
8039 			mac_addr);
8040 		return -EEXIST;
8041 	}
8042 
8043 	ether_addr_copy(vport->vf_info.mac, mac_addr);
8044 
8045 	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8046 		dev_info(&hdev->pdev->dev,
8047 			 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
8048 			 vf, mac_addr);
8049 		return hclge_inform_reset_assert_to_vf(vport);
8050 	}
8051 
8052 	dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
8053 		 vf, mac_addr);
8054 	return 0;
8055 }
8056 
8057 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
8058 			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
8059 {
8060 	struct hclge_desc desc;
8061 	u8 resp_code;
8062 	u16 retval;
8063 	int ret;
8064 
8065 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
8066 	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
8067 
8068 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8069 	if (ret) {
8070 		dev_err(&hdev->pdev->dev,
8071 			"add mac ethertype failed for cmd_send, ret =%d.\n",
8072 			ret);
8073 		return ret;
8074 	}
8075 
8076 	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8077 	retval = le16_to_cpu(desc.retval);
8078 
8079 	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
8080 }
8081 
8082 static int init_mgr_tbl(struct hclge_dev *hdev)
8083 {
8084 	int ret;
8085 	int i;
8086 
8087 	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
8088 		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
8089 		if (ret) {
8090 			dev_err(&hdev->pdev->dev,
8091 				"add mac ethertype failed, ret =%d.\n",
8092 				ret);
8093 			return ret;
8094 		}
8095 	}
8096 
8097 	return 0;
8098 }
8099 
8100 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
8101 {
8102 	struct hclge_vport *vport = hclge_get_vport(handle);
8103 	struct hclge_dev *hdev = vport->back;
8104 
8105 	ether_addr_copy(p, hdev->hw.mac.mac_addr);
8106 }
8107 
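/* Update the unicast list when the device MAC address changes: make sure the
 * new address is present and moved to the list head, and mark the old address
 * TO_DEL (or drop it if it was still only TO_ADD). Expected to be called with
 * vport->mac_list_lock held, as hclge_set_mac_addr() does.
 */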
8108 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
8109 				       const u8 *old_addr, const u8 *new_addr)
8110 {
8111 	struct list_head *list = &vport->uc_mac_list;
8112 	struct hclge_mac_node *old_node, *new_node;
8113 
8114 	new_node = hclge_find_mac_node(list, new_addr);
8115 	if (!new_node) {
8116 		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8117 		if (!new_node)
8118 			return -ENOMEM;
8119 
8120 		new_node->state = HCLGE_MAC_TO_ADD;
8121 		ether_addr_copy(new_node->mac_addr, new_addr);
8122 		list_add(&new_node->node, list);
8123 	} else {
8124 		if (new_node->state == HCLGE_MAC_TO_DEL)
8125 			new_node->state = HCLGE_MAC_ACTIVE;
8126 
8127 		/* make sure the new addr is at the list head, otherwise the
8128 		 * dev addr may not be re-added into the mac table due to the
8129 		 * umv space limitation after a global/imp reset, which clears
8130 		 * the mac table in hardware.
8131 		 */
8132 		list_move(&new_node->node, list);
8133 	}
8134 
8135 	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
8136 		old_node = hclge_find_mac_node(list, old_addr);
8137 		if (old_node) {
8138 			if (old_node->state == HCLGE_MAC_TO_ADD) {
8139 				list_del(&old_node->node);
8140 				kfree(old_node);
8141 			} else {
8142 				old_node->state = HCLGE_MAC_TO_DEL;
8143 			}
8144 		}
8145 	}
8146 
8147 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8148 
8149 	return 0;
8150 }
8151 
8152 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
8153 			      bool is_first)
8154 {
8155 	const unsigned char *new_addr = (const unsigned char *)p;
8156 	struct hclge_vport *vport = hclge_get_vport(handle);
8157 	struct hclge_dev *hdev = vport->back;
8158 	unsigned char *old_addr = NULL;
8159 	int ret;
8160 
8161 	/* mac addr check */
8162 	if (is_zero_ether_addr(new_addr) ||
8163 	    is_broadcast_ether_addr(new_addr) ||
8164 	    is_multicast_ether_addr(new_addr)) {
8165 		dev_err(&hdev->pdev->dev,
8166 			"change uc mac err! invalid mac: %pM.\n",
8167 			 new_addr);
8168 		return -EINVAL;
8169 	}
8170 
8171 	ret = hclge_pause_addr_cfg(hdev, new_addr);
8172 	if (ret) {
8173 		dev_err(&hdev->pdev->dev,
8174 			"failed to configure mac pause address, ret = %d\n",
8175 			ret);
8176 		return ret;
8177 	}
8178 
8179 	if (!is_first)
8180 		old_addr = hdev->hw.mac.mac_addr;
8181 
8182 	spin_lock_bh(&vport->mac_list_lock);
8183 	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
8184 	if (ret) {
8185 		dev_err(&hdev->pdev->dev,
8186 			"failed to change the mac addr:%pM, ret = %d\n",
8187 			new_addr, ret);
8188 		spin_unlock_bh(&vport->mac_list_lock);
8189 
8190 		if (!is_first)
8191 			hclge_pause_addr_cfg(hdev, old_addr);
8192 
8193 		return ret;
8194 	}
8195 	/* we must update the dev addr under the spin lock to prevent it from
8196 	 * being removed by the set_rx_mode path.
8197 	 */
8198 	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
8199 	spin_unlock_bh(&vport->mac_list_lock);
8200 
8201 	hclge_task_schedule(hdev, 0);
8202 
8203 	return 0;
8204 }
8205 
8206 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
8207 			  int cmd)
8208 {
8209 	struct hclge_vport *vport = hclge_get_vport(handle);
8210 	struct hclge_dev *hdev = vport->back;
8211 
8212 	if (!hdev->hw.mac.phydev)
8213 		return -EOPNOTSUPP;
8214 
8215 	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
8216 }
8217 
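/* Enable or disable VLAN filter engines via a read-modify-write: read the
 * current vlan_fe bits for the given vlan_type/vf_id, then set or clear only
 * the bits in fe_type and write the config back, leaving the other
 * filter-enable bits untouched.
 */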
8218 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
8219 				      u8 fe_type, bool filter_en, u8 vf_id)
8220 {
8221 	struct hclge_vlan_filter_ctrl_cmd *req;
8222 	struct hclge_desc desc;
8223 	int ret;
8224 
8225 	/* read current vlan filter parameter */
8226 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
8227 	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
8228 	req->vlan_type = vlan_type;
8229 	req->vf_id = vf_id;
8230 
8231 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8232 	if (ret) {
8233 		dev_err(&hdev->pdev->dev,
8234 			"failed to get vlan filter config, ret = %d.\n", ret);
8235 		return ret;
8236 	}
8237 
8238 	/* modify and write new config parameter */
8239 	hclge_cmd_reuse_desc(&desc, false);
8240 	req->vlan_fe = filter_en ?
8241 			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
8242 
8243 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8244 	if (ret)
8245 		dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
8246 			ret);
8247 
8248 	return ret;
8249 }
8250 
8251 #define HCLGE_FILTER_TYPE_VF		0
8252 #define HCLGE_FILTER_TYPE_PORT		1
8253 #define HCLGE_FILTER_FE_EGRESS_V1_B	BIT(0)
8254 #define HCLGE_FILTER_FE_NIC_INGRESS_B	BIT(0)
8255 #define HCLGE_FILTER_FE_NIC_EGRESS_B	BIT(1)
8256 #define HCLGE_FILTER_FE_ROCE_INGRESS_B	BIT(2)
8257 #define HCLGE_FILTER_FE_ROCE_EGRESS_B	BIT(3)
8258 #define HCLGE_FILTER_FE_EGRESS		(HCLGE_FILTER_FE_NIC_EGRESS_B \
8259 					| HCLGE_FILTER_FE_ROCE_EGRESS_B)
8260 #define HCLGE_FILTER_FE_INGRESS		(HCLGE_FILTER_FE_NIC_INGRESS_B \
8261 					| HCLGE_FILTER_FE_ROCE_INGRESS_B)
8262 
8263 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
8264 {
8265 	struct hclge_vport *vport = hclge_get_vport(handle);
8266 	struct hclge_dev *hdev = vport->back;
8267 
8268 	if (hdev->pdev->revision >= 0x21) {
8269 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8270 					   HCLGE_FILTER_FE_EGRESS, enable, 0);
8271 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8272 					   HCLGE_FILTER_FE_INGRESS, enable, 0);
8273 	} else {
8274 		hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8275 					   HCLGE_FILTER_FE_EGRESS_V1_B, enable,
8276 					   0);
8277 	}
8278 	if (enable)
8279 		handle->netdev_flags |= HNAE3_VLAN_FLTR;
8280 	else
8281 		handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
8282 }
8283 
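/* Add (is_kill == false) or delete (is_kill == true) one VLAN id in the
 * per-VF VLAN filter. The target function is selected by a bitmap that spans
 * two descriptors: byte vfid / 8, bit vfid % 8, with bytes beyond
 * HCLGE_MAX_VF_BYTES carried in the second descriptor.
 */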
8284 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
8285 				    bool is_kill, u16 vlan,
8286 				    __be16 proto)
8287 {
8288 	struct hclge_vport *vport = &hdev->vport[vfid];
8289 	struct hclge_vlan_filter_vf_cfg_cmd *req0;
8290 	struct hclge_vlan_filter_vf_cfg_cmd *req1;
8291 	struct hclge_desc desc[2];
8292 	u8 vf_byte_val;
8293 	u8 vf_byte_off;
8294 	int ret;
8295 
8296 	/* if the vf vlan table is full, firmware disables the vf vlan filter,
8297 	 * so it is neither possible nor necessary to add a new vlan id to it.
8298 	 * If spoof check is enabled and the vf vlan table is full, reject the
8299 	 * new vlan, because tx packets with that vlan id would be dropped.
8300 	 */
8301 	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
8302 		if (vport->vf_info.spoofchk && vlan) {
8303 			dev_err(&hdev->pdev->dev,
8304 				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
8305 			return -EPERM;
8306 		}
8307 		return 0;
8308 	}
8309 
8310 	hclge_cmd_setup_basic_desc(&desc[0],
8311 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8312 	hclge_cmd_setup_basic_desc(&desc[1],
8313 				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
8314 
8315 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8316 
8317 	vf_byte_off = vfid / 8;
8318 	vf_byte_val = 1 << (vfid % 8);
8319 
8320 	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
8321 	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
8322 
8323 	req0->vlan_id  = cpu_to_le16(vlan);
8324 	req0->vlan_cfg = is_kill;
8325 
8326 	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
8327 		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
8328 	else
8329 		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
8330 
8331 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
8332 	if (ret) {
8333 		dev_err(&hdev->pdev->dev,
8334 			"Send vf vlan command fail, ret =%d.\n",
8335 			ret);
8336 		return ret;
8337 	}
8338 
8339 	if (!is_kill) {
8340 #define HCLGE_VF_VLAN_NO_ENTRY	2
8341 		if (!req0->resp_code || req0->resp_code == 1)
8342 			return 0;
8343 
8344 		if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
8345 			set_bit(vfid, hdev->vf_vlan_full);
8346 			dev_warn(&hdev->pdev->dev,
8347 				 "vf vlan table is full, vf vlan filter is disabled\n");
8348 			return 0;
8349 		}
8350 
8351 		dev_err(&hdev->pdev->dev,
8352 			"Add vf vlan filter fail, ret =%u.\n",
8353 			req0->resp_code);
8354 	} else {
8355 #define HCLGE_VF_VLAN_DEL_NO_FOUND	1
8356 		if (!req0->resp_code)
8357 			return 0;
8358 
8359 		/* the vf vlan filter is disabled when the vf vlan table is
8360 		 * full, so new vlan ids are never added to the vf vlan table.
8361 		 * Just return 0 without a warning to avoid massive verbose
8362 		 * logs when unloading.
8363 		 */
8364 		if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
8365 			return 0;
8366 
8367 		dev_err(&hdev->pdev->dev,
8368 			"Kill vf vlan filter fail, ret =%u.\n",
8369 			req0->resp_code);
8370 	}
8371 
8372 	return -EIO;
8373 }
8374 
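/* Add or delete one VLAN id in the port-level VLAN filter. The VLAN id is
 * addressed as a block of HCLGE_VLAN_ID_OFFSET_STEP ids (vlan_offset), a byte
 * inside that block and a bit inside that byte; vlan_cfg carries the is_kill
 * flag.
 */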
8375 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
8376 				      u16 vlan_id, bool is_kill)
8377 {
8378 	struct hclge_vlan_filter_pf_cfg_cmd *req;
8379 	struct hclge_desc desc;
8380 	u8 vlan_offset_byte_val;
8381 	u8 vlan_offset_byte;
8382 	u8 vlan_offset_160;
8383 	int ret;
8384 
8385 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
8386 
8387 	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
8388 	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
8389 			   HCLGE_VLAN_BYTE_SIZE;
8390 	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
8391 
8392 	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
8393 	req->vlan_offset = vlan_offset_160;
8394 	req->vlan_cfg = is_kill;
8395 	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
8396 
8397 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8398 	if (ret)
8399 		dev_err(&hdev->pdev->dev,
8400 			"port vlan command, send fail, ret =%d.\n", ret);
8401 	return ret;
8402 }
8403 
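/* Apply a VLAN filter change for one vport: always update the per-VF filter,
 * track membership in hdev->vlan_table, and touch the port-level filter only
 * when the first vport joins the VLAN (add) or the last one leaves it (kill).
 */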
8404 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
8405 				    u16 vport_id, u16 vlan_id,
8406 				    bool is_kill)
8407 {
8408 	u16 vport_idx, vport_num = 0;
8409 	int ret;
8410 
8411 	if (is_kill && !vlan_id)
8412 		return 0;
8413 
8414 	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
8415 				       proto);
8416 	if (ret) {
8417 		dev_err(&hdev->pdev->dev,
8418 			"Set %u vport vlan filter config fail, ret =%d.\n",
8419 			vport_id, ret);
8420 		return ret;
8421 	}
8422 
8423 	/* vlan 0 may be added twice when 8021q module is enabled */
8424 	if (!is_kill && !vlan_id &&
8425 	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
8426 		return 0;
8427 
8428 	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
8429 		dev_err(&hdev->pdev->dev,
8430 			"Add port vlan failed, vport %u is already in vlan %u\n",
8431 			vport_id, vlan_id);
8432 		return -EINVAL;
8433 	}
8434 
8435 	if (is_kill &&
8436 	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
8437 		dev_err(&hdev->pdev->dev,
8438 			"Delete port vlan failed, vport %u is not in vlan %u\n",
8439 			vport_id, vlan_id);
8440 		return -EINVAL;
8441 	}
8442 
8443 	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
8444 		vport_num++;
8445 
8446 	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
8447 		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
8448 						 is_kill);
8449 
8450 	return ret;
8451 }
8452 
8453 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
8454 {
8455 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
8456 	struct hclge_vport_vtag_tx_cfg_cmd *req;
8457 	struct hclge_dev *hdev = vport->back;
8458 	struct hclge_desc desc;
8459 	u16 bmap_index;
8460 	int status;
8461 
8462 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8463 
8464 	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8465 	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8466 	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8467 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8468 		      vcfg->accept_tag1 ? 1 : 0);
8469 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8470 		      vcfg->accept_untag1 ? 1 : 0);
8471 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8472 		      vcfg->accept_tag2 ? 1 : 0);
8473 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8474 		      vcfg->accept_untag2 ? 1 : 0);
8475 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8476 		      vcfg->insert_tag1_en ? 1 : 0);
8477 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8478 		      vcfg->insert_tag2_en ? 1 : 0);
8479 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8480 
8481 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8482 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8483 			HCLGE_VF_NUM_PER_BYTE;
8484 	req->vf_bitmap[bmap_index] =
8485 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8486 
8487 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8488 	if (status)
8489 		dev_err(&hdev->pdev->dev,
8490 			"Send port txvlan cfg command fail, ret =%d\n",
8491 			status);
8492 
8493 	return status;
8494 }
8495 
8496 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8497 {
8498 	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8499 	struct hclge_vport_vtag_rx_cfg_cmd *req;
8500 	struct hclge_dev *hdev = vport->back;
8501 	struct hclge_desc desc;
8502 	u16 bmap_index;
8503 	int status;
8504 
8505 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8506 
8507 	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8508 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8509 		      vcfg->strip_tag1_en ? 1 : 0);
8510 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8511 		      vcfg->strip_tag2_en ? 1 : 0);
8512 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8513 		      vcfg->vlan1_vlan_prionly ? 1 : 0);
8514 	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8515 		      vcfg->vlan2_vlan_prionly ? 1 : 0);
8516 
8517 	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8518 	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8519 			HCLGE_VF_NUM_PER_BYTE;
8520 	req->vf_bitmap[bmap_index] =
8521 		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8522 
8523 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8524 	if (status)
8525 		dev_err(&hdev->pdev->dev,
8526 			"Send port rxvlan cfg command fail, ret =%d\n",
8527 			status);
8528 
8529 	return status;
8530 }
8531 
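/* Configure TX insertion and RX stripping of VLAN tags based on the port
 * based VLAN state: when it is disabled, tag1 from the stack is accepted,
 * nothing is inserted and tag2 stripping follows rx_vlan_offload_en; when it
 * is enabled, the port VLAN is inserted as tag1, tag2 is always stripped and
 * tag1 stripping follows rx_vlan_offload_en.
 */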
8532 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8533 				  u16 port_base_vlan_state,
8534 				  u16 vlan_tag)
8535 {
8536 	int ret;
8537 
8538 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8539 		vport->txvlan_cfg.accept_tag1 = true;
8540 		vport->txvlan_cfg.insert_tag1_en = false;
8541 		vport->txvlan_cfg.default_tag1 = 0;
8542 	} else {
8543 		vport->txvlan_cfg.accept_tag1 = false;
8544 		vport->txvlan_cfg.insert_tag1_en = true;
8545 		vport->txvlan_cfg.default_tag1 = vlan_tag;
8546 	}
8547 
8548 	vport->txvlan_cfg.accept_untag1 = true;
8549 
8550 	/* accept_tag2 and accept_untag2 are not supported on
8551 	 * pdev revision(0x20); newer revisions support them, but
8552 	 * these two fields cannot be configured by the user.
8553 	 */
8554 	vport->txvlan_cfg.accept_tag2 = true;
8555 	vport->txvlan_cfg.accept_untag2 = true;
8556 	vport->txvlan_cfg.insert_tag2_en = false;
8557 	vport->txvlan_cfg.default_tag2 = 0;
8558 
8559 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8560 		vport->rxvlan_cfg.strip_tag1_en = false;
8561 		vport->rxvlan_cfg.strip_tag2_en =
8562 				vport->rxvlan_cfg.rx_vlan_offload_en;
8563 	} else {
8564 		vport->rxvlan_cfg.strip_tag1_en =
8565 				vport->rxvlan_cfg.rx_vlan_offload_en;
8566 		vport->rxvlan_cfg.strip_tag2_en = true;
8567 	}
8568 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8569 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8570 
8571 	ret = hclge_set_vlan_tx_offload_cfg(vport);
8572 	if (ret)
8573 		return ret;
8574 
8575 	return hclge_set_vlan_rx_offload_cfg(vport);
8576 }
8577 
8578 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8579 {
8580 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8581 	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8582 	struct hclge_desc desc;
8583 	int status;
8584 
8585 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8586 	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8587 	rx_req->ot_fst_vlan_type =
8588 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8589 	rx_req->ot_sec_vlan_type =
8590 		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8591 	rx_req->in_fst_vlan_type =
8592 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8593 	rx_req->in_sec_vlan_type =
8594 		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8595 
8596 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8597 	if (status) {
8598 		dev_err(&hdev->pdev->dev,
8599 			"Send rxvlan protocol type command fail, ret =%d\n",
8600 			status);
8601 		return status;
8602 	}
8603 
8604 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8605 
8606 	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8607 	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8608 	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8609 
8610 	status = hclge_cmd_send(&hdev->hw, &desc, 1);
8611 	if (status)
8612 		dev_err(&hdev->pdev->dev,
8613 			"Send txvlan protocol type command fail, ret =%d\n",
8614 			status);
8615 
8616 	return status;
8617 }
8618 
8619 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8620 {
8621 #define HCLGE_DEF_VLAN_TYPE		0x8100
8622 
8623 	struct hnae3_handle *handle = &hdev->vport[0].nic;
8624 	struct hclge_vport *vport;
8625 	int ret;
8626 	int i;
8627 
8628 	if (hdev->pdev->revision >= 0x21) {
8629 		/* for revision 0x21, vf vlan filter is per function */
8630 		for (i = 0; i < hdev->num_alloc_vport; i++) {
8631 			vport = &hdev->vport[i];
8632 			ret = hclge_set_vlan_filter_ctrl(hdev,
8633 							 HCLGE_FILTER_TYPE_VF,
8634 							 HCLGE_FILTER_FE_EGRESS,
8635 							 true,
8636 							 vport->vport_id);
8637 			if (ret)
8638 				return ret;
8639 		}
8640 
8641 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8642 						 HCLGE_FILTER_FE_INGRESS, true,
8643 						 0);
8644 		if (ret)
8645 			return ret;
8646 	} else {
8647 		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8648 						 HCLGE_FILTER_FE_EGRESS_V1_B,
8649 						 true, 0);
8650 		if (ret)
8651 			return ret;
8652 	}
8653 
8654 	handle->netdev_flags |= HNAE3_VLAN_FLTR;
8655 
8656 	hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8657 	hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8658 	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8659 	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8660 	hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8661 	hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8662 
8663 	ret = hclge_set_vlan_protocol_type(hdev);
8664 	if (ret)
8665 		return ret;
8666 
8667 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8668 		u16 vlan_tag;
8669 
8670 		vport = &hdev->vport[i];
8671 		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8672 
8673 		ret = hclge_vlan_offload_cfg(vport,
8674 					     vport->port_base_vlan_cfg.state,
8675 					     vlan_tag);
8676 		if (ret)
8677 			return ret;
8678 	}
8679 
8680 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8681 }
8682 
8683 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8684 				       bool writen_to_tbl)
8685 {
8686 	struct hclge_vport_vlan_cfg *vlan;
8687 
8688 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8689 	if (!vlan)
8690 		return;
8691 
8692 	vlan->hd_tbl_status = writen_to_tbl;
8693 	vlan->vlan_id = vlan_id;
8694 
8695 	list_add_tail(&vlan->node, &vport->vlan_list);
8696 }
8697 
8698 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8699 {
8700 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8701 	struct hclge_dev *hdev = vport->back;
8702 	int ret;
8703 
8704 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8705 		if (!vlan->hd_tbl_status) {
8706 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8707 						       vport->vport_id,
8708 						       vlan->vlan_id, false);
8709 			if (ret) {
8710 				dev_err(&hdev->pdev->dev,
8711 					"restore vport vlan list failed, ret=%d\n",
8712 					ret);
8713 				return ret;
8714 			}
8715 		}
8716 		vlan->hd_tbl_status = true;
8717 	}
8718 
8719 	return 0;
8720 }
8721 
8722 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8723 				      bool is_write_tbl)
8724 {
8725 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8726 	struct hclge_dev *hdev = vport->back;
8727 
8728 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8729 		if (vlan->vlan_id == vlan_id) {
8730 			if (is_write_tbl && vlan->hd_tbl_status)
8731 				hclge_set_vlan_filter_hw(hdev,
8732 							 htons(ETH_P_8021Q),
8733 							 vport->vport_id,
8734 							 vlan_id,
8735 							 true);
8736 
8737 			list_del(&vlan->node);
8738 			kfree(vlan);
8739 			break;
8740 		}
8741 	}
8742 }
8743 
8744 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8745 {
8746 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8747 	struct hclge_dev *hdev = vport->back;
8748 
8749 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8750 		if (vlan->hd_tbl_status)
8751 			hclge_set_vlan_filter_hw(hdev,
8752 						 htons(ETH_P_8021Q),
8753 						 vport->vport_id,
8754 						 vlan->vlan_id,
8755 						 true);
8756 
8757 		vlan->hd_tbl_status = false;
8758 		if (is_del_list) {
8759 			list_del(&vlan->node);
8760 			kfree(vlan);
8761 		}
8762 	}
8763 	clear_bit(vport->vport_id, hdev->vf_vlan_full);
8764 }
8765 
8766 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8767 {
8768 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8769 	struct hclge_vport *vport;
8770 	int i;
8771 
8772 	for (i = 0; i < hdev->num_alloc_vport; i++) {
8773 		vport = &hdev->vport[i];
8774 		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8775 			list_del(&vlan->node);
8776 			kfree(vlan);
8777 		}
8778 	}
8779 }
8780 
8781 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
8782 {
8783 	struct hclge_vport_vlan_cfg *vlan, *tmp;
8784 	struct hclge_dev *hdev = vport->back;
8785 	u16 vlan_proto;
8786 	u16 vlan_id;
8787 	u16 state;
8788 	int ret;
8789 
8790 	vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8791 	vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8792 	state = vport->port_base_vlan_cfg.state;
8793 
8794 	if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8795 		clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
8796 		hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8797 					 vport->vport_id, vlan_id,
8798 					 false);
8799 		return;
8800 	}
8801 
8802 	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8803 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8804 					       vport->vport_id,
8805 					       vlan->vlan_id, false);
8806 		if (ret)
8807 			break;
8808 		vlan->hd_tbl_status = true;
8809 	}
8810 }
8811 
8812 /* For global reset and imp reset, hardware will clear the mac table,
8813  * so we change the mac address state from ACTIVE to TO_ADD, then they
8814  * can be restored in the service task after the reset completes. Also,
8815  * mac addresses with state TO_DEL do not need to be restored after the
8816  * reset, so just remove these mac nodes from the mac_list.
8817  */
8818 static void hclge_mac_node_convert_for_reset(struct list_head *list)
8819 {
8820 	struct hclge_mac_node *mac_node, *tmp;
8821 
8822 	list_for_each_entry_safe(mac_node, tmp, list, node) {
8823 		if (mac_node->state == HCLGE_MAC_ACTIVE) {
8824 			mac_node->state = HCLGE_MAC_TO_ADD;
8825 		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
8826 			list_del(&mac_node->node);
8827 			kfree(mac_node);
8828 		}
8829 	}
8830 }
8831 
8832 void hclge_restore_mac_table_common(struct hclge_vport *vport)
8833 {
8834 	spin_lock_bh(&vport->mac_list_lock);
8835 
8836 	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
8837 	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
8838 	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8839 
8840 	spin_unlock_bh(&vport->mac_list_lock);
8841 }
8842 
8843 static void hclge_restore_hw_table(struct hclge_dev *hdev)
8844 {
8845 	struct hclge_vport *vport = &hdev->vport[0];
8846 	struct hnae3_handle *handle = &vport->nic;
8847 
8848 	hclge_restore_mac_table_common(vport);
8849 	hclge_restore_vport_vlan_table(vport);
8850 	set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
8851 
8852 	hclge_restore_fd_entries(handle);
8853 }
8854 
8855 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8856 {
8857 	struct hclge_vport *vport = hclge_get_vport(handle);
8858 
8859 	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8860 		vport->rxvlan_cfg.strip_tag1_en = false;
8861 		vport->rxvlan_cfg.strip_tag2_en = enable;
8862 	} else {
8863 		vport->rxvlan_cfg.strip_tag1_en = enable;
8864 		vport->rxvlan_cfg.strip_tag2_en = true;
8865 	}
8866 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8867 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8868 	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8869 
8870 	return hclge_set_vlan_rx_offload_cfg(vport);
8871 }
8872 
8873 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8874 					    u16 port_base_vlan_state,
8875 					    struct hclge_vlan_info *new_info,
8876 					    struct hclge_vlan_info *old_info)
8877 {
8878 	struct hclge_dev *hdev = vport->back;
8879 	int ret;
8880 
8881 	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8882 		hclge_rm_vport_all_vlan_table(vport, false);
8883 		return hclge_set_vlan_filter_hw(hdev,
8884 						 htons(new_info->vlan_proto),
8885 						 vport->vport_id,
8886 						 new_info->vlan_tag,
8887 						 false);
8888 	}
8889 
8890 	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8891 				       vport->vport_id, old_info->vlan_tag,
8892 				       true);
8893 	if (ret)
8894 		return ret;
8895 
8896 	return hclge_add_vport_all_vlan_table(vport);
8897 }
8898 
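/* Apply a port based VLAN change for a vport. For MODIFY only the VLAN tag
 * in hardware is swapped (add new, remove old) and the state is left alone;
 * for ENABLE/DISABLE the VLAN filter entries are rewritten via
 * hclge_update_vlan_filter_entries() and the new state is recorded. In every
 * case the new vlan_info is saved in port_base_vlan_cfg.
 */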
8899 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8900 				    struct hclge_vlan_info *vlan_info)
8901 {
8902 	struct hnae3_handle *nic = &vport->nic;
8903 	struct hclge_vlan_info *old_vlan_info;
8904 	struct hclge_dev *hdev = vport->back;
8905 	int ret;
8906 
8907 	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8908 
8909 	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8910 	if (ret)
8911 		return ret;
8912 
8913 	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8914 		/* add new VLAN tag */
8915 		ret = hclge_set_vlan_filter_hw(hdev,
8916 					       htons(vlan_info->vlan_proto),
8917 					       vport->vport_id,
8918 					       vlan_info->vlan_tag,
8919 					       false);
8920 		if (ret)
8921 			return ret;
8922 
8923 		/* remove old VLAN tag */
8924 		ret = hclge_set_vlan_filter_hw(hdev,
8925 					       htons(old_vlan_info->vlan_proto),
8926 					       vport->vport_id,
8927 					       old_vlan_info->vlan_tag,
8928 					       true);
8929 		if (ret)
8930 			return ret;
8931 
8932 		goto update;
8933 	}
8934 
8935 	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8936 					       old_vlan_info);
8937 	if (ret)
8938 		return ret;
8939 
8940 	/* update state only when disabling/enabling port based VLAN */
8941 	vport->port_base_vlan_cfg.state = state;
8942 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8943 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8944 	else
8945 		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8946 
8947 update:
8948 	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8949 	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8950 	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8951 
8952 	return 0;
8953 }
8954 
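/* Decide how the requested VLAN changes the port based VLAN state:
 * enable, disable, modify, or no change.
 */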
8955 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8956 					  enum hnae3_port_base_vlan_state state,
8957 					  u16 vlan)
8958 {
8959 	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8960 		if (!vlan)
8961 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8962 		else
8963 			return HNAE3_PORT_BASE_VLAN_ENABLE;
8964 	} else {
8965 		if (!vlan)
8966 			return HNAE3_PORT_BASE_VLAN_DISABLE;
8967 		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8968 			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8969 		else
8970 			return HNAE3_PORT_BASE_VLAN_MODIFY;
8971 	}
8972 }
8973 
8974 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8975 				    u16 vlan, u8 qos, __be16 proto)
8976 {
8977 	struct hclge_vport *vport = hclge_get_vport(handle);
8978 	struct hclge_dev *hdev = vport->back;
8979 	struct hclge_vlan_info vlan_info;
8980 	u16 state;
8981 	int ret;
8982 
8983 	if (hdev->pdev->revision == 0x20)
8984 		return -EOPNOTSUPP;
8985 
8986 	vport = hclge_get_vf_vport(hdev, vfid);
8987 	if (!vport)
8988 		return -EINVAL;
8989 
8990 	/* qos is a 3 bits value, so can not be bigger than 7 */
8991 	if (vlan > VLAN_N_VID - 1 || qos > 7)
8992 		return -EINVAL;
8993 	if (proto != htons(ETH_P_8021Q))
8994 		return -EPROTONOSUPPORT;
8995 
8996 	state = hclge_get_port_base_vlan_state(vport,
8997 					       vport->port_base_vlan_cfg.state,
8998 					       vlan);
8999 	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
9000 		return 0;
9001 
9002 	vlan_info.vlan_tag = vlan;
9003 	vlan_info.qos = qos;
9004 	vlan_info.vlan_proto = ntohs(proto);
9005 
9006 	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9007 		return hclge_update_port_base_vlan_cfg(vport, state,
9008 						       &vlan_info);
9009 	} else {
9010 		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
9011 							vport->vport_id, state,
9012 							vlan, qos,
9013 							ntohs(proto));
9014 		return ret;
9015 	}
9016 }
9017 
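/* Remove the port based VLAN filter entries configured for all VFs. */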
9018 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
9019 {
9020 	struct hclge_vlan_info *vlan_info;
9021 	struct hclge_vport *vport;
9022 	int ret;
9023 	int vf;
9024 
9025 	/* clear port base vlan for all vf */
9026 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9027 		vport = &hdev->vport[vf];
9028 		vlan_info = &vport->port_base_vlan_cfg.vlan_info;
9029 
9030 		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9031 					       vport->vport_id,
9032 					       vlan_info->vlan_tag, true);
9033 		if (ret)
9034 			dev_err(&hdev->pdev->dev,
9035 				"failed to clear vf vlan for vf%d, ret = %d\n",
9036 				vf - HCLGE_VF_VPORT_START_NUM, ret);
9037 	}
9038 }
9039 
9040 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
9041 			  u16 vlan_id, bool is_kill)
9042 {
9043 	struct hclge_vport *vport = hclge_get_vport(handle);
9044 	struct hclge_dev *hdev = vport->back;
9045 	bool writen_to_tbl = false;
9046 	int ret = 0;
9047 
9048 	/* When device is resetting, firmware is unable to handle
9049 	 * mailbox. Just record the vlan id, and remove it after
9050 	 * reset finished.
9051 	 */
9052 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
9053 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9054 		return -EBUSY;
9055 	}
9056 
9057 	/* When port based VLAN is enabled, the port based VLAN is used as the
9058 	 * VLAN filter entry. In this case the VLAN filter table is not updated
9059 	 * when the user adds or removes a VLAN; only the vport VLAN list is
9060 	 * updated. The VLAN ids in the list are written to the VLAN filter
9061 	 * table once port based VLAN is disabled.
9062 	 */
9063 	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9064 		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
9065 					       vlan_id, is_kill);
9066 		writen_to_tbl = true;
9067 	}
9068 
9069 	if (!ret) {
9070 		if (is_kill)
9071 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9072 		else
9073 			hclge_add_vport_vlan_table(vport, vlan_id,
9074 						   writen_to_tbl);
9075 	} else if (is_kill) {
9076 		/* When removing the hw VLAN filter failed, record the VLAN id
9077 		 * and try to remove it from hw later, to stay consistent with
9078 		 * the stack.
9079 		 */
9080 		set_bit(vlan_id, vport->vlan_del_fail_bmap);
9081 	}
9082 	return ret;
9083 }
9084 
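/* Retry the VLAN deletions that could not be completed earlier (for example
 * during a reset), handling at most HCLGE_MAX_SYNC_COUNT entries per call.
 */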
9085 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
9086 {
9087 #define HCLGE_MAX_SYNC_COUNT	60
9088 
9089 	int i, ret, sync_cnt = 0;
9090 	u16 vlan_id;
9091 
9092 	/* walk every vport, including the PF vport, and retry failed deletions */
9093 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9094 		struct hclge_vport *vport = &hdev->vport[i];
9095 
9096 		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9097 					 VLAN_N_VID);
9098 		while (vlan_id != VLAN_N_VID) {
9099 			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9100 						       vport->vport_id, vlan_id,
9101 						       true);
9102 			if (ret && ret != -EINVAL)
9103 				return;
9104 
9105 			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
9106 			hclge_rm_vport_vlan_table(vport, vlan_id, false);
9107 
9108 			sync_cnt++;
9109 			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
9110 				return;
9111 
9112 			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
9113 						 VLAN_N_VID);
9114 		}
9115 	}
9116 }
9117 
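/* Program the MAC maximum frame size into hardware. */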
9118 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
9119 {
9120 	struct hclge_config_max_frm_size_cmd *req;
9121 	struct hclge_desc desc;
9122 
9123 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
9124 
9125 	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
9126 	req->max_frm_size = cpu_to_le16(new_mps);
9127 	req->min_frm_size = HCLGE_MAC_MIN_FRAME;
9128 
9129 	return hclge_cmd_send(&hdev->hw, &desc, 1);
9130 }
9131 
9132 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
9133 {
9134 	struct hclge_vport *vport = hclge_get_vport(handle);
9135 
9136 	return hclge_set_vport_mtu(vport, new_mtu);
9137 }
9138 
9139 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
9140 {
9141 	struct hclge_dev *hdev = vport->back;
9142 	int i, max_frm_size, ret;
9143 
9144 	/* HW supports 2-layer VLAN tags */
9145 	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9146 	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
9147 	    max_frm_size > HCLGE_MAC_MAX_FRAME)
9148 		return -EINVAL;
9149 
9150 	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
9151 	mutex_lock(&hdev->vport_lock);
9152 	/* VF's mps must fit within hdev->mps */
9153 	if (vport->vport_id && max_frm_size > hdev->mps) {
9154 		mutex_unlock(&hdev->vport_lock);
9155 		return -EINVAL;
9156 	} else if (vport->vport_id) {
9157 		vport->mps = max_frm_size;
9158 		mutex_unlock(&hdev->vport_lock);
9159 		return 0;
9160 	}
9161 
9162 	/* PF's mps must not be less than any VF's mps */
9163 	for (i = 1; i < hdev->num_alloc_vport; i++)
9164 		if (max_frm_size < hdev->vport[i].mps) {
9165 			mutex_unlock(&hdev->vport_lock);
9166 			return -EINVAL;
9167 		}
9168 
9169 	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
9170 
9171 	ret = hclge_set_mac_mtu(hdev, max_frm_size);
9172 	if (ret) {
9173 		dev_err(&hdev->pdev->dev,
9174 			"Change mtu fail, ret =%d\n", ret);
9175 		goto out;
9176 	}
9177 
9178 	hdev->mps = max_frm_size;
9179 	vport->mps = max_frm_size;
9180 
9181 	ret = hclge_buffer_alloc(hdev);
9182 	if (ret)
9183 		dev_err(&hdev->pdev->dev,
9184 			"Allocate buffer fail, ret =%d\n", ret);
9185 
9186 out:
9187 	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
9188 	mutex_unlock(&hdev->vport_lock);
9189 	return ret;
9190 }
9191 
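/* Assert (enable == true) or de-assert the reset of the given TQP via the
 * firmware command queue.
 */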
9192 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
9193 				    bool enable)
9194 {
9195 	struct hclge_reset_tqp_queue_cmd *req;
9196 	struct hclge_desc desc;
9197 	int ret;
9198 
9199 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
9200 
9201 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9202 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9203 	if (enable)
9204 		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
9205 
9206 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9207 	if (ret) {
9208 		dev_err(&hdev->pdev->dev,
9209 			"Send tqp reset cmd error, status =%d\n", ret);
9210 		return ret;
9211 	}
9212 
9213 	return 0;
9214 }
9215 
9216 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
9217 {
9218 	struct hclge_reset_tqp_queue_cmd *req;
9219 	struct hclge_desc desc;
9220 	int ret;
9221 
9222 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
9223 
9224 	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
9225 	req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
9226 
9227 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9228 	if (ret) {
9229 		dev_err(&hdev->pdev->dev,
9230 			"Get reset status error, status =%d\n", ret);
9231 		return ret;
9232 	}
9233 
9234 	return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
9235 }
9236 
9237 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
9238 {
9239 	struct hnae3_queue *queue;
9240 	struct hclge_tqp *tqp;
9241 
9242 	queue = handle->kinfo.tqp[queue_id];
9243 	tqp = container_of(queue, struct hclge_tqp, q);
9244 
9245 	return tqp->index;
9246 }
9247 
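/* Reset a single TQP: disable the queue, assert the TQP reset, poll the
 * hardware reset status, then de-assert the reset.
 */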
9248 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
9249 {
9250 	struct hclge_vport *vport = hclge_get_vport(handle);
9251 	struct hclge_dev *hdev = vport->back;
9252 	int reset_try_times = 0;
9253 	int reset_status;
9254 	u16 queue_gid;
9255 	int ret;
9256 
9257 	queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
9258 
9259 	ret = hclge_tqp_enable(hdev, queue_id, 0, false);
9260 	if (ret) {
9261 		dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
9262 		return ret;
9263 	}
9264 
9265 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9266 	if (ret) {
9267 		dev_err(&hdev->pdev->dev,
9268 			"Send reset tqp cmd fail, ret = %d\n", ret);
9269 		return ret;
9270 	}
9271 
9272 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9273 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9274 		if (reset_status)
9275 			break;
9276 
9277 		/* Wait for tqp hw reset */
9278 		usleep_range(1000, 1200);
9279 	}
9280 
9281 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9282 		dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
9283 		return -ETIME;	/* ret is still 0 here; report the timeout */
9284 	}
9285 
9286 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9287 	if (ret)
9288 		dev_err(&hdev->pdev->dev,
9289 			"Deassert the soft reset fail, ret = %d\n", ret);
9290 
9291 	return ret;
9292 }
9293 
9294 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
9295 {
9296 	struct hclge_dev *hdev = vport->back;
9297 	int reset_try_times = 0;
9298 	int reset_status;
9299 	u16 queue_gid;
9300 	int ret;
9301 
9302 	queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
9303 
9304 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
9305 	if (ret) {
9306 		dev_warn(&hdev->pdev->dev,
9307 			 "Send reset tqp cmd fail, ret = %d\n", ret);
9308 		return;
9309 	}
9310 
9311 	while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
9312 		reset_status = hclge_get_reset_status(hdev, queue_gid);
9313 		if (reset_status)
9314 			break;
9315 
9316 		/* Wait for tqp hw reset */
9317 		usleep_range(1000, 1200);
9318 	}
9319 
9320 	if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
9321 		dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
9322 		return;
9323 	}
9324 
9325 	ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
9326 	if (ret)
9327 		dev_warn(&hdev->pdev->dev,
9328 			 "Deassert the soft reset fail, ret = %d\n", ret);
9329 }
9330 
9331 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
9332 {
9333 	struct hclge_vport *vport = hclge_get_vport(handle);
9334 	struct hclge_dev *hdev = vport->back;
9335 
9336 	return hdev->fw_version;
9337 }
9338 
9339 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9340 {
9341 	struct phy_device *phydev = hdev->hw.mac.phydev;
9342 
9343 	if (!phydev)
9344 		return;
9345 
9346 	phy_set_asym_pause(phydev, rx_en, tx_en);
9347 }
9348 
9349 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
9350 {
9351 	int ret;
9352 
9353 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
9354 		return 0;
9355 
9356 	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
9357 	if (ret)
9358 		dev_err(&hdev->pdev->dev,
9359 			"configure pauseparam error, ret = %d.\n", ret);
9360 
9361 	return ret;
9362 }
9363 
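/* Resolve the pause configuration from the local and link partner
 * autonegotiation results and apply it to the MAC.
 */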
9364 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
9365 {
9366 	struct phy_device *phydev = hdev->hw.mac.phydev;
9367 	u16 remote_advertising = 0;
9368 	u16 local_advertising;
9369 	u32 rx_pause, tx_pause;
9370 	u8 flowctl;
9371 
9372 	if (!phydev->link || !phydev->autoneg)
9373 		return 0;
9374 
9375 	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
9376 
9377 	if (phydev->pause)
9378 		remote_advertising = LPA_PAUSE_CAP;
9379 
9380 	if (phydev->asym_pause)
9381 		remote_advertising |= LPA_PAUSE_ASYM;
9382 
9383 	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
9384 					   remote_advertising);
9385 	tx_pause = flowctl & FLOW_CTRL_TX;
9386 	rx_pause = flowctl & FLOW_CTRL_RX;
9387 
9388 	if (phydev->duplex == HCLGE_MAC_HALF) {
9389 		tx_pause = 0;
9390 		rx_pause = 0;
9391 	}
9392 
9393 	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
9394 }
9395 
9396 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
9397 				 u32 *rx_en, u32 *tx_en)
9398 {
9399 	struct hclge_vport *vport = hclge_get_vport(handle);
9400 	struct hclge_dev *hdev = vport->back;
9401 	struct phy_device *phydev = hdev->hw.mac.phydev;
9402 
9403 	*auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
9404 
9405 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9406 		*rx_en = 0;
9407 		*tx_en = 0;
9408 		return;
9409 	}
9410 
9411 	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
9412 		*rx_en = 1;
9413 		*tx_en = 0;
9414 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
9415 		*tx_en = 1;
9416 		*rx_en = 0;
9417 	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
9418 		*rx_en = 1;
9419 		*tx_en = 1;
9420 	} else {
9421 		*rx_en = 0;
9422 		*tx_en = 0;
9423 	}
9424 }
9425 
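/* Record the pause setting requested by the user as the device's flow
 * control mode.
 */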
9426 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
9427 					 u32 rx_en, u32 tx_en)
9428 {
9429 	if (rx_en && tx_en)
9430 		hdev->fc_mode_last_time = HCLGE_FC_FULL;
9431 	else if (rx_en && !tx_en)
9432 		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
9433 	else if (!rx_en && tx_en)
9434 		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
9435 	else
9436 		hdev->fc_mode_last_time = HCLGE_FC_NONE;
9437 
9438 	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
9439 }
9440 
9441 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
9442 				u32 rx_en, u32 tx_en)
9443 {
9444 	struct hclge_vport *vport = hclge_get_vport(handle);
9445 	struct hclge_dev *hdev = vport->back;
9446 	struct phy_device *phydev = hdev->hw.mac.phydev;
9447 	u32 fc_autoneg;
9448 
9449 	if (phydev) {
9450 		fc_autoneg = hclge_get_autoneg(handle);
9451 		if (auto_neg != fc_autoneg) {
9452 			dev_info(&hdev->pdev->dev,
9453 				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
9454 			return -EOPNOTSUPP;
9455 		}
9456 	}
9457 
9458 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
9459 		dev_info(&hdev->pdev->dev,
9460 			 "Priority flow control enabled. Cannot set link flow control.\n");
9461 		return -EOPNOTSUPP;
9462 	}
9463 
9464 	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
9465 
9466 	hclge_record_user_pauseparam(hdev, rx_en, tx_en);
9467 
9468 	if (!auto_neg)
9469 		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
9470 
9471 	if (phydev)
9472 		return phy_start_aneg(phydev);
9473 
9474 	return -EOPNOTSUPP;
9475 }
9476 
9477 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
9478 					  u8 *auto_neg, u32 *speed, u8 *duplex)
9479 {
9480 	struct hclge_vport *vport = hclge_get_vport(handle);
9481 	struct hclge_dev *hdev = vport->back;
9482 
9483 	if (speed)
9484 		*speed = hdev->hw.mac.speed;
9485 	if (duplex)
9486 		*duplex = hdev->hw.mac.duplex;
9487 	if (auto_neg)
9488 		*auto_neg = hdev->hw.mac.autoneg;
9489 }
9490 
9491 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
9492 				 u8 *module_type)
9493 {
9494 	struct hclge_vport *vport = hclge_get_vport(handle);
9495 	struct hclge_dev *hdev = vport->back;
9496 
9497 	/* When the nic is down, the service task is not running and does not
9498 	 * update the port information periodically. Query the port information
9499 	 * before returning the media type, to ensure it is up to date.
9500 	 */
9501 	hclge_update_port_info(hdev);
9502 
9503 	if (media_type)
9504 		*media_type = hdev->hw.mac.media_type;
9505 
9506 	if (module_type)
9507 		*module_type = hdev->hw.mac.module_type;
9508 }
9509 
9510 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
9511 				u8 *tp_mdix_ctrl, u8 *tp_mdix)
9512 {
9513 	struct hclge_vport *vport = hclge_get_vport(handle);
9514 	struct hclge_dev *hdev = vport->back;
9515 	struct phy_device *phydev = hdev->hw.mac.phydev;
9516 	int mdix_ctrl, mdix, is_resolved;
9517 	unsigned int retval;
9518 
9519 	if (!phydev) {
9520 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9521 		*tp_mdix = ETH_TP_MDI_INVALID;
9522 		return;
9523 	}
9524 
9525 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9526 
9527 	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9528 	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9529 				    HCLGE_PHY_MDIX_CTRL_S);
9530 
9531 	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9532 	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9533 	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9534 
9535 	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9536 
9537 	switch (mdix_ctrl) {
9538 	case 0x0:
9539 		*tp_mdix_ctrl = ETH_TP_MDI;
9540 		break;
9541 	case 0x1:
9542 		*tp_mdix_ctrl = ETH_TP_MDI_X;
9543 		break;
9544 	case 0x3:
9545 		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9546 		break;
9547 	default:
9548 		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9549 		break;
9550 	}
9551 
9552 	if (!is_resolved)
9553 		*tp_mdix = ETH_TP_MDI_INVALID;
9554 	else if (mdix)
9555 		*tp_mdix = ETH_TP_MDI_X;
9556 	else
9557 		*tp_mdix = ETH_TP_MDI;
9558 }
9559 
9560 static void hclge_info_show(struct hclge_dev *hdev)
9561 {
9562 	struct device *dev = &hdev->pdev->dev;
9563 
9564 	dev_info(dev, "PF info begin:\n");
9565 
9566 	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9567 	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9568 	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9569 	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9570 	dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9571 	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9572 	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9573 	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9574 	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9575 	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9576 	dev_info(dev, "This is %s PF\n",
9577 		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9578 	dev_info(dev, "DCB %s\n",
9579 		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9580 	dev_info(dev, "MQPRIO %s\n",
9581 		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9582 
9583 	dev_info(dev, "PF info end.\n");
9584 }
9585 
9586 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9587 					  struct hclge_vport *vport)
9588 {
9589 	struct hnae3_client *client = vport->nic.client;
9590 	struct hclge_dev *hdev = ae_dev->priv;
9591 	int rst_cnt = hdev->rst_stats.reset_cnt;
9592 	int ret;
9593 
9594 	ret = client->ops->init_instance(&vport->nic);
9595 	if (ret)
9596 		return ret;
9597 
9598 	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9599 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9600 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9601 		ret = -EBUSY;
9602 		goto init_nic_err;
9603 	}
9604 
9605 	/* Enable nic hw error interrupts */
9606 	ret = hclge_config_nic_hw_error(hdev, true);
9607 	if (ret) {
9608 		dev_err(&ae_dev->pdev->dev,
9609 			"fail(%d) to enable hw error interrupts\n", ret);
9610 		goto init_nic_err;
9611 	}
9612 
9613 	hnae3_set_client_init_flag(client, ae_dev, 1);
9614 
9615 	if (netif_msg_drv(&hdev->vport->nic))
9616 		hclge_info_show(hdev);
9617 
9618 	return ret;
9619 
9620 init_nic_err:
9621 	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9622 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9623 		msleep(HCLGE_WAIT_RESET_DONE);
9624 
9625 	client->ops->uninit_instance(&vport->nic, 0);
9626 
9627 	return ret;
9628 }
9629 
9630 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9631 					   struct hclge_vport *vport)
9632 {
9633 	struct hclge_dev *hdev = ae_dev->priv;
9634 	struct hnae3_client *client;
9635 	int rst_cnt;
9636 	int ret;
9637 
9638 	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9639 	    !hdev->nic_client)
9640 		return 0;
9641 
9642 	client = hdev->roce_client;
9643 	ret = hclge_init_roce_base_info(vport);
9644 	if (ret)
9645 		return ret;
9646 
9647 	rst_cnt = hdev->rst_stats.reset_cnt;
9648 	ret = client->ops->init_instance(&vport->roce);
9649 	if (ret)
9650 		return ret;
9651 
9652 	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9653 	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9654 	    rst_cnt != hdev->rst_stats.reset_cnt) {
9655 		ret = -EBUSY;
9656 		goto init_roce_err;
9657 	}
9658 
9659 	/* Enable roce ras interrupts */
9660 	ret = hclge_config_rocee_ras_interrupt(hdev, true);
9661 	if (ret) {
9662 		dev_err(&ae_dev->pdev->dev,
9663 			"fail(%d) to enable roce ras interrupts\n", ret);
9664 		goto init_roce_err;
9665 	}
9666 
9667 	hnae3_set_client_init_flag(client, ae_dev, 1);
9668 
9669 	return 0;
9670 
9671 init_roce_err:
9672 	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9673 	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9674 		msleep(HCLGE_WAIT_RESET_DONE);
9675 
9676 	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9677 
9678 	return ret;
9679 }
9680 
9681 static int hclge_init_client_instance(struct hnae3_client *client,
9682 				      struct hnae3_ae_dev *ae_dev)
9683 {
9684 	struct hclge_dev *hdev = ae_dev->priv;
9685 	struct hclge_vport *vport;
9686 	int i, ret;
9687 
9688 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9689 		vport = &hdev->vport[i];
9690 
9691 		switch (client->type) {
9692 		case HNAE3_CLIENT_KNIC:
9693 			hdev->nic_client = client;
9694 			vport->nic.client = client;
9695 			ret = hclge_init_nic_client_instance(ae_dev, vport);
9696 			if (ret)
9697 				goto clear_nic;
9698 
9699 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9700 			if (ret)
9701 				goto clear_roce;
9702 
9703 			break;
9704 		case HNAE3_CLIENT_ROCE:
9705 			if (hnae3_dev_roce_supported(hdev)) {
9706 				hdev->roce_client = client;
9707 				vport->roce.client = client;
9708 			}
9709 
9710 			ret = hclge_init_roce_client_instance(ae_dev, vport);
9711 			if (ret)
9712 				goto clear_roce;
9713 
9714 			break;
9715 		default:
9716 			return -EINVAL;
9717 		}
9718 	}
9719 
9720 	return 0;
9721 
9722 clear_nic:
9723 	hdev->nic_client = NULL;
9724 	vport->nic.client = NULL;
9725 	return ret;
9726 clear_roce:
9727 	hdev->roce_client = NULL;
9728 	vport->roce.client = NULL;
9729 	return ret;
9730 }
9731 
9732 static void hclge_uninit_client_instance(struct hnae3_client *client,
9733 					 struct hnae3_ae_dev *ae_dev)
9734 {
9735 	struct hclge_dev *hdev = ae_dev->priv;
9736 	struct hclge_vport *vport;
9737 	int i;
9738 
9739 	for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9740 		vport = &hdev->vport[i];
9741 		if (hdev->roce_client) {
9742 			clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9743 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9744 				msleep(HCLGE_WAIT_RESET_DONE);
9745 
9746 			hdev->roce_client->ops->uninit_instance(&vport->roce,
9747 								0);
9748 			hdev->roce_client = NULL;
9749 			vport->roce.client = NULL;
9750 		}
9751 		if (client->type == HNAE3_CLIENT_ROCE)
9752 			return;
9753 		if (hdev->nic_client && client->ops->uninit_instance) {
9754 			clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9755 			while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9756 				msleep(HCLGE_WAIT_RESET_DONE);
9757 
9758 			client->ops->uninit_instance(&vport->nic, 0);
9759 			hdev->nic_client = NULL;
9760 			vport->nic.client = NULL;
9761 		}
9762 	}
9763 }
9764 
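/* Enable the PCI device, set the DMA mask, request the BAR regions and map
 * the register space.
 */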
9765 static int hclge_pci_init(struct hclge_dev *hdev)
9766 {
9767 	struct pci_dev *pdev = hdev->pdev;
9768 	struct hclge_hw *hw;
9769 	int ret;
9770 
9771 	ret = pci_enable_device(pdev);
9772 	if (ret) {
9773 		dev_err(&pdev->dev, "failed to enable PCI device\n");
9774 		return ret;
9775 	}
9776 
9777 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9778 	if (ret) {
9779 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9780 		if (ret) {
9781 			dev_err(&pdev->dev,
9782 				"can't set consistent PCI DMA");
9783 			goto err_disable_device;
9784 		}
9785 		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9786 	}
9787 
9788 	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9789 	if (ret) {
9790 		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9791 		goto err_disable_device;
9792 	}
9793 
9794 	pci_set_master(pdev);
9795 	hw = &hdev->hw;
9796 	hw->io_base = pcim_iomap(pdev, 2, 0);
9797 	if (!hw->io_base) {
9798 		dev_err(&pdev->dev, "Can't map configuration register space\n");
9799 		ret = -ENOMEM;
9800 		goto err_clr_master;
9801 	}
9802 
9803 	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9804 
9805 	return 0;
9806 err_clr_master:
9807 	pci_clear_master(pdev);
9808 	pci_release_regions(pdev);
9809 err_disable_device:
9810 	pci_disable_device(pdev);
9811 
9812 	return ret;
9813 }
9814 
9815 static void hclge_pci_uninit(struct hclge_dev *hdev)
9816 {
9817 	struct pci_dev *pdev = hdev->pdev;
9818 
9819 	pcim_iounmap(pdev, hdev->hw.io_base);
9820 	pci_free_irq_vectors(pdev);
9821 	pci_clear_master(pdev);
9822 	pci_release_mem_regions(pdev);
9823 	pci_disable_device(pdev);
9824 }
9825 
9826 static void hclge_state_init(struct hclge_dev *hdev)
9827 {
9828 	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9829 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9830 	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9831 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9832 	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
9833 	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9834 	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9835 }
9836 
9837 static void hclge_state_uninit(struct hclge_dev *hdev)
9838 {
9839 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
9840 	set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9841 
9842 	if (hdev->reset_timer.function)
9843 		del_timer_sync(&hdev->reset_timer);
9844 	if (hdev->service_task.work.func)
9845 		cancel_delayed_work_sync(&hdev->service_task);
9846 }
9847 
9848 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9849 {
9850 #define HCLGE_FLR_RETRY_WAIT_MS	500
9851 #define HCLGE_FLR_RETRY_CNT	5
9852 
9853 	struct hclge_dev *hdev = ae_dev->priv;
9854 	int retry_cnt = 0;
9855 	int ret;
9856 
9857 retry:
9858 	down(&hdev->reset_sem);
9859 	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9860 	hdev->reset_type = HNAE3_FLR_RESET;
9861 	ret = hclge_reset_prepare(hdev);
9862 	if (ret) {
9863 		dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
9864 			ret);
9865 		if (hdev->reset_pending ||
9866 		    retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
9867 			dev_err(&hdev->pdev->dev,
9868 				"reset_pending:0x%lx, retry_cnt:%d\n",
9869 				hdev->reset_pending, retry_cnt);
9870 			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9871 			up(&hdev->reset_sem);
9872 			msleep(HCLGE_FLR_RETRY_WAIT_MS);
9873 			goto retry;
9874 		}
9875 	}
9876 
9877 	/* disable misc vector before FLR done */
9878 	hclge_enable_vector(&hdev->misc_vector, false);
9879 	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
9880 	hdev->rst_stats.flr_rst_cnt++;
9881 }
9882 
9883 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9884 {
9885 	struct hclge_dev *hdev = ae_dev->priv;
9886 	int ret;
9887 
9888 	hclge_enable_vector(&hdev->misc_vector, true);
9889 
9890 	ret = hclge_reset_rebuild(hdev);
9891 	if (ret)
9892 		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
9893 
9894 	hdev->reset_type = HNAE3_NONE_RESET;
9895 	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9896 	up(&hdev->reset_sem);
9897 }
9898 
9899 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9900 {
9901 	u16 i;
9902 
9903 	for (i = 0; i < hdev->num_alloc_vport; i++) {
9904 		struct hclge_vport *vport = &hdev->vport[i];
9905 		int ret;
9906 
9907 		/* Send cmd to clear VF's FUNC_RST_ING */
9908 		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9909 		if (ret)
9910 			dev_warn(&hdev->pdev->dev,
9911 				 "clear vf(%u) rst failed %d!\n",
9912 				 vport->vport_id, ret);
9913 	}
9914 }
9915 
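/* Main initialization entry for the PF: bring up PCI, the command queue,
 * interrupts, TQPs/vports, MAC, VLAN, TM, RSS and flow director, then start
 * the periodic service task.
 */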
9916 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9917 {
9918 	struct pci_dev *pdev = ae_dev->pdev;
9919 	struct hclge_dev *hdev;
9920 	int ret;
9921 
9922 	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9923 	if (!hdev)
9924 		return -ENOMEM;
9925 
9926 	hdev->pdev = pdev;
9927 	hdev->ae_dev = ae_dev;
9928 	hdev->reset_type = HNAE3_NONE_RESET;
9929 	hdev->reset_level = HNAE3_FUNC_RESET;
9930 	ae_dev->priv = hdev;
9931 
9932 	/* HW supports 2-layer VLAN tags */
9933 	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9934 
9935 	mutex_init(&hdev->vport_lock);
9936 	spin_lock_init(&hdev->fd_rule_lock);
9937 	sema_init(&hdev->reset_sem, 1);
9938 
9939 	ret = hclge_pci_init(hdev);
9940 	if (ret)
9941 		goto out;
9942 
9943 	/* Firmware command queue initialize */
9944 	ret = hclge_cmd_queue_init(hdev);
9945 	if (ret)
9946 		goto err_pci_uninit;
9947 
9948 	/* Firmware command initialize */
9949 	ret = hclge_cmd_init(hdev);
9950 	if (ret)
9951 		goto err_cmd_uninit;
9952 
9953 	ret = hclge_get_cap(hdev);
9954 	if (ret)
9955 		goto err_cmd_uninit;
9956 
9957 	ret = hclge_configure(hdev);
9958 	if (ret) {
9959 		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9960 		goto err_cmd_uninit;
9961 	}
9962 
9963 	ret = hclge_init_msi(hdev);
9964 	if (ret) {
9965 		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9966 		goto err_cmd_uninit;
9967 	}
9968 
9969 	ret = hclge_misc_irq_init(hdev);
9970 	if (ret)
9971 		goto err_msi_uninit;
9972 
9973 	ret = hclge_alloc_tqps(hdev);
9974 	if (ret) {
9975 		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9976 		goto err_msi_irq_uninit;
9977 	}
9978 
9979 	ret = hclge_alloc_vport(hdev);
9980 	if (ret)
9981 		goto err_msi_irq_uninit;
9982 
9983 	ret = hclge_map_tqp(hdev);
9984 	if (ret)
9985 		goto err_msi_irq_uninit;
9986 
9987 	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9988 		ret = hclge_mac_mdio_config(hdev);
9989 		if (ret)
9990 			goto err_msi_irq_uninit;
9991 	}
9992 
9993 	ret = hclge_init_umv_space(hdev);
9994 	if (ret)
9995 		goto err_mdiobus_unreg;
9996 
9997 	ret = hclge_mac_init(hdev);
9998 	if (ret) {
9999 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10000 		goto err_mdiobus_unreg;
10001 	}
10002 
10003 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10004 	if (ret) {
10005 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10006 		goto err_mdiobus_unreg;
10007 	}
10008 
10009 	ret = hclge_config_gro(hdev, true);
10010 	if (ret)
10011 		goto err_mdiobus_unreg;
10012 
10013 	ret = hclge_init_vlan_config(hdev);
10014 	if (ret) {
10015 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10016 		goto err_mdiobus_unreg;
10017 	}
10018 
10019 	ret = hclge_tm_schd_init(hdev);
10020 	if (ret) {
10021 		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
10022 		goto err_mdiobus_unreg;
10023 	}
10024 
10025 	hclge_rss_init_cfg(hdev);
10026 	ret = hclge_rss_init_hw(hdev);
10027 	if (ret) {
10028 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10029 		goto err_mdiobus_unreg;
10030 	}
10031 
10032 	ret = init_mgr_tbl(hdev);
10033 	if (ret) {
10034 		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
10035 		goto err_mdiobus_unreg;
10036 	}
10037 
10038 	ret = hclge_init_fd_config(hdev);
10039 	if (ret) {
10040 		dev_err(&pdev->dev,
10041 			"fd table init fail, ret=%d\n", ret);
10042 		goto err_mdiobus_unreg;
10043 	}
10044 
10045 	INIT_KFIFO(hdev->mac_tnl_log);
10046 
10047 	hclge_dcb_ops_set(hdev);
10048 
10049 	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
10050 	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
10051 
10052 	/* Set up affinity after the service timer is set up, because
10053 	 * add_timer_on() is called from the affinity notify callback.
10054 	 */
10055 	hclge_misc_affinity_setup(hdev);
10056 
10057 	hclge_clear_all_event_cause(hdev);
10058 	hclge_clear_resetting_state(hdev);
10059 
10060 	/* Log and clear the hw errors that have already occurred */
10061 	hclge_handle_all_hns_hw_errors(ae_dev);
10062 
10063 	/* Request a delayed reset for error recovery, because an immediate global
10064 	 * reset on this PF would affect the pending initialization of other PFs.
10065 	 */
10066 	if (ae_dev->hw_err_reset_req) {
10067 		enum hnae3_reset_type reset_level;
10068 
10069 		reset_level = hclge_get_reset_level(ae_dev,
10070 						    &ae_dev->hw_err_reset_req);
10071 		hclge_set_def_reset_request(ae_dev, reset_level);
10072 		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
10073 	}
10074 
10075 	/* Enable MISC vector(vector0) */
10076 	hclge_enable_vector(&hdev->misc_vector, true);
10077 
10078 	hclge_state_init(hdev);
10079 	hdev->last_reset_time = jiffies;
10080 
10081 	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
10082 		 HCLGE_DRIVER_NAME);
10083 
10084 	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
10085 
10086 	return 0;
10087 
10088 err_mdiobus_unreg:
10089 	if (hdev->hw.mac.phydev)
10090 		mdiobus_unregister(hdev->hw.mac.mdio_bus);
10091 err_msi_irq_uninit:
10092 	hclge_misc_irq_uninit(hdev);
10093 err_msi_uninit:
10094 	pci_free_irq_vectors(pdev);
10095 err_cmd_uninit:
10096 	hclge_cmd_uninit(hdev);
10097 err_pci_uninit:
10098 	pcim_iounmap(pdev, hdev->hw.io_base);
10099 	pci_clear_master(pdev);
10100 	pci_release_regions(pdev);
10101 	pci_disable_device(pdev);
10102 out:
10103 	mutex_destroy(&hdev->vport_lock);
10104 	return ret;
10105 }
10106 
10107 static void hclge_stats_clear(struct hclge_dev *hdev)
10108 {
10109 	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
10110 }
10111 
10112 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10113 {
10114 	return hclge_config_switch_param(hdev, vf, enable,
10115 					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
10116 }
10117 
10118 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
10119 {
10120 	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10121 					  HCLGE_FILTER_FE_NIC_INGRESS_B,
10122 					  enable, vf);
10123 }
10124 
10125 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
10126 {
10127 	int ret;
10128 
10129 	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
10130 	if (ret) {
10131 		dev_err(&hdev->pdev->dev,
10132 			"Set vf %d mac spoof check %s failed, ret=%d\n",
10133 			vf, enable ? "on" : "off", ret);
10134 		return ret;
10135 	}
10136 
10137 	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
10138 	if (ret)
10139 		dev_err(&hdev->pdev->dev,
10140 			"Set vf %d vlan spoof check %s failed, ret=%d\n",
10141 			vf, enable ? "on" : "off", ret);
10142 
10143 	return ret;
10144 }
10145 
10146 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
10147 				 bool enable)
10148 {
10149 	struct hclge_vport *vport = hclge_get_vport(handle);
10150 	struct hclge_dev *hdev = vport->back;
10151 	u32 new_spoofchk = enable ? 1 : 0;
10152 	int ret;
10153 
10154 	if (hdev->pdev->revision == 0x20)
10155 		return -EOPNOTSUPP;
10156 
10157 	vport = hclge_get_vf_vport(hdev, vf);
10158 	if (!vport)
10159 		return -EINVAL;
10160 
10161 	if (vport->vf_info.spoofchk == new_spoofchk)
10162 		return 0;
10163 
10164 	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
10165 		dev_warn(&hdev->pdev->dev,
10166 			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
10167 			 vf);
10168 	else if (enable && hclge_is_umv_space_full(vport, true))
10169 		dev_warn(&hdev->pdev->dev,
10170 			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
10171 			 vf);
10172 
10173 	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
10174 	if (ret)
10175 		return ret;
10176 
10177 	vport->vf_info.spoofchk = new_spoofchk;
10178 	return 0;
10179 }
10180 
10181 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
10182 {
10183 	struct hclge_vport *vport = hdev->vport;
10184 	int ret;
10185 	int i;
10186 
10187 	if (hdev->pdev->revision == 0x20)
10188 		return 0;
10189 
10190 	/* resume the vf spoof check state after reset */
10191 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10192 		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
10193 					       vport->vf_info.spoofchk);
10194 		if (ret)
10195 			return ret;
10196 
10197 		vport++;
10198 	}
10199 
10200 	return 0;
10201 }
10202 
10203 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
10204 {
10205 	struct hclge_vport *vport = hclge_get_vport(handle);
10206 	struct hclge_dev *hdev = vport->back;
10207 	u32 new_trusted = enable ? 1 : 0;
10208 	bool en_bc_pmc;
10209 	int ret;
10210 
10211 	vport = hclge_get_vf_vport(hdev, vf);
10212 	if (!vport)
10213 		return -EINVAL;
10214 
10215 	if (vport->vf_info.trusted == new_trusted)
10216 		return 0;
10217 
10218 	/* Disable promisc mode for VF if it is not trusted any more. */
10219 	if (!enable && vport->vf_info.promisc_enable) {
10220 		en_bc_pmc = hdev->pdev->revision != 0x20;
10221 		ret = hclge_set_vport_promisc_mode(vport, false, false,
10222 						   en_bc_pmc);
10223 		if (ret)
10224 			return ret;
10225 		vport->vf_info.promisc_enable = 0;
10226 		hclge_inform_vf_promisc_info(vport);
10227 	}
10228 
10229 	vport->vf_info.trusted = new_trusted;
10230 
10231 	return 0;
10232 }
10233 
10234 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
10235 {
10236 	int ret;
10237 	int vf;
10238 
10239 	/* reset vf rate to default value */
10240 	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10241 		struct hclge_vport *vport = &hdev->vport[vf];
10242 
10243 		vport->vf_info.max_tx_rate = 0;
10244 		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
10245 		if (ret)
10246 			dev_err(&hdev->pdev->dev,
10247 				"vf%d failed to reset to default, ret=%d\n",
10248 				vf - HCLGE_VF_VPORT_START_NUM, ret);
10249 	}
10250 }
10251 
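/* min_tx_rate is not supported (must be 0) and max_tx_rate must not exceed
 * the MAC's maximum speed.
 */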
10252 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
10253 				     int min_tx_rate, int max_tx_rate)
10254 {
10255 	if (min_tx_rate != 0 ||
10256 	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
10257 		dev_err(&hdev->pdev->dev,
10258 			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
10259 			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
10260 		return -EINVAL;
10261 	}
10262 
10263 	return 0;
10264 }
10265 
10266 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
10267 			     int min_tx_rate, int max_tx_rate, bool force)
10268 {
10269 	struct hclge_vport *vport = hclge_get_vport(handle);
10270 	struct hclge_dev *hdev = vport->back;
10271 	int ret;
10272 
10273 	ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
10274 	if (ret)
10275 		return ret;
10276 
10277 	vport = hclge_get_vf_vport(hdev, vf);
10278 	if (!vport)
10279 		return -EINVAL;
10280 
10281 	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
10282 		return 0;
10283 
10284 	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
10285 	if (ret)
10286 		return ret;
10287 
10288 	vport->vf_info.max_tx_rate = max_tx_rate;
10289 
10290 	return 0;
10291 }
10292 
10293 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
10294 {
10295 	struct hnae3_handle *handle = &hdev->vport->nic;
10296 	struct hclge_vport *vport;
10297 	int ret;
10298 	int vf;
10299 
10300 	/* resume the vf max_tx_rate after reset */
10301 	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
10302 		vport = hclge_get_vf_vport(hdev, vf);
10303 		if (!vport)
10304 			return -EINVAL;
10305 
10306 		/* zero means max rate; after reset the firmware has already set
10307 		 * it to max rate, so just continue.
10308 		 */
10309 		if (!vport->vf_info.max_tx_rate)
10310 			continue;
10311 
10312 		ret = hclge_set_vf_rate(handle, vf, 0,
10313 					vport->vf_info.max_tx_rate, true);
10314 		if (ret) {
10315 			dev_err(&hdev->pdev->dev,
10316 				"vf%d failed to resume tx_rate:%u, ret=%d\n",
10317 				vf, vport->vf_info.max_tx_rate, ret);
10318 			return ret;
10319 		}
10320 	}
10321 
10322 	return 0;
10323 }
10324 
10325 static void hclge_reset_vport_state(struct hclge_dev *hdev)
10326 {
10327 	struct hclge_vport *vport = hdev->vport;
10328 	int i;
10329 
10330 	for (i = 0; i < hdev->num_alloc_vport; i++) {
10331 		hclge_vport_stop(vport);
10332 		vport++;
10333 	}
10334 }
10335 
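/* Re-initialize the hardware after a reset: command queue, TQP mapping, MAC,
 * VLAN, TM, RSS, flow director, error interrupts and per-VF state.
 */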
10336 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
10337 {
10338 	struct hclge_dev *hdev = ae_dev->priv;
10339 	struct pci_dev *pdev = ae_dev->pdev;
10340 	int ret;
10341 
10342 	set_bit(HCLGE_STATE_DOWN, &hdev->state);
10343 
10344 	hclge_stats_clear(hdev);
10345 	/* NOTE: a PF reset does not need to clear or restore the PF and VF
10346 	 * table entries, so do not clean the tables in memory here.
10347 	 */
10348 	if (hdev->reset_type == HNAE3_IMP_RESET ||
10349 	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
10350 		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
10351 		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
10352 		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
10353 		hclge_reset_umv_space(hdev);
10354 	}
10355 
10356 	ret = hclge_cmd_init(hdev);
10357 	if (ret) {
10358 		dev_err(&pdev->dev, "Cmd queue init failed\n");
10359 		return ret;
10360 	}
10361 
10362 	ret = hclge_map_tqp(hdev);
10363 	if (ret) {
10364 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
10365 		return ret;
10366 	}
10367 
10368 	ret = hclge_mac_init(hdev);
10369 	if (ret) {
10370 		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
10371 		return ret;
10372 	}
10373 
10374 	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
10375 	if (ret) {
10376 		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
10377 		return ret;
10378 	}
10379 
10380 	ret = hclge_config_gro(hdev, true);
10381 	if (ret)
10382 		return ret;
10383 
10384 	ret = hclge_init_vlan_config(hdev);
10385 	if (ret) {
10386 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
10387 		return ret;
10388 	}
10389 
10390 	ret = hclge_tm_init_hw(hdev, true);
10391 	if (ret) {
10392 		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
10393 		return ret;
10394 	}
10395 
10396 	ret = hclge_rss_init_hw(hdev);
10397 	if (ret) {
10398 		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
10399 		return ret;
10400 	}
10401 
10402 	ret = init_mgr_tbl(hdev);
10403 	if (ret) {
10404 		dev_err(&pdev->dev,
10405 			"failed to reinit manager table, ret = %d\n", ret);
10406 		return ret;
10407 	}
10408 
10409 	ret = hclge_init_fd_config(hdev);
10410 	if (ret) {
10411 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
10412 		return ret;
10413 	}
10414 
10415 	/* Log and clear the hw errors that have already occurred */
10416 	hclge_handle_all_hns_hw_errors(ae_dev);
10417 
10418 	/* Re-enable the hw error interrupts because
10419 	 * the interrupts get disabled on global reset.
10420 	 */
10421 	ret = hclge_config_nic_hw_error(hdev, true);
10422 	if (ret) {
10423 		dev_err(&pdev->dev,
10424 			"fail(%d) to re-enable NIC hw error interrupts\n",
10425 			ret);
10426 		return ret;
10427 	}
10428 
10429 	if (hdev->roce_client) {
10430 		ret = hclge_config_rocee_ras_interrupt(hdev, true);
10431 		if (ret) {
10432 			dev_err(&pdev->dev,
10433 				"fail(%d) to re-enable roce ras interrupts\n",
10434 				ret);
10435 			return ret;
10436 		}
10437 	}
10438 
10439 	hclge_reset_vport_state(hdev);
10440 	ret = hclge_reset_vport_spoofchk(hdev);
10441 	if (ret)
10442 		return ret;
10443 
10444 	ret = hclge_resume_vf_rate(hdev);
10445 	if (ret)
10446 		return ret;
10447 
10448 	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
10449 		 HCLGE_DRIVER_NAME);
10450 
10451 	return 0;
10452 }
10453 
10454 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
10455 {
10456 	struct hclge_dev *hdev = ae_dev->priv;
10457 	struct hclge_mac *mac = &hdev->hw.mac;
10458 
10459 	hclge_reset_vf_rate(hdev);
10460 	hclge_clear_vf_vlan(hdev);
10461 	hclge_misc_affinity_teardown(hdev);
10462 	hclge_state_uninit(hdev);
10463 	hclge_uninit_mac_table(hdev);
10464 
10465 	if (mac->phydev)
10466 		mdiobus_unregister(mac->mdio_bus);
10467 
10468 	/* Disable MISC vector(vector0) */
10469 	hclge_enable_vector(&hdev->misc_vector, false);
10470 	synchronize_irq(hdev->misc_vector.vector_irq);
10471 
10472 	/* Disable all hw interrupts */
10473 	hclge_config_mac_tnl_int(hdev, false);
10474 	hclge_config_nic_hw_error(hdev, false);
10475 	hclge_config_rocee_ras_interrupt(hdev, false);
10476 
10477 	hclge_cmd_uninit(hdev);
10478 	hclge_misc_irq_uninit(hdev);
10479 	hclge_pci_uninit(hdev);
10480 	mutex_destroy(&hdev->vport_lock);
10481 	hclge_uninit_vport_vlan_table(hdev);
10482 	ae_dev->priv = NULL;
10483 }
10484 
10485 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
10486 {
10487 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10488 	struct hclge_vport *vport = hclge_get_vport(handle);
10489 	struct hclge_dev *hdev = vport->back;
10490 
10491 	return min_t(u32, hdev->rss_size_max,
10492 		     vport->alloc_tqps / kinfo->num_tc);
10493 }
10494 
10495 static void hclge_get_channels(struct hnae3_handle *handle,
10496 			       struct ethtool_channels *ch)
10497 {
10498 	ch->max_combined = hclge_get_max_channels(handle);
10499 	ch->other_count = 1;
10500 	ch->max_other = 1;
10501 	ch->combined_count = handle->kinfo.rss_size;
10502 }
10503 
10504 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
10505 					u16 *alloc_tqps, u16 *max_rss_size)
10506 {
10507 	struct hclge_vport *vport = hclge_get_vport(handle);
10508 	struct hclge_dev *hdev = vport->back;
10509 
10510 	*alloc_tqps = vport->alloc_tqps;
10511 	*max_rss_size = hdev->rss_size_max;
10512 }
10513 
10514 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
10515 			      bool rxfh_configured)
10516 {
10517 	struct hclge_vport *vport = hclge_get_vport(handle);
10518 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
10519 	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
10520 	struct hclge_dev *hdev = vport->back;
10521 	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
10522 	u16 cur_rss_size = kinfo->rss_size;
10523 	u16 cur_tqps = kinfo->num_tqps;
10524 	u16 tc_valid[HCLGE_MAX_TC_NUM];
10525 	u16 roundup_size;
10526 	u32 *rss_indir;
10527 	unsigned int i;
10528 	int ret;
10529 
10530 	kinfo->req_rss_size = new_tqps_num;
10531 
10532 	ret = hclge_tm_vport_map_update(hdev);
10533 	if (ret) {
10534 		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
10535 		return ret;
10536 	}
10537 
10538 	roundup_size = roundup_pow_of_two(kinfo->rss_size);
10539 	roundup_size = ilog2(roundup_size);
10540 	/* Set the RSS TC mode according to the new RSS size */
10541 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10542 		tc_valid[i] = 0;
10543 
10544 		if (!(hdev->hw_tc_map & BIT(i)))
10545 			continue;
10546 
10547 		tc_valid[i] = 1;
10548 		tc_size[i] = roundup_size;
10549 		tc_offset[i] = kinfo->rss_size * i;
10550 	}
10551 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10552 	if (ret)
10553 		return ret;
10554 
10555 	/* RSS indirection table has been configured by the user */
10556 	if (rxfh_configured)
10557 		goto out;
10558 
10559 	/* Reinitialize the RSS indirection table according to the new RSS size */
10560 	rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10561 	if (!rss_indir)
10562 		return -ENOMEM;
10563 
10564 	for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10565 		rss_indir[i] = i % kinfo->rss_size;
10566 
10567 	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10568 	if (ret)
10569 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10570 			ret);
10571 
10572 	kfree(rss_indir);
10573 
10574 out:
10575 	if (!ret)
10576 		dev_info(&hdev->pdev->dev,
10577 			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10578 			 cur_rss_size, kinfo->rss_size,
10579 			 cur_tqps, kinfo->rss_size * kinfo->num_tc);
10580 
10581 	return ret;
10582 }
10583 
10584 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10585 			      u32 *regs_num_64_bit)
10586 {
10587 	struct hclge_desc desc;
10588 	u32 total_num;
10589 	int ret;
10590 
10591 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10592 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10593 	if (ret) {
10594 		dev_err(&hdev->pdev->dev,
10595 			"Query register number cmd failed, ret = %d.\n", ret);
10596 		return ret;
10597 	}
10598 
10599 	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
10600 	*regs_num_64_bit = le32_to_cpu(desc.data[1]);
10601 
10602 	total_num = *regs_num_32_bit + *regs_num_64_bit;
10603 	if (!total_num)
10604 		return -EINVAL;
10605 
10606 	return 0;
10607 }
10608 
10609 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10610 				 void *data)
10611 {
10612 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10613 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10614 
10615 	struct hclge_desc *desc;
10616 	u32 *reg_val = data;
10617 	__le32 *desc_data;
10618 	int nodata_num;
10619 	int cmd_num;
10620 	int i, k, n;
10621 	int ret;
10622 
10623 	if (regs_num == 0)
10624 		return 0;
10625 
10626 	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10627 	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10628 			       HCLGE_32_BIT_REG_RTN_DATANUM);
10629 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10630 	if (!desc)
10631 		return -ENOMEM;
10632 
10633 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10634 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10635 	if (ret) {
10636 		dev_err(&hdev->pdev->dev,
10637 			"Query 32 bit register cmd failed, ret = %d.\n", ret);
10638 		kfree(desc);
10639 		return ret;
10640 	}
10641 
10642 	for (i = 0; i < cmd_num; i++) {
10643 		if (i == 0) {
10644 			desc_data = (__le32 *)(&desc[i].data[0]);
10645 			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10646 		} else {
10647 			desc_data = (__le32 *)(&desc[i]);
10648 			n = HCLGE_32_BIT_REG_RTN_DATANUM;
10649 		}
10650 		for (k = 0; k < n; k++) {
10651 			*reg_val++ = le32_to_cpu(*desc_data++);
10652 
10653 			regs_num--;
10654 			if (!regs_num)
10655 				break;
10656 		}
10657 	}
10658 
10659 	kfree(desc);
10660 	return 0;
10661 }
10662 
10663 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10664 				 void *data)
10665 {
10666 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10667 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10668 
10669 	struct hclge_desc *desc;
10670 	u64 *reg_val = data;
10671 	__le64 *desc_data;
10672 	int nodata_len;
10673 	int cmd_num;
10674 	int i, k, n;
10675 	int ret;
10676 
10677 	if (regs_num == 0)
10678 		return 0;
10679 
10680 	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10681 	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10682 			       HCLGE_64_BIT_REG_RTN_DATANUM);
10683 	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10684 	if (!desc)
10685 		return -ENOMEM;
10686 
10687 	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10688 	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10689 	if (ret) {
10690 		dev_err(&hdev->pdev->dev,
10691 			"Query 64 bit register cmd failed, ret = %d.\n", ret);
10692 		kfree(desc);
10693 		return ret;
10694 	}
10695 
10696 	for (i = 0; i < cmd_num; i++) {
10697 		if (i == 0) {
10698 			desc_data = (__le64 *)(&desc[i].data[0]);
10699 			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10700 		} else {
10701 			desc_data = (__le64 *)(&desc[i]);
10702 			n = HCLGE_64_BIT_REG_RTN_DATANUM;
10703 		}
10704 		for (k = 0; k < n; k++) {
10705 			*reg_val++ = le64_to_cpu(*desc_data++);
10706 
10707 			regs_num--;
10708 			if (!regs_num)
10709 				break;
10710 		}
10711 	}
10712 
10713 	kfree(desc);
10714 	return 0;
10715 }
10716 
10717 #define MAX_SEPARATE_NUM	4
10718 #define SEPARATOR_VALUE		0xFDFCFBFA
10719 #define REG_NUM_PER_LINE	4
10720 #define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
10721 #define REG_SEPARATOR_LINE	1
10722 #define REG_NUM_REMAIN_MASK	3
10723 #define BD_LIST_MAX_NUM		30
10724 
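/* Query how many buffer descriptors each DFX register type needs; the
 * results are returned in the descriptors' data fields.
 */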
10725 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10726 {
10727 	int i;
10728 
10729 	/* initialize all command BDs except the last one */
10730 	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
10731 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
10732 					   true);
10733 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10734 	}
10735 
10736 	/* initialize the last command BD */
10737 	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
10738 
10739 	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
10740 }
10741 
10742 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10743 				    int *bd_num_list,
10744 				    u32 type_num)
10745 {
10746 	u32 entries_per_desc, desc_index, index, offset, i;
10747 	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
10748 	int ret;
10749 
10750 	ret = hclge_query_bd_num_cmd_send(hdev, desc);
10751 	if (ret) {
10752 		dev_err(&hdev->pdev->dev,
10753 			"Get dfx bd num fail, status is %d.\n", ret);
10754 		return ret;
10755 	}
10756 
10757 	entries_per_desc = ARRAY_SIZE(desc[0].data);
10758 	for (i = 0; i < type_num; i++) {
10759 		offset = hclge_dfx_bd_offset_list[i];
10760 		index = offset % entries_per_desc;
10761 		desc_index = offset / entries_per_desc;
10762 		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10763 	}
10764 
10765 	return ret;
10766 }
10767 
10768 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10769 				  struct hclge_desc *desc_src, int bd_num,
10770 				  enum hclge_opcode_type cmd)
10771 {
10772 	struct hclge_desc *desc = desc_src;
10773 	int i, ret;
10774 
10775 	hclge_cmd_setup_basic_desc(desc, cmd, true);
10776 	for (i = 0; i < bd_num - 1; i++) {
10777 		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10778 		desc++;
10779 		hclge_cmd_setup_basic_desc(desc, cmd, true);
10780 	}
10781 
10782 	desc = desc_src;
10783 	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10784 	if (ret)
10785 		dev_err(&hdev->pdev->dev,
10786 			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10787 			cmd, ret);
10788 
10789 	return ret;
10790 }
10791 
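/* Copy the register values out of the descriptors into the output buffer and
 * pad the last line with separator values; returns the number of u32 words
 * written.
 */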
10792 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10793 				    void *data)
10794 {
10795 	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10796 	struct hclge_desc *desc = desc_src;
10797 	u32 *reg = data;
10798 
10799 	entries_per_desc = ARRAY_SIZE(desc->data);
10800 	reg_num = entries_per_desc * bd_num;
10801 	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10802 	for (i = 0; i < reg_num; i++) {
10803 		index = i % entries_per_desc;
10804 		desc_index = i / entries_per_desc;
10805 		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
10806 	}
10807 	for (i = 0; i < separator_num; i++)
10808 		*reg++ = SEPARATOR_VALUE;
10809 
10810 	return reg_num + separator_num;
10811 }
10812 
10813 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10814 {
10815 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10816 	int data_len_per_desc, bd_num, i;
10817 	int bd_num_list[BD_LIST_MAX_NUM];
10818 	u32 data_len;
10819 	int ret;
10820 
10821 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10822 	if (ret) {
10823 		dev_err(&hdev->pdev->dev,
10824 			"Get dfx reg bd num fail, status is %d.\n", ret);
10825 		return ret;
10826 	}
10827 
10828 	data_len_per_desc = sizeof_field(struct hclge_desc, data);
10829 	*len = 0;
10830 	for (i = 0; i < dfx_reg_type_num; i++) {
10831 		bd_num = bd_num_list[i];
10832 		data_len = data_len_per_desc * bd_num;
10833 		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10834 	}
10835 
10836 	return ret;
10837 }
10838 
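/* Read every DFX register type into the dump buffer. The descriptor array is
 * sized for the type with the largest BD count and reused for each query.
 */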
10839 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10840 {
10841 	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10842 	int bd_num, bd_num_max, buf_len, i;
10843 	int bd_num_list[BD_LIST_MAX_NUM];
10844 	struct hclge_desc *desc_src;
10845 	u32 *reg = data;
10846 	int ret;
10847 
10848 	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10849 	if (ret) {
10850 		dev_err(&hdev->pdev->dev,
10851 			"Get dfx reg bd num fail, status is %d.\n", ret);
10852 		return ret;
10853 	}
10854 
10855 	bd_num_max = bd_num_list[0];
10856 	for (i = 1; i < dfx_reg_type_num; i++)
10857 		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10858 
10859 	buf_len = sizeof(*desc_src) * bd_num_max;
10860 	desc_src = kzalloc(buf_len, GFP_KERNEL);
10861 	if (!desc_src)
10862 		return -ENOMEM;
10863 
10864 	for (i = 0; i < dfx_reg_type_num; i++) {
10865 		bd_num = bd_num_list[i];
10866 		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10867 					     hclge_dfx_reg_opcode_list[i]);
10868 		if (ret) {
10869 			dev_err(&hdev->pdev->dev,
10870 				"Get dfx reg fail, status is %d.\n", ret);
10871 			break;
10872 		}
10873 
10874 		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10875 	}
10876 
10877 	kfree(desc_src);
10878 	return ret;
10879 }
10880 
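/* Dump the register blocks read directly from the PF PCIe register space:
 * cmdq, common, per-ring and per-vector interrupt registers, each block
 * padded with separators. Returns the number of u32 words written.
 */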
10881 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10882 			      struct hnae3_knic_private_info *kinfo)
10883 {
10884 #define HCLGE_RING_REG_OFFSET		0x200
10885 #define HCLGE_RING_INT_REG_OFFSET	0x4
10886 
10887 	int i, j, reg_num, separator_num;
10888 	int data_num_sum;
10889 	u32 *reg = data;
10890 
10891 	/* fetch per-PF register values from the PF PCIe register space */
10892 	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10893 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10894 	for (i = 0; i < reg_num; i++)
10895 		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10896 	for (i = 0; i < separator_num; i++)
10897 		*reg++ = SEPARATOR_VALUE;
10898 	data_num_sum = reg_num + separator_num;
10899 
10900 	reg_num = ARRAY_SIZE(common_reg_addr_list);
10901 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10902 	for (i = 0; i < reg_num; i++)
10903 		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10904 	for (i = 0; i < separator_num; i++)
10905 		*reg++ = SEPARATOR_VALUE;
10906 	data_num_sum += reg_num + separator_num;
10907 
10908 	reg_num = ARRAY_SIZE(ring_reg_addr_list);
10909 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10910 	for (j = 0; j < kinfo->num_tqps; j++) {
10911 		for (i = 0; i < reg_num; i++)
10912 			*reg++ = hclge_read_dev(&hdev->hw,
10913 						ring_reg_addr_list[i] +
10914 						HCLGE_RING_REG_OFFSET * j);
10915 		for (i = 0; i < separator_num; i++)
10916 			*reg++ = SEPARATOR_VALUE;
10917 	}
10918 	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10919 
10920 	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10921 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10922 	for (j = 0; j < hdev->num_msi_used - 1; j++) {
10923 		for (i = 0; i < reg_num; i++)
10924 			*reg++ = hclge_read_dev(&hdev->hw,
10925 						tqp_intr_reg_addr_list[i] +
10926 						HCLGE_RING_INT_REG_OFFSET * j);
10927 		for (i = 0; i < separator_num; i++)
10928 			*reg++ = SEPARATOR_VALUE;
10929 	}
10930 	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10931 
10932 	return data_num_sum;
10933 }
10934 
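/* ethtool .get_regs_len: total dump size covers the directly read PF
 * register blocks, the 32-bit and 64-bit register dumps and the DFX
 * registers, with one separator line accounted per block.
 */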
10935 static int hclge_get_regs_len(struct hnae3_handle *handle)
10936 {
10937 	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10938 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10939 	struct hclge_vport *vport = hclge_get_vport(handle);
10940 	struct hclge_dev *hdev = vport->back;
10941 	int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10942 	int regs_lines_32_bit, regs_lines_64_bit;
10943 	int ret;
10944 
10945 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10946 	if (ret) {
10947 		dev_err(&hdev->pdev->dev,
10948 			"Get register number failed, ret = %d.\n", ret);
10949 		return ret;
10950 	}
10951 
10952 	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10953 	if (ret) {
10954 		dev_err(&hdev->pdev->dev,
10955 			"Get dfx reg len failed, ret = %d.\n", ret);
10956 		return ret;
10957 	}
10958 
10959 	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10960 		REG_SEPARATOR_LINE;
10961 	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10962 		REG_SEPARATOR_LINE;
10963 	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10964 		REG_SEPARATOR_LINE;
10965 	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10966 		REG_SEPARATOR_LINE;
10967 	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10968 		REG_SEPARATOR_LINE;
10969 	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10970 		REG_SEPARATOR_LINE;
10971 
10972 	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10973 		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10974 		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10975 }
10976 
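/* ethtool .get_regs: report the firmware version, then dump the PF
 * registers, the 32-bit and 64-bit registers and finally the DFX registers
 * into the caller's buffer.
 */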
10977 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10978 			   void *data)
10979 {
10980 	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10981 	struct hclge_vport *vport = hclge_get_vport(handle);
10982 	struct hclge_dev *hdev = vport->back;
10983 	u32 regs_num_32_bit, regs_num_64_bit;
10984 	int i, reg_num, separator_num, ret;
10985 	u32 *reg = data;
10986 
10987 	*version = hdev->fw_version;
10988 
10989 	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10990 	if (ret) {
10991 		dev_err(&hdev->pdev->dev,
10992 			"Get register number failed, ret = %d.\n", ret);
10993 		return;
10994 	}
10995 
10996 	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10997 
10998 	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10999 	if (ret) {
11000 		dev_err(&hdev->pdev->dev,
11001 			"Get 32 bit register failed, ret = %d.\n", ret);
11002 		return;
11003 	}
11004 	reg_num = regs_num_32_bit;
11005 	reg += reg_num;
11006 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11007 	for (i = 0; i < separator_num; i++)
11008 		*reg++ = SEPARATOR_VALUE;
11009 
11010 	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
11011 	if (ret) {
11012 		dev_err(&hdev->pdev->dev,
11013 			"Get 64 bit register failed, ret = %d.\n", ret);
11014 		return;
11015 	}
11016 	reg_num = regs_num_64_bit * 2;
11017 	reg += reg_num;
11018 	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
11019 	for (i = 0; i < separator_num; i++)
11020 		*reg++ = SEPARATOR_VALUE;
11021 
11022 	ret = hclge_get_dfx_reg(hdev, reg);
11023 	if (ret)
11024 		dev_err(&hdev->pdev->dev,
11025 			"Get dfx register failed, ret = %d.\n", ret);
11026 }
11027 
11028 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
11029 {
11030 	struct hclge_set_led_state_cmd *req;
11031 	struct hclge_desc desc;
11032 	int ret;
11033 
11034 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
11035 
11036 	req = (struct hclge_set_led_state_cmd *)desc.data;
11037 	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
11038 			HCLGE_LED_LOCATE_STATE_S, locate_led_status);
11039 
11040 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11041 	if (ret)
11042 		dev_err(&hdev->pdev->dev,
11043 			"Send set led state cmd error, ret = %d\n", ret);
11044 
11045 	return ret;
11046 }
11047 
11048 enum hclge_led_status {
11049 	HCLGE_LED_OFF,
11050 	HCLGE_LED_ON,
11051 	HCLGE_LED_NO_CHANGE = 0xFF,
11052 };
11053 
11054 static int hclge_set_led_id(struct hnae3_handle *handle,
11055 			    enum ethtool_phys_id_state status)
11056 {
11057 	struct hclge_vport *vport = hclge_get_vport(handle);
11058 	struct hclge_dev *hdev = vport->back;
11059 
11060 	switch (status) {
11061 	case ETHTOOL_ID_ACTIVE:
11062 		return hclge_set_led_status(hdev, HCLGE_LED_ON);
11063 	case ETHTOOL_ID_INACTIVE:
11064 		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
11065 	default:
11066 		return -EINVAL;
11067 	}
11068 }
11069 
11070 static void hclge_get_link_mode(struct hnae3_handle *handle,
11071 				unsigned long *supported,
11072 				unsigned long *advertising)
11073 {
11074 	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
11075 	struct hclge_vport *vport = hclge_get_vport(handle);
11076 	struct hclge_dev *hdev = vport->back;
11077 	unsigned int idx = 0;
11078 
11079 	for (; idx < size; idx++) {
11080 		supported[idx] = hdev->hw.mac.supported[idx];
11081 		advertising[idx] = hdev->hw.mac.advertising[idx];
11082 	}
11083 }
11084 
11085 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
11086 {
11087 	struct hclge_vport *vport = hclge_get_vport(handle);
11088 	struct hclge_dev *hdev = vport->back;
11089 
11090 	return hclge_config_gro(hdev, enable);
11091 }
11092 
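/* Re-apply the promiscuous mode and VLAN filter state of vport 0 whenever
 * its promiscuous flags have changed (for instance after a MAC/VLAN table
 * overflow) or a previous update is still pending.
 */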
11093 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
11094 {
11095 	struct hclge_vport *vport = &hdev->vport[0];
11096 	struct hnae3_handle *handle = &vport->nic;
11097 	u8 tmp_flags = 0;
11098 	int ret;
11099 
11100 	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
11101 		set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11102 		vport->last_promisc_flags = vport->overflow_promisc_flags;
11103 	}
11104 
11105 	if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
11106 		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
11107 		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
11108 					     tmp_flags & HNAE3_MPE);
11109 		if (!ret) {
11110 			clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
11111 			hclge_enable_vlan_filter(handle,
11112 						 tmp_flags & HNAE3_VLAN_FLTR);
11113 		}
11114 	}
11115 }
11116 
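/* Ask the firmware whether an SFP module is currently present; returns
 * false if the query fails or no module is plugged in.
 */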
11117 static bool hclge_module_existed(struct hclge_dev *hdev)
11118 {
11119 	struct hclge_desc desc;
11120 	u32 existed;
11121 	int ret;
11122 
11123 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
11124 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11125 	if (ret) {
11126 		dev_err(&hdev->pdev->dev,
11127 			"failed to get SFP exist state, ret = %d\n", ret);
11128 		return false;
11129 	}
11130 
11131 	existed = le32_to_cpu(desc.data[0]);
11132 
11133 	return existed != 0;
11134 }
11135 
11136 /* Reading the module EEPROM info needs 6 BDs (140 bytes in total) per command.
11137  * Return the number of bytes actually read; 0 means the read failed.
11138  */
11139 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
11140 				     u32 len, u8 *data)
11141 {
11142 	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
11143 	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
11144 	u16 read_len;
11145 	u16 copy_len;
11146 	int ret;
11147 	int i;
11148 
11149 	/* setup all 6 bds to read module eeprom info. */
11150 	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11151 		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
11152 					   true);
11153 
11154 		/* bd0~bd4 need next flag */
11155 		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
11156 			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
11157 	}
11158 
11159 	/* set up BD0, which carries the read offset and length. */
11160 	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
11161 	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
11162 	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
11163 	sfp_info_bd0->read_len = cpu_to_le16(read_len);
11164 
11165 	ret = hclge_cmd_send(&hdev->hw, desc, i);
11166 	if (ret) {
11167 		dev_err(&hdev->pdev->dev,
11168 			"failed to get SFP eeprom info, ret = %d\n", ret);
11169 		return 0;
11170 	}
11171 
11172 	/* copy sfp info from bd0 to out buffer. */
11173 	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
11174 	memcpy(data, sfp_info_bd0->data, copy_len);
11175 	read_len = copy_len;
11176 
11177 	/* copy sfp info from bd1~bd5 to out buffer if needed. */
11178 	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
11179 		if (read_len >= len)
11180 			return read_len;
11181 
11182 		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
11183 		memcpy(data + read_len, desc[i].data, copy_len);
11184 		read_len += copy_len;
11185 	}
11186 
11187 	return read_len;
11188 }
11189 
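/* ethtool .get_module_eeprom: only supported on fiber ports. Read the
 * requested range in chunks of up to HCLGE_SFP_INFO_MAX_LEN bytes until the
 * whole length has been copied.
 */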
11190 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
11191 				   u32 len, u8 *data)
11192 {
11193 	struct hclge_vport *vport = hclge_get_vport(handle);
11194 	struct hclge_dev *hdev = vport->back;
11195 	u32 read_len = 0;
11196 	u16 data_len;
11197 
11198 	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
11199 		return -EOPNOTSUPP;
11200 
11201 	if (!hclge_module_existed(hdev))
11202 		return -ENXIO;
11203 
11204 	while (read_len < len) {
11205 		data_len = hclge_get_sfp_eeprom_info(hdev,
11206 						     offset + read_len,
11207 						     len - read_len,
11208 						     data + read_len);
11209 		if (!data_len)
11210 			return -EIO;
11211 
11212 		read_len += data_len;
11213 	}
11214 
11215 	return 0;
11216 }
11217 
11218 static const struct hnae3_ae_ops hclge_ops = {
11219 	.init_ae_dev = hclge_init_ae_dev,
11220 	.uninit_ae_dev = hclge_uninit_ae_dev,
11221 	.flr_prepare = hclge_flr_prepare,
11222 	.flr_done = hclge_flr_done,
11223 	.init_client_instance = hclge_init_client_instance,
11224 	.uninit_client_instance = hclge_uninit_client_instance,
11225 	.map_ring_to_vector = hclge_map_ring_to_vector,
11226 	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
11227 	.get_vector = hclge_get_vector,
11228 	.put_vector = hclge_put_vector,
11229 	.set_promisc_mode = hclge_set_promisc_mode,
11230 	.request_update_promisc_mode = hclge_request_update_promisc_mode,
11231 	.set_loopback = hclge_set_loopback,
11232 	.start = hclge_ae_start,
11233 	.stop = hclge_ae_stop,
11234 	.client_start = hclge_client_start,
11235 	.client_stop = hclge_client_stop,
11236 	.get_status = hclge_get_status,
11237 	.get_ksettings_an_result = hclge_get_ksettings_an_result,
11238 	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
11239 	.get_media_type = hclge_get_media_type,
11240 	.check_port_speed = hclge_check_port_speed,
11241 	.get_fec = hclge_get_fec,
11242 	.set_fec = hclge_set_fec,
11243 	.get_rss_key_size = hclge_get_rss_key_size,
11244 	.get_rss_indir_size = hclge_get_rss_indir_size,
11245 	.get_rss = hclge_get_rss,
11246 	.set_rss = hclge_set_rss,
11247 	.set_rss_tuple = hclge_set_rss_tuple,
11248 	.get_rss_tuple = hclge_get_rss_tuple,
11249 	.get_tc_size = hclge_get_tc_size,
11250 	.get_mac_addr = hclge_get_mac_addr,
11251 	.set_mac_addr = hclge_set_mac_addr,
11252 	.do_ioctl = hclge_do_ioctl,
11253 	.add_uc_addr = hclge_add_uc_addr,
11254 	.rm_uc_addr = hclge_rm_uc_addr,
11255 	.add_mc_addr = hclge_add_mc_addr,
11256 	.rm_mc_addr = hclge_rm_mc_addr,
11257 	.set_autoneg = hclge_set_autoneg,
11258 	.get_autoneg = hclge_get_autoneg,
11259 	.restart_autoneg = hclge_restart_autoneg,
11260 	.halt_autoneg = hclge_halt_autoneg,
11261 	.get_pauseparam = hclge_get_pauseparam,
11262 	.set_pauseparam = hclge_set_pauseparam,
11263 	.set_mtu = hclge_set_mtu,
11264 	.reset_queue = hclge_reset_tqp,
11265 	.get_stats = hclge_get_stats,
11266 	.get_mac_stats = hclge_get_mac_stat,
11267 	.update_stats = hclge_update_stats,
11268 	.get_strings = hclge_get_strings,
11269 	.get_sset_count = hclge_get_sset_count,
11270 	.get_fw_version = hclge_get_fw_version,
11271 	.get_mdix_mode = hclge_get_mdix_mode,
11272 	.enable_vlan_filter = hclge_enable_vlan_filter,
11273 	.set_vlan_filter = hclge_set_vlan_filter,
11274 	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
11275 	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
11276 	.reset_event = hclge_reset_event,
11277 	.get_reset_level = hclge_get_reset_level,
11278 	.set_default_reset_request = hclge_set_def_reset_request,
11279 	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
11280 	.set_channels = hclge_set_channels,
11281 	.get_channels = hclge_get_channels,
11282 	.get_regs_len = hclge_get_regs_len,
11283 	.get_regs = hclge_get_regs,
11284 	.set_led_id = hclge_set_led_id,
11285 	.get_link_mode = hclge_get_link_mode,
11286 	.add_fd_entry = hclge_add_fd_entry,
11287 	.del_fd_entry = hclge_del_fd_entry,
11288 	.del_all_fd_entries = hclge_del_all_fd_entries,
11289 	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
11290 	.get_fd_rule_info = hclge_get_fd_rule_info,
11291 	.get_fd_all_rules = hclge_get_all_rules,
11292 	.enable_fd = hclge_enable_fd,
11293 	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
11294 	.dbg_run_cmd = hclge_dbg_run_cmd,
11295 	.handle_hw_ras_error = hclge_handle_hw_ras_error,
11296 	.get_hw_reset_stat = hclge_get_hw_reset_stat,
11297 	.ae_dev_resetting = hclge_ae_dev_resetting,
11298 	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
11299 	.set_gro_en = hclge_gro_en,
11300 	.get_global_queue_id = hclge_covert_handle_qid_global,
11301 	.set_timer_task = hclge_set_timer_task,
11302 	.mac_connect_phy = hclge_mac_connect_phy,
11303 	.mac_disconnect_phy = hclge_mac_disconnect_phy,
11304 	.get_vf_config = hclge_get_vf_config,
11305 	.set_vf_link_state = hclge_set_vf_link_state,
11306 	.set_vf_spoofchk = hclge_set_vf_spoofchk,
11307 	.set_vf_trust = hclge_set_vf_trust,
11308 	.set_vf_rate = hclge_set_vf_rate,
11309 	.set_vf_mac = hclge_set_vf_mac,
11310 	.get_module_eeprom = hclge_get_module_eeprom,
11311 	.get_cmdq_stat = hclge_get_cmdq_stat,
11312 };
11313 
11314 static struct hnae3_ae_algo ae_algo = {
11315 	.ops = &hclge_ops,
11316 	.pdev_id_table = ae_algo_pci_tbl,
11317 };
11318 
11319 static int hclge_init(void)
11320 {
11321 	pr_info("%s is initializing\n", HCLGE_NAME);
11322 
11323 	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
11324 	if (!hclge_wq) {
11325 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
11326 		return -ENOMEM;
11327 	}
11328 
11329 	hnae3_register_ae_algo(&ae_algo);
11330 
11331 	return 0;
11332 }
11333 
11334 static void hclge_exit(void)
11335 {
11336 	hnae3_unregister_ae_algo(&ae_algo);
11337 	destroy_workqueue(hclge_wq);
11338 }
11339 module_init(hclge_init);
11340 module_exit(hclge_exit);
11341 
11342 MODULE_LICENSE("GPL");
11343 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
11344 MODULE_DESCRIPTION("HCLGE Driver");
11345 MODULE_VERSION(HCLGE_MOD_VERSION);
11346